greed.js

Run Python libraries in the browser with WebGPU acceleration - PyTorch, NumPy, and more. Modular architecture with full backward compatibility.

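This chunk (module 867 of the greed.js bundle) exports two helpers: pH, which returns the Python source of a WebGPU-aware PyTorch polyfill, and EH, which screens that source for dangerous call patterns. Once the returned script has been executed in the browser-side Python runtime, it registers a torch-like module in sys.modules. A minimal sketch of the resulting surface, assuming the setup script has already run (the greed.js loader API itself is not part of this file):

# Sketch (assumption): the setup script returned by this chunk has already been
# executed, which registers the polyfill under sys.modules['torch'].
import torch

t = torch.tensor([1.0, 2.0, 3.0])       # torch.tensor defaults to the 'webgpu' device here
print(t.device, t.shape, t.dtype)        # webgpu (3,) float32

g = torch.tensor([[1.0, 2.0],
                  [3.0, 4.0]], device='cuda')   # CUDA/GPU requests are mapped to WebGPU
print(g.device)                          # webgpu
print(g.numpy())                         # the plain NumPy array backing the tensor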
"use strict";(this.webpackChunkGreed=this.webpackChunkGreed||[]).push([[867],{867:(e,n,t)=>{function s(){return'\n# WebGPU-enabled PyTorch polyfill setup\nimport numpy as np\nimport sys\n\nclass WebGPUDevice:\n def __init__(self, device_type):\n self.type = device_type\n \n def __str__(self):\n return self.type\n \n def __repr__(self):\n return f"device(type=\'{self.type}\')"\n\nclass WebGPUTensor:\n def __init__(self, data, device=\'cpu\', dtype=\'float32\', requires_grad=False, _force_webgpu=False):\n if isinstance(data, (list, tuple)):\n self.data = np.array(data, dtype=dtype)\n elif isinstance(data, np.ndarray):\n self.data = data.astype(dtype)\n else:\n self.data = np.array(data, dtype=dtype)\n \n # Determine actual device based on tensor size and WebGPU availability\n self._original_device = device\n self._force_webgpu = _force_webgpu\n \n # Auto-detect WebGPU usage for larger tensors or when forced\n if (_force_webgpu or self._should_use_webgpu(data)) and device != \'cpu\':\n self.device = WebGPUDevice(\'webgpu\')\n elif device == \'cuda\' or device == \'gpu\':\n # Map CUDA/GPU requests to WebGPU if available\n self.device = WebGPUDevice(\'webgpu\')\n else:\n self.device = WebGPUDevice(device) if isinstance(device, str) else device\n \n self.dtype = dtype\n self.requires_grad = requires_grad\n self.shape = self.data.shape\n self.ndim = self.data.ndim\n self.grad = None\n self.grad_fn = None\n \n def size(self, dim=None):\n """Return the size of the tensor or a specific dimension"""\n if dim is None:\n return self.shape\n else:\n if dim < 0:\n dim = self.ndim + dim\n if dim >= self.ndim or dim < 0:\n raise IndexError(f"Dimension out of range (expected to be in range of [{-self.ndim}, {self.ndim-1}], but got {dim})")\n return self.shape[dim]\n \n def _should_use_webgpu(self, data):\n """Determine if WebGPU should be used based on tensor characteristics"""\n try:\n # Use WebGPU for tensors with more than 1 element (very low threshold)\n if hasattr(data, \'size\'):\n return data.size > 1\n elif hasattr(data, \'__len__\'):\n return len(data) > 1\n return False\n except:\n return False\n \n def numpy(self):\n return self.data\n \n def tolist(self):\n return self.data.tolist()\n \n def view(self, *shape):\n """Reshape tensor maintaining data"""\n if len(shape) == 1 and isinstance(shape[0], (list, tuple)):\n shape = shape[0]\n \n # Handle -1 for automatic size calculation\n if -1 in shape:\n total_size = self.data.size\n known_size = 1\n unknown_idx = -1\n for i, s in enumerate(shape):\n if s == -1:\n unknown_idx = i\n else:\n known_size *= s\n if unknown_idx != -1:\n shape = list(shape)\n shape[unknown_idx] = total_size // known_size\n shape = tuple(shape)\n \n reshaped_data = self.data.reshape(shape)\n return WebGPUTensor(reshaped_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def reshape(self, *shape):\n return self.view(*shape)\n \n def transpose(self, dim0, dim1):\n transposed_data = np.swapaxes(self.data, dim0, dim1)\n return WebGPUTensor(transposed_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def sum(self, dim=None, keepdim=False):\n if dim is None:\n result_data = np.sum(self.data)\n else:\n result_data = np.sum(self.data, axis=dim, keepdims=keepdim)\n result = WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n if self.requires_grad:\n result._backward_fn = lambda grad: self._sum_backward(grad, dim, keepdim)\n result._inputs = [self]\n \n return result\n \n 
def mean(self, dim=None, keepdim=False):\n if dim is None:\n result_data = np.mean(self.data)\n else:\n result_data = np.mean(self.data, axis=dim, keepdims=keepdim)\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def std(self, dim=None, keepdim=False, unbiased=True):\n """Compute standard deviation"""\n if dim is None:\n result_data = np.std(self.data, ddof=1 if unbiased else 0)\n else:\n result_data = np.std(self.data, axis=dim, keepdims=keepdim, ddof=1 if unbiased else 0)\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def var(self, dim=None, keepdim=False, unbiased=True):\n """Compute variance"""\n if dim is None:\n result_data = np.var(self.data, ddof=1 if unbiased else 0)\n else:\n result_data = np.var(self.data, axis=dim, keepdims=keepdim, ddof=1 if unbiased else 0)\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def to(self, device):\n new_device = WebGPUDevice(device) if isinstance(device, str) else device\n return WebGPUTensor(self.data.copy(), device=new_device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def cpu(self):\n return self.to(\'cpu\')\n \n def cuda(self):\n return self.to(\'cuda\')\n \n def __repr__(self):\n return f"tensor({self.data}, device=\'{self.device}\', dtype=\'{self.dtype}\')"\n \n def __float__(self):\n """Convert single-element tensor to Python float"""\n if self.data.size == 1:\n return float(self.data.item())\n else:\n raise TypeError(f"only single-element tensors can be converted to Python scalars")\n \n def __int__(self):\n """Convert single-element tensor to Python int"""\n if self.data.size == 1:\n return int(self.data.item())\n else:\n raise TypeError(f"only single-element tensors can be converted to Python scalars")\n \n def __getitem__(self, key):\n """Support tensor indexing like tensor[indices]"""\n if isinstance(key, WebGPUTensor):\n # Convert WebGPUTensor indices to numpy array\n indices = key.data.astype(int)\n result_data = self.data[indices]\n else:\n result_data = self.data[key]\n \n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n # Arithmetic operators\n def __add__(self, other):\n if isinstance(other, WebGPUTensor):\n result_data = self.data + other.data\n else:\n result_data = self.data + other\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def __sub__(self, other):\n if isinstance(other, WebGPUTensor):\n result_data = self.data - other.data\n else:\n result_data = self.data - other\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def __mul__(self, other):\n if isinstance(other, WebGPUTensor):\n result_data = self.data * other.data\n else:\n result_data = self.data * other\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def __truediv__(self, other):\n if isinstance(other, WebGPUTensor):\n result_data = self.data / other.data\n else:\n result_data = self.data / other\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def __radd__(self, other):\n result_data = other + self.data\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def __rmul__(self, other):\n result_data = 
other * self.data\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def __matmul__(self, other):\n """Matrix multiplication operator (@)"""\n if isinstance(other, WebGPUTensor):\n if self.ndim == 2 and other.ndim == 2:\n result_data = np.dot(self.data, other.data)\n elif self.ndim == 1 and other.ndim == 2:\n result_data = np.dot(self.data, other.data)\n elif self.ndim == 2 and other.ndim == 1:\n result_data = np.dot(self.data, other.data)\n else:\n result_data = np.matmul(self.data, other.data)\n else:\n result_data = np.matmul(self.data, other)\n \n result = WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n if self.requires_grad or (isinstance(other, WebGPUTensor) and other.requires_grad):\n result._backward_fn = lambda grad: self._matmul_backward(grad, other)\n result._inputs = [self, other] if isinstance(other, WebGPUTensor) else [self]\n \n return result\n \n def __rmatmul__(self, other):\n """Reverse matrix multiplication"""\n result_data = np.matmul(other, self.data)\n return WebGPUTensor(result_data, device=self.device, dtype=self.dtype, requires_grad=self.requires_grad)\n \n def retain_grad(self):\n """Enable gradient retention for non-leaf tensor"""\n if not self.requires_grad:\n raise RuntimeError("can\'t retain_grad on Tensor that has requires_grad=False")\n self._retain_grad = True\n return self\n \n def backward(self, gradient=None, retain_graph=False, create_graph=False):\n """Compute gradients via automatic differentiation"""\n if not self.requires_grad:\n return\n \n if gradient is None:\n if self.data.size == 1:\n gradient = WebGPUTensor(np.ones_like(self.data), device=self.device, dtype=self.dtype)\n else:\n raise RuntimeError("grad can be implicitly created only for scalar outputs")\n \n # Initialize gradient if not present\n if self.grad is None:\n self.grad = WebGPUTensor(np.zeros_like(self.data), device=self.device, dtype=self.dtype)\n \n # Accumulate gradient\n self.grad.data += gradient.data if isinstance(gradient, WebGPUTensor) else gradient\n \n # Call backward function if exists\n if hasattr(self, \'_backward_fn\') and self._backward_fn:\n self._backward_fn(gradient)\n \n def _matmul_backward(self, grad_output, other):\n """Backward pass for matrix multiplication"""\n if isinstance(other, WebGPUTensor):\n # d/da (a @ b) = grad_output @ b.T\n if self.grad is None:\n self.grad = WebGPUTensor(np.zeros_like(self.data), device=self.device, dtype=self.dtype)\n self_grad = np.matmul(grad_output.data, other.data.T)\n self.grad.data += self_grad\n \n # d/db (a @ b) = a.T @ grad_output \n if other.requires_grad:\n if other.grad is None:\n other.grad = WebGPUTensor(np.zeros_like(other.data), device=other.device, dtype=other.dtype)\n other_grad = np.matmul(self.data.T, grad_output.data)\n other.grad.data += other_grad\n\n# Linear algebra operations module\nclass TorchLinalg:\n """Linear algebra operations module"""\n \n def __init__(self):\n pass\n \n def det(self, input_tensor):\n """Compute determinant"""\n if isinstance(input_tensor, WebGPUTensor):\n if input_tensor.ndim != 2 or input_tensor.shape[0] != input_tensor.shape[1]:\n raise RuntimeError("linalg.det() expects a 2D square tensor")\n det_value = np.linalg.det(input_tensor.data.reshape(input_tensor.shape))\n return WebGPUTensor([det_value], device=input_tensor.device, dtype=input_tensor.dtype)\n else:\n return np.linalg.det(input_tensor)\n \n def inv(self, input_tensor):\n """Compute matrix inverse"""\n 
if isinstance(input_tensor, WebGPUTensor):\n if input_tensor.ndim != 2 or input_tensor.shape[0] != input_tensor.shape[1]:\n raise RuntimeError("linalg.inv() expects a 2D square tensor")\n inv_data = np.linalg.inv(input_tensor.data.reshape(input_tensor.shape))\n return WebGPUTensor(inv_data, device=input_tensor.device, dtype=input_tensor.dtype)\n else:\n return np.linalg.inv(input_tensor)\n \n def norm(self, input_tensor, ord=None, dim=None, keepdim=False):\n """Compute matrix or vector norm"""\n if isinstance(input_tensor, WebGPUTensor):\n if dim is None:\n norm_value = np.linalg.norm(input_tensor.data, ord=ord)\n return WebGPUTensor([norm_value], device=input_tensor.device, dtype=input_tensor.dtype)\n else:\n norm_data = np.linalg.norm(input_tensor.data.reshape(input_tensor.shape), ord=ord, axis=dim, keepdims=keepdim)\n return WebGPUTensor(norm_data, device=input_tensor.device, dtype=input_tensor.dtype)\n else:\n return np.linalg.norm(input_tensor, ord=ord, axis=dim, keepdims=keepdim)\n \n def eig(self, input_tensor):\n """Compute eigenvalues and eigenvectors"""\n if isinstance(input_tensor, WebGPUTensor):\n if input_tensor.ndim != 2 or input_tensor.shape[0] != input_tensor.shape[1]:\n raise RuntimeError("linalg.eig() expects a 2D square tensor")\n eigenvalues, eigenvectors = np.linalg.eig(input_tensor.data.reshape(input_tensor.shape))\n return (\n WebGPUTensor(eigenvalues, device=input_tensor.device, dtype=input_tensor.dtype),\n WebGPUTensor(eigenvectors, device=input_tensor.device, dtype=input_tensor.dtype)\n )\n else:\n return np.linalg.eig(input_tensor)\n \n def svd(self, input_tensor, full_matrices=True):\n """Compute singular value decomposition"""\n if isinstance(input_tensor, WebGPUTensor):\n U, S, Vh = np.linalg.svd(input_tensor.data.reshape(input_tensor.shape), full_matrices=full_matrices)\n return (\n WebGPUTensor(U, device=input_tensor.device, dtype=input_tensor.dtype),\n WebGPUTensor(S, device=input_tensor.device, dtype=input_tensor.dtype),\n WebGPUTensor(Vh, device=input_tensor.device, dtype=input_tensor.dtype)\n )\n else:\n return np.linalg.svd(input_tensor, full_matrices=full_matrices)\n\n# Neural network functional operations\nclass TorchNNFunctional:\n @staticmethod\n def relu(input_tensor):\n if isinstance(input_tensor, WebGPUTensor):\n result_data = np.maximum(input_tensor.data, 0)\n return WebGPUTensor(result_data, device=input_tensor.device, dtype=input_tensor.dtype)\n else:\n return np.maximum(input_tensor, 0)\n \n @staticmethod\n def sigmoid(input_tensor):\n if isinstance(input_tensor, WebGPUTensor):\n result_data = 1 / (1 + np.exp(-input_tensor.data))\n return WebGPUTensor(result_data, device=input_tensor.device, dtype=input_tensor.dtype)\n else:\n return 1 / (1 + np.exp(-input_tensor))\n\n# Neural network modules\nclass TorchNNModule:\n def __init__(self):\n self._parameters = {}\n self._modules = {}\n \n def parameters(self):\n params = []\n for param in self._parameters.values():\n params.append(param)\n for module in self._modules.values():\n if hasattr(module, \'parameters\'):\n params.extend(module.parameters())\n return params\n \n def __call__(self, *args, **kwargs):\n return self.forward(*args, **kwargs)\n \n def forward(self, x):\n raise NotImplementedError\n\nclass TorchNNLinear(TorchNNModule):\n def __init__(self, in_features, out_features, bias=True):\n super().__init__()\n self.in_features = in_features\n self.out_features = out_features\n \n weight_data = np.random.randn(out_features, in_features) * np.sqrt(2.0 / in_features)\n self.weight = 
WebGPUTensor(weight_data, requires_grad=True)\n self._parameters[\'weight\'] = self.weight\n \n if bias:\n bias_data = np.zeros(out_features)\n self.bias = WebGPUTensor(bias_data, requires_grad=True)\n self._parameters[\'bias\'] = self.bias\n else:\n self.bias = None\n \n def forward(self, x):\n if isinstance(x, WebGPUTensor):\n result = WebGPUTensor(np.dot(x.data, self.weight.data.T), device=x.device, dtype=x.dtype)\n if self.bias is not None:\n result.data = result.data + self.bias.data\n return result\n else:\n raise TypeError("Input must be WebGPUTensor")\n\n# Create torch module with essential functions\nclass TorchModule:\n def __init__(self):\n self.tensor = self._tensor\n self.zeros = self._zeros\n self.ones = self._ones\n self.randn = self._randn\n self.matmul = self._matmul\n self.sum = self._sum\n self.as_tensor = self._as_tensor\n self.arange = self._arange\n self.randperm = self._randperm\n self.nn = TorchNN()\n \n # Add Tensor class reference\n self.Tensor = WebGPUTensor\n \n # Linear algebra module\n self.linalg = TorchLinalg()\n \n # Activation functions\n self.relu = self._relu\n \n # Mathematical functions\n self.round = self.round\n \n # Data types\n self.float32 = \'float32\'\n self.float64 = \'float64\'\n self.double = \'float64\'\n self.float = \'float32\'\n self.int32 = \'int32\'\n self.int64 = \'int64\'\n self.long = \'int64\'\n self.int = \'int32\'\n self.bool = \'bool\'\n self.uint8 = \'uint8\'\n \n # Device types \n self.device = self._device\n \n def _tensor(self, data, **kwargs):\n # Enable WebGPU detection by default for tensor creation\n if \'device\' not in kwargs:\n kwargs[\'device\'] = \'webgpu\' # Default to WebGPU instead of CPU\n return WebGPUTensor(data, **kwargs)\n \n def _zeros(self, *shape, **kwargs):\n data = np.zeros(shape)\n return WebGPUTensor(data, **kwargs)\n \n def _ones(self, *shape, **kwargs):\n data = np.ones(shape)\n return WebGPUTensor(data, **kwargs)\n \n def _randn(self, *shape, **kwargs):\n data = np.random.randn(*shape)\n return WebGPUTensor(data, **kwargs)\n \n def _matmul(self, a, b):\n if isinstance(a, WebGPUTensor) and isinstance(b, WebGPUTensor):\n return WebGPUTensor(np.dot(a.data, b.data), device=a.device)\n return WebGPUTensor(np.dot(a, b))\n \n def _device(self, device_type):\n """Create a device object"""\n return WebGPUDevice(device_type)\n \n def _sum(self, input_tensor, dim=None, keepdim=False, dtype=None):\n """Compute sum of tensor elements"""\n if isinstance(input_tensor, WebGPUTensor):\n return input_tensor.sum(dim=dim, keepdim=keepdim)\n else:\n # Handle numpy arrays or lists\n if dim is None:\n result_data = np.sum(input_tensor)\n else:\n result_data = np.sum(input_tensor, axis=dim, keepdims=keepdim)\n return WebGPUTensor(result_data, dtype=dtype or \'float32\')\n \n def _as_tensor(self, data, dtype=None, device=None):\n """Convert data to tensor, similar to torch.as_tensor"""\n # Determine dtype\n if dtype is None:\n if hasattr(data, \'dtype\'):\n dtype = str(data.dtype)\n else:\n dtype = \'float32\'\n \n # Determine device - default to WebGPU for better performance\n if device is None:\n device = \'webgpu\'\n \n # Create tensor\n return WebGPUTensor(data, dtype=dtype, device=device)\n \n def eye(self, n, m=None, dtype=\'float32\', device=\'webgpu\'):\n """Create identity matrix"""\n if m is None:\n m = n\n data = np.eye(n, m)\n return WebGPUTensor(data, device=device, dtype=dtype)\n \n def round(self, input_tensor, decimals=0):\n """Round tensor elements to given number of decimals"""\n if 
isinstance(input_tensor, WebGPUTensor):\n rounded_data = np.round(input_tensor.data, decimals=decimals)\n return WebGPUTensor(rounded_data, device=input_tensor.device, dtype=input_tensor.dtype, requires_grad=input_tensor.requires_grad)\n else:\n return WebGPUTensor(np.round(input_tensor, decimals=decimals))\n \n def det(self, input_tensor):\n """Compute determinant of square matrix"""\n if isinstance(input_tensor, WebGPUTensor):\n if input_tensor.ndim != 2 or input_tensor.shape[0] != input_tensor.shape[1]:\n raise RuntimeError("det() expects a 2D square tensor")\n det_value = np.linalg.det(input_tensor.data.reshape(input_tensor.shape))\n return WebGPUTensor([det_value], device=input_tensor.device, dtype=input_tensor.dtype)\n else:\n return np.linalg.det(input_tensor)\n \n def _arange(self, *args, **kwargs):\n """Create a 1D tensor with evenly spaced values"""\n if len(args) == 1:\n # arange(end)\n start, end, step = 0, args[0], 1\n elif len(args) == 2:\n # arange(start, end)\n start, end, step = args[0], args[1], 1\n elif len(args) == 3:\n # arange(start, end, step)\n start, end, step = args[0], args[1], args[2]\n else:\n raise ValueError("arange() takes 1 to 3 positional arguments")\n \n data = np.arange(start, end, step)\n device = kwargs.get(\'device\', \'cpu\')\n dtype = kwargs.get(\'dtype\', \'int64\' if isinstance(start, int) and isinstance(end, int) and isinstance(step, int) else \'float32\')\n return WebGPUTensor(data, device=device, dtype=dtype)\n \n def _randperm(self, n, **kwargs):\n """Generate a random permutation of integers from 0 to n-1"""\n data = np.random.permutation(n)\n device = kwargs.get(\'device\', \'cpu\')\n dtype = kwargs.get(\'dtype\', \'int64\')\n return WebGPUTensor(data, device=device, dtype=dtype)\n \n def _relu(self, input_tensor):\n """ReLU activation function"""\n if isinstance(input_tensor, WebGPUTensor):\n result_data = np.maximum(input_tensor.data, 0)\n result = WebGPUTensor(result_data, device=input_tensor.device, dtype=input_tensor.dtype, requires_grad=input_tensor.requires_grad)\n \n if input_tensor.requires_grad:\n def relu_backward(grad_output):\n if input_tensor.grad is None:\n input_tensor.grad = WebGPUTensor(np.zeros_like(input_tensor.data), device=input_tensor.device, dtype=input_tensor.dtype)\n relu_grad = grad_output.data * (input_tensor.data > 0).astype(input_tensor.dtype)\n input_tensor.grad.data += relu_grad\n \n result._backward_fn = relu_backward\n result._inputs = [input_tensor]\n \n return result\n else:\n return np.maximum(input_tensor, 0)\n\n\nclass TorchNN:\n def __init__(self):\n self.functional = TorchNNFunctional()\n self.Linear = TorchNNLinear\n self.Module = TorchNNModule\n\n# Install in global namespace\ntorch = TorchModule()\nsys.modules[\'torch\'] = torch\nsys.modules[\'torch.nn\'] = torch.nn\nsys.modules[\'torch.nn.functional\'] = torch.nn.functional\nsys.modules[\'torch.linalg\'] = torch.linalg\n'}function r(e){const n=[/\beval\s*\(/g,/\bexec\s*\(/g,/\b__import__\s*\(/g,/\bsubprocess\./g,/\bos\.system\s*\(/g];for(const t of n)if(t.test(e))throw new Error(`Dangerous pattern detected in PyTorch polyfill: ${t}`);return!0}t.d(n,{EH:()=>r,pH:()=>s})}}]);
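The WebGPUTensor class in the embedded polyfill wraps a NumPy array and mirrors the common PyTorch shape and reduction methods: view/reshape with -1 size inference, transpose via np.swapaxes, and sum/mean/std/var with dim, keepdim, and unbiased arguments. A small sketch, assuming the polyfill has been installed as above:

x = torch.randn(2, 6)
y = x.view(3, -1)         # -1 is inferred from the remaining size -> shape (3, 4)
z = y.transpose(0, 1)     # axis swap via np.swapaxes -> shape (4, 3)

total = x.sum()                        # 0-dim tensor; float() works on single-element tensors
col_means = x.mean(dim=0)              # per-column mean, shape (6,)
spread = x.std(dim=1, unbiased=True)   # sample standard deviation (ddof=1), shape (2,)
print(float(total), col_means.shape, spread.shape)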
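Matrix multiplication records a backward function on its result, and backward() accumulates gradients into .grad (for a @ b: grad_output @ b.T for the left operand and a.T @ grad_output for the right). Note that sum() wires its backward pass to a _sum_backward helper that does not appear in this chunk, so the sketch below passes an explicit gradient to the matmul output instead:

a = torch.randn(2, 3, requires_grad=True)
b = torch.randn(3, 2)
c = a @ b                       # __matmul__ attaches _matmul_backward to the result

# backward() needs an explicit gradient here because c is not a scalar
c.backward(torch.ones(2, 2))
print(a.grad.shape)             # (2, 3): grad_output @ b.T, accumulated into a.grad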
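torch.linalg in the polyfill is backed by NumPy's linalg routines: det and inv require 2-D square tensors, norm falls back to the vector/Frobenius norm when no dim or ord is given, and eig/svd return tuples of tensors. For example:

m = torch.tensor([[4.0, 7.0],
                  [2.0, 6.0]])

d = torch.linalg.det(m)         # determinant 10.0, returned as a 1-element tensor
m_inv = torch.linalg.inv(m)     # [[ 0.6, -0.7], [-0.2,  0.4]]
fro = torch.linalg.norm(m)      # Frobenius norm when no dim is given
U, S, Vh = torch.linalg.svd(m, full_matrices=False)
print(float(d), fro.shape, S.shape)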
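The nn namespace provides Module, Linear (weights initialized as randn * sqrt(2 / in_features), with an optional bias), and a functional submodule with relu and sigmoid; torch.relu additionally wires a ReLU backward function when its input requires gradients. A short sketch:

layer = torch.nn.Linear(3, 2)            # weight shape (2, 3), bias shape (2,)
x = torch.randn(4, 3)
out = layer(x)                           # x @ weight.T + bias -> shape (4, 2)
activated = torch.relu(out)              # elementwise max(x, 0)

print(out.shape, len(layer.parameters()))   # (4, 2) and 2 (weight + bias)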
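Integer-range construction, random permutations, and tensor-valued indexing are also covered: arange accepts the (end), (start, end), and (start, end, step) forms, randperm wraps np.random.permutation, and indexing with a tensor converts it to integer NumPy indices. For instance:

x = torch.arange(10)             # 1-D int64 tensor: 0..9
idx = torch.randperm(10)         # random permutation of 0..9
shuffled = x[idx]                # tensor index is cast to integer NumPy indices

print(shuffled.shape, shuffled.dtype)    # (10,) int64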
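Besides the polyfill source (export pH), the chunk exports a validator (export EH, function r above) that rejects the polyfill if its source matches any of a small set of dangerous call patterns: eval(, exec(, __import__(, subprocess., and os.system(. The real check is the JavaScript function in the bundle; the sketch below is a rough Python rendering of the same regex screen, for illustration only (the function name here is not part of the library):

import re

# Regexes mirrored from the bundle's validator (export EH); the real check runs in JavaScript.
DANGEROUS_PATTERNS = [
    r"\beval\s*\(",
    r"\bexec\s*\(",
    r"\b__import__\s*\(",
    r"\bsubprocess\.",
    r"\bos\.system\s*\(",
]

def validate_polyfill_source(source):
    """Raise if the polyfill source matches a dangerous call pattern (illustrative only)."""
    for pattern in DANGEROUS_PATTERNS:
        if re.search(pattern, source):
            raise ValueError(f"Dangerous pattern detected in PyTorch polyfill: {pattern}")
    return True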