
# js-torch

PyTorch in JavaScript: a JavaScript library like PyTorch, with GPU acceleration.

Documentation: https://eduardoleao052.github.io/js-pytorch/site/index.html

> **Note:** You can install the package locally with `npm install js-pytorch`.


Implemented Tensor Operations:
- [Add](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L346-L401)
- [Subtract](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L404-L438)
- [Multiply](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L441-L496)
- [Divide](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L498-L557)
- [Matrix Multiply](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L560-L621)
- [Power](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L625-L663)
- [Square Root](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L666-L704)
- [Exponentiate](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L706-L744)
- [Log](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L746-L785)
- [Sum](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L790-L842)
- [Mean](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L844-L894)
- [Variance](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L896-L949)
- [Transpose](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L953-L1008)
- [At](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L1010-L1060)
- [MaskedFill](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L1062-L1095)
- [Reshape](https://github.com/eduardoleao052/js-torch/blob/07c1286867b952f32c0e904033214253e8812090/src/tensor.js#L1097-L1129)
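
A quick sketch of a few of these ops in action (the `torch.mul` call and the `transpose(dim1, dim2)` method signature are assumptions based on the linked source; check there for exact signatures):

```javascript
const { torch } = require("js-pytorch");

// Elementwise and matrix ops from the list above:
const a = torch.randn([2, 3]);
const b = torch.randn([2, 3]);

const s = torch.add(a, b);                    // Add (elementwise)
const p = torch.mul(a, b);                    // Multiply (elementwise; name assumed)
const m = torch.matmul(a, b.transpose(0, 1)); // Matrix Multiply: [2,3] x [3,2] -> [2,2]
console.log(m.data);
```
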
Implemented Deep Learning Layers:
- [nn.Linear](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L60-L88)
- [nn.MultiHeadSelfAttention](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L90-L163)
- [nn.FullyConnected](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L165-L194)
- [nn.Block](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L196-L226)
- [nn.Embedding](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L231-L260)
- [nn.PositionalEmbedding](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L262-L291)
- [nn.ReLU](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L296-L325)
- [nn.Softmax](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L327-L346)
- [nn.Dropout](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L351-L376)
- [nn.LayerNorm](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L378-L397)
- [nn.CrossEntropyLoss](https://github.com/eduardoleao052/js-torch/blob/a158c91db9775a88fae6ed2d0f76d6d8ee6f9d23/src/layers.js#L400-L441)
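
As a sketch of the layer API, the listed layers can be chained through their `forward` methods (constructor arguments are inferred from the Transformer example below; `nn.ReLU()` taking no arguments is an assumption):

```javascript
const { torch } = require("js-pytorch");
const nn = torch.nn;

// A tiny two-layer network built from the listed layers:
const fc1 = new nn.Linear(4, 8);
const relu = new nn.ReLU();
const fc2 = new nn.Linear(8, 2);

let x = torch.randn([1, 4]);
let z = fc1.forward(x);   // [1, 4] -> [1, 8]
z = relu.forward(z);      // elementwise ReLU
z = fc2.forward(z);       // [1, 8] -> [1, 2]
console.log(z.data);
```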


## 1. Table of Contents

- [Installation](#2-installation)
- [Running it Yourself](#3-running-it-yourself)
- [Distribution & Devtools](#4-distribution--devtools)
- [Future Work](#5-future-work)

## 2. Installation

To use js-pytorch in the browser, load the bundled utilities and then import the module:

```html
<script src="https://github.com/eduardoleao052/js-pytorch/raw/main/node_modules/js-pytorch/dist/utils.js"></script>
<script type="module">
  import { torch } from './node_modules/js-pytorch/dist/js-pytorch-browser.js';
  window.torch = torch;
</script>
```
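
Once both scripts have loaded, the module import above exposes `torch` on `window`, so any later script can use it directly (a minimal sketch; nested-array input to `torch.tensor` is assumed to work as in PyTorch):

```javascript
// Runs in the browser after the module script above has executed:
const t = torch.tensor([[1, 2], [3, 4]]);
console.log(t.data); // [[1, 2], [3, 4]]
```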

## 3. Running it Yourself

Simple Autograd Example:

```javascript
const { torch } = require("js-pytorch");

// Pass device as an argument to a Tensor or nn.Module (same as PyTorch):
const device = 'gpu';

// Instantiate Tensors:
let x = torch.randn([8, 4, 5]);
let w = torch.randn([8, 5, 4], true, device);
let b = torch.tensor([0.2, 0.5, 0.1, 0.0], true);

// Make calculations:
let out = torch.matmul(x, w);
out = torch.add(out, b);

// Compute gradients on whole graph:
out.backward();

// Get gradients from specific Tensors:
console.log(w.grad);
console.log(b.grad);
```
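
Each gradient mirrors the shape of its tensor. A quick sanity check (assuming `Tensor` exposes a `shape` property and `grad` is itself a `Tensor`, as the source linked above suggests):

```javascript
// w was created with shape [8, 5, 4], so its gradient should match:
console.log(w.shape);      // [8, 5, 4]
console.log(w.grad.shape); // [8, 5, 4]
```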

Complex Autograd Example (Transformer):

```javascript
const { torch } = require("js-pytorch");
const nn = torch.nn;
const optim = torch.optim;
const device = 'gpu';

// Define training hyperparameters:
const vocab_size = 52;
const hidden_size = 32;
const n_timesteps = 16;
const n_heads = 4;
const dropout_p = 0;
const batch_size = 8;

// Create Transformer decoder Module:
class Transformer extends nn.Module {
  constructor(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p, device) {
    super();
    // Instantiate Transformer's Layers:
    this.embed = new nn.Embedding(vocab_size, hidden_size);
    this.pos_embed = new nn.PositionalEmbedding(n_timesteps, hidden_size);
    this.b1 = new nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, dropout_p, device);
    this.b2 = new nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, dropout_p, device);
    this.ln = new nn.LayerNorm(hidden_size);
    this.linear = new nn.Linear(hidden_size, vocab_size, device);
  }

  forward(x) {
    let z;
    z = torch.add(this.embed.forward(x), this.pos_embed.forward(x));
    z = this.b1.forward(z);
    z = this.b2.forward(z);
    z = this.ln.forward(z);
    z = this.linear.forward(z);
    return z;
  }
}

// Instantiate your custom nn.Module:
const model = new Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p, device);

// Define loss function and optimizer:
const loss_func = new nn.CrossEntropyLoss();
const optimizer = new optim.Adam(model.parameters(), /* lr = */ 5e-3, /* reg = */ 0);

// Instantiate sample input and output:
let x = torch.randint(0, vocab_size, [batch_size, n_timesteps, 1]);
let y = torch.randint(0, vocab_size, [batch_size, n_timesteps]);
let loss;

// Training Loop:
for (let i = 0; i < 40; i++) {
  // Forward pass through the Transformer:
  let z = model.forward(x);

  // Get loss:
  loss = loss_func.forward(z, y);

  // Backpropagate the loss using torch.tensor's backward() method:
  loss.backward();

  // Update the weights:
  optimizer.step();

  // Reset the gradients to zero after each training step:
  optimizer.zero_grad();

  // Print loss at every iteration:
  console.log(`Iter ${i} - Loss ${loss.data[0].toFixed(4)}`);
}
```
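
After training, the model can be run forward on a batch to inspect its predictions (a minimal sketch; the `shape` property is an assumption, and sampling/argmax utilities are not shown in this README):

```javascript
// Inspect the trained model's output logits on the sample batch:
const logits = model.forward(x);
console.log(logits.shape); // expected: [batch_size, n_timesteps, vocab_size]
```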

Saving and Loading Models:

```javascript
// Instantiate your model:
const model = new Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p);

// Train the model:
trainModel(model);

// Save model to JSON file:
torch.save(model, 'model.json');

// To load, instantiate placeHolder using the original model's architecture:
const placeHolder = new Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p);

// Load weights into placeHolder:
const newModel = torch.load(placeHolder, 'model.json');
```
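
The loaded model behaves like the original, so it can be evaluated or trained further right away (a sketch reusing the sample input `x` from the training example above):

```javascript
// Run the restored model forward to confirm the weights were loaded:
const z = newModel.forward(x);
console.log(z.data);
```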


## 4. Distribution & Devtools

## 5. Future Work