# JS-PyTorch

js-pytorch 0.7.2
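JS-PyTorch is a PyTorch-like deep learning library for JavaScript: as the examples below show, it provides tensors with automatic differentiation, neural-network layers and optimizers under `torch.nn` and `torch.optim`, and optional GPU acceleration, both in the browser and in Node.js.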
**Note:** You can install the package locally with npm:

```bash
npm install js-pytorch
```

To use JS-PyTorch in the browser, add the following `<script>` tag to the `<head>` of your HTML file. The `torch` object can then be used freely in any `<script>` in that file:

```html
<head>
  <title> My Project </title>
  <!-- New script goes here -->
  <script
    src="https://cdnjs.cloudflare.com/ajax/libs/js-pytorch/0.7.2/js-pytorch-browser.js"
    integrity="sha512-l22t7GnqXvHBMCBvPUBdFO2TEYxnb1ziCGcDQcpTB2un16IPA4FE5SIZ8bUR+RwoDZGikQkWisO+fhnakXt9rg=="
    crossorigin="anonymous"
    referrerpolicy="no-referrer">
  </script>
</head>
<body>
  <script>
    let x = torch.randn([10, 5]);
    let linear = new torch.nn.Linear(5, 1, 'gpu', true);
    let z = linear.forward(x);
    console.log(z.data);
  </script>
</body>
```
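If you bundle your frontend code instead of loading the CDN build, the package also ships CJS and ESM modules (see the distribution notes at the end of this page). A minimal sketch, assuming a bundler such as webpack or Vite (not covered by this README) resolves the package:

```javascript
// Hypothetical bundler setup: import the package's ESM build
// instead of the CDN <script> tag above.
import { torch } from "js-pytorch";

let x = torch.randn([10, 5]);
console.log(x.data);
```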
## Simple Autograd Example

```javascript
// Require the Library if running in node (not necessary in the browser):
const { torch } = require("js-pytorch");
// Pass device as an argument to a Tensor or nn.Module (same as PyTorch):
const device = 'gpu';

// Instantiate Tensors:
let x = torch.randn([8, 4, 5]);
let w = torch.randn([8, 5, 4], true, device);
let b = torch.tensor([0.2, 0.5, 0.1, 0.0], true);

// Make calculations:
let out = torch.matmul(x, w);
out = torch.add(out, b);

// Compute gradients on whole graph:
out.backward();

// Get gradients from specific Tensors:
console.log(w.grad);
console.log(b.grad);
```
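These gradients can drive a parameter update directly. A minimal sketch, under the assumption (not shown in this README, which only passes `model.parameters()`) that `optim.Adam` also accepts a plain array of tensors:

```javascript
// Hedged sketch: one optimizer step over the leaf tensors from above.
// Assumption: optim.Adam accepts an array of Tensors, like the array
// returned by model.parameters() in the Transformer example below.
const optimizer = new torch.optim.Adam([w, b], 5e-3, 0);
optimizer.step();      // update w and b from their .grad fields
optimizer.zero_grad(); // reset gradients before the next backward pass
```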
## Complex Autograd Example (Transformer)

```javascript
// Require the Library if running in node (not necessary in the browser):
const { torch } = require("js-pytorch");
const nn = torch.nn;
const optim = torch.optim;
const device = 'gpu';
// Define training hyperparameters:
const vocab_size = 52;
const hidden_size = 32;
const n_timesteps = 16;
const n_heads = 4;
const dropout_p = 0;
const batch_size = 8;
// Create Transformer decoder Module:
class Transformer extends nn.Module {
  constructor(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p, device) {
    super();
    // Instantiate Transformer's Layers:
    this.embed = new nn.Embedding(vocab_size, hidden_size);
    this.pos_embed = new nn.PositionalEmbedding(n_timesteps, hidden_size);
    this.b1 = new nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, dropout_p, device);
    this.b2 = new nn.Block(hidden_size, hidden_size, n_heads, n_timesteps, dropout_p, device);
    this.ln = new nn.LayerNorm(hidden_size);
    this.linear = new nn.Linear(hidden_size, vocab_size, device);
  }

  forward(x) {
    let z;
    z = torch.add(this.embed.forward(x), this.pos_embed.forward(x));
    z = this.b1.forward(z);
    z = this.b2.forward(z);
    z = this.ln.forward(z);
    z = this.linear.forward(z);
    return z;
  }
}
// Instantiate your custom nn.Module:
const model = new Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p, device);

// Define loss function and optimizer.
// Adam arguments: parameters to optimize, learning rate (lr), regularization (reg):
const loss_func = new nn.CrossEntropyLoss();
const optimizer = new optim.Adam(model.parameters(), /* lr */ 5e-3, /* reg */ 0);
// Instantiate sample input and output:
let x = torch.randint(0, vocab_size, [batch_size, n_timesteps, 1]);
let y = torch.randint(0, vocab_size, [batch_size, n_timesteps]);
let loss;

// Training Loop:
for (let i = 0; i < 40; i++) {
  // Forward pass through the Transformer:
  let z = model.forward(x);

  // Get loss:
  loss = loss_func.forward(z, y);

  // Backpropagate the loss using torch.tensor's backward() method:
  loss.backward();

  // Update the weights:
  optimizer.step();

  // Reset the gradients to zero after each training step:
  optimizer.zero_grad();

  // Print loss at every iteration:
  console.log(`Iter ${i} - Loss ${loss.data[0].toFixed(4)}`);
}
```
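After the loop, the trained model can be queried the same way it was trained. A minimal sketch reusing only calls shown above; the logits shape is an assumption based on the `Linear(hidden_size, vocab_size)` output head:

```javascript
// Hedged sketch: forward pass on fresh random data after training.
// Expected logits shape (assumption): [batch_size, n_timesteps, vocab_size].
const x_test = torch.randint(0, vocab_size, [batch_size, n_timesteps, 1]);
const logits = model.forward(x_test);
console.log(logits.data);
```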
## Saving and Loading Models

```javascript
// Instantiate your model:
const model = new Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p);
// Train the model:
trainModel(model);

// Save model to JSON file:
torch.save(model, 'model.json');

// To load, instantiate placeHolder using the original model's architecture:
const placeHolder = new Transformer(vocab_size, hidden_size, n_timesteps, n_heads, dropout_p);

// Load weights into placeHolder:
const newModel = torch.load(placeHolder, 'model.json');
```

## Distribution & Devtools

- Build for distribution by running `npm run build`. CJS and ESM modules and `index.d.ts` are emitted into the `dist/` folder.
- Lint the code by running `npm run lint`.
- Run the tests with `npm test`.
- Improve code formatting with `npm run prettier`.
- Performance benchmarks are contained in the `tests/benchmarks/` directory. Run all benchmarks with `npm run bench` and save new benchmarks with `npm run bench:update`.

## Contributing

Please target pull requests at the `develop` branch; you are welcome to submit them at any time, and I will try to respond as quickly as possible.