This library provides differentiable operations and tensors. The current backend is rust-ndarray.
Here we compute the partial derivatives of `z = 2x^2 + 3y + 1`. Analytically, dz/dy = 3, dz/dx = 4x, and the second derivative ddz/dx = 4, which is exactly what the evaluated gradients below print (the 8 is dz/dx at x = 2).
```rust
extern crate ndarray;
extern crate autograd as ag;

let ref x = ag::placeholder(&[]);
let ref y = ag::placeholder(&[]);
let ref z = 2*x*x + 3*y + 1;

// dz/dy
let ref g1 = ag::grad(&[z], &[y])[0];
// dz/dx
let ref g2 = ag::grad(&[z], &[x])[0];
// ddz/dx (differentiates `z` again)
let ref gg = ag::grad(&[g2], &[x])[0];

// evaluation of symbolic gradients
let mut ctx = ag::Context::new();
println!("{}", g1.eval(&mut ctx)); // => 3.
println!("{}", gg.eval(&mut ctx)); // => 4.

// dz/dx requires filling the placeholder `x`
ag::feed_input(x, ndarray::arr0(2.), &mut ctx);
println!("{}", g2.eval(&mut ctx)); // => 8.
```
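The same graph can also be evaluated in the forward direction. Below is a minimal sketch that reuses only the calls already shown above (`ag::placeholder`, `ag::feed_input`, `eval`); with x = 2 and y = 3 it should print 2*4 + 9 + 1 = 18.

```rust
// forward evaluation of `z` itself -- both placeholders must be fed
let mut ctx = ag::Context::new();
ag::feed_input(x, ndarray::arr0(2.), &mut ctx);
ag::feed_input(y, ndarray::arr0(3.), &mut ctx);
println!("{}", z.eval(&mut ctx)); // => 18.
```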
Another example: a multi-layer perceptron for MNIST digit classification.
```rust
// -- graph def --
let mut ctx = ag::Context::new();
let ref x = ag::placeholder(&[-1, 28*28]);
let ref y = ag::placeholder(&[-1]);
let ref w = ag::variable(ag::ndarray_ext::glorot_uniform(&[28*28, 10]), &mut ctx);
let ref b = ag::variable(ag::ndarray_ext::zeros(&[1, 10]), &mut ctx);
let ref z = ag::matmul(x, w) + b;
let ref loss = ag::reduce_mean(&ag::sparse_softmax_cross_entropy(z, y), &[0], false);
let ref grads = ag::grad(&[loss], &[w, b]);
let ref predictions = ag::argmax(z, -1, true);
let ref accuracy = ag::reduce_mean(&ag::equal(predictions, y), &[0], false);

// -- dataset --
let ((x_train, y_train), (x_test, y_test)) = dataset::load();

// -- training method --
let mut optimizer = ag::gradient_descent::SGD { lr: 0.01 };

// -- training loop --
for epoch in 0..max_epoch {
    ...
}
```

For more, see examples or tests.
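For a sense of what the elided loop body might contain, here is a rough, hypothetical sketch. Only `ag::feed_input` and `eval` are taken from the snippets above; the `num_batches`/`get_batch` helpers and the `optimizer.update` call are assumed names for illustration, not the library's documented API.

```rust
// Hypothetical training-loop sketch; `get_batch`, `num_batches`, and
// `optimizer.update` are assumed helpers, not documented library calls.
for epoch in 0..max_epoch {
    for i in 0..num_batches {
        let (x_batch, y_batch) = get_batch(&x_train, &y_train, i); // assumed helper
        ag::feed_input(x, x_batch, &mut ctx);
        ag::feed_input(y, y_batch, &mut ctx);
        // apply the symbolic gradients of `loss` w.r.t. `w` and `b`
        optimizer.update(&[w, b], grads, &mut ctx); // assumed signature
    }
    println!("epoch {}: accuracy {}", epoch, accuracy.eval(&mut ctx));
}
```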
Available ops are listed here.