autograd


Provides differentiable operations and tensors.

Features

* Reverse-mode automatic differentiation, including higher-order gradients
* Neural-network building blocks such as `matmul` and `sparse_softmax_cross_entropy`, plus gradient-descent optimizers such as Adam
* Tensors backed by `ndarray`, with an `mkl` feature to speed up gemm operations

Installation

Add the following to your `Cargo.toml`. The `mkl` feature is enabled by default to speed up gemm operations.
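
```toml
[dependencies]
autograd = "0.9.0"
```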

Examples

Here we compute the partial derivatives of `z = 2x^2 + 3y + 1`; analytically, `dz/dy = 3`, `dz/dx = 4x` (which is 8 at `x = 2`), and `ddz/dx = 4`, matching the outputs below.

```rust
extern crate autograd as ag;

let ref x = ag::placeholder(&[]);
let ref y = ag::placeholder(&[]);
let ref z = 2.*x*x + 3.*y + 1.;

// dz/dy
let gy = &ag::grad(&[z], &[y])[0];
println!("{:?}", gy.eval(&[]));  // => Some(3.)

// dz/dx (requires filling the placeholder `x`)
let gx = &ag::grad(&[z], &[x])[0];
println!("{:?}", gx.eval(&[(x, &ag::ndarray::arr0(2.).into_dyn())]));  // => Some(8.)

// ddz/dx (differentiates `z` again)
let ggx = &ag::grad(&[gx], &[x])[0];
println!("{:?}", ggx.eval(&[]));  // => Some(4.)
```
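
As a side note (not part of the original example), evaluating `z` itself follows the same feed pattern, except that both placeholders must be filled; `x_val` and `y_val` below are just local bindings introduced for this sketch:

```rust
// Sketch: evaluate z = 2x^2 + 3y + 1 at x = 2, y = 1 by feeding both placeholders.
let x_val = ag::ndarray::arr0(2.).into_dyn();
let y_val = ag::ndarray::arr0(1.).into_dyn();
println!("{:?}", z.eval(&[(x, &x_val), (y, &y_val)]));  // => Some(12.), since 2*4 + 3*1 + 1 = 12
```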

Another example: softmax regression for MNIST digit classification, trained with Adam.

```rust
// This achieves 0.918 test accuracy after 3 epochs, 0.14 sec/epoch on 2.7GHz Intel Core i5

let ref w = ag::variable(ag::ndarray_ext::glorot_uniform::<f32>(&[28 * 28, 10]));
let ref b = ag::variable(ag::ndarray_ext::zeros::<f32>(&[1, 10]));
let ref x = ag::placeholder(&[-1, 28 * 28]);
let ref y = ag::placeholder(&[-1]);
let ref z = ag::matmul(x, w) + b;
let ref loss = ag::reduce_mean(&ag::sparse_softmax_cross_entropy(z, y), &[0, 1], false);
let ref params = [w, b];
let ref grads = ag::grad(&[loss], params);
let ref predictions = ag::argmax(z, -1, true);
let ref accuracy = ag::reduce_mean(&ag::equal(predictions, y), &[0], false);
let ref adam = ag::gradient_descent_ops::Adam::default();
let mut stateful_params = ag::gradient_descent_ops::Adam::vars_with_states(params);
let ref update_ops = adam.compute_updates(&stateful_params, grads);

// -- dataset --
let ((x_train, y_train), (x_test, y_test)) = dataset::load();

// -- training loop --
for epoch in 0..max_epoch {
    ...
    ag::eval(update_ops, &[(x, &x_batch), (y, &y_batch)]);
}
```
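
The example defines `accuracy` and loads `(x_test, y_test)` without showing how they are used. A minimal sketch of a post-training evaluation, assuming `dataset::load()` returns the test split in the same feedable array format as the training batches:

```rust
// Sketch: feed the held-out test split to the `accuracy` tensor defined above.
println!("test accuracy: {:?}", accuracy.eval(&[(x, &x_test), (y, &y_test)]));
```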
For more, see the documentation or the examples.