Commit 73792cb

Build #52
1 parent ebb71ba commit 73792cb

18 files changed: +380 −374 lines

_downloads/dynamic_net.ipynb

Lines changed: 30 additions & 30 deletions
@@ -1,54 +1,54 @@
 {
-"metadata": {
-"language_info": {
-"nbconvert_exporter": "python",
-"mimetype": "text/x-python",
-"version": "3.5.2",
-"codemirror_mode": {
-"version": 3,
-"name": "ipython"
-},
-"name": "python",
-"pygments_lexer": "ipython3",
-"file_extension": ".py"
-},
-"kernelspec": {
-"language": "python",
-"display_name": "Python 3",
-"name": "python3"
-}
-},
-"nbformat_minor": 0,
-"nbformat": 4,
 "cells": [
 {
+"outputs": [],
 "metadata": {
 "collapsed": false
 },
-"outputs": [],
-"cell_type": "code",
 "execution_count": null,
 "source": [
 "%matplotlib inline"
-]
+],
+"cell_type": "code"
 },
 {
 "metadata": {},
-"cell_type": "markdown",
 "source": [
 "\nPyTorch: Control Flow + Weight Sharing\n--------------------------------------\n\nTo showcase the power of PyTorch dynamic graphs, we will implement a very strange\nmodel: a fully-connected ReLU network that on each forward pass randomly chooses\na number between 1 and 4 and has that many hidden layers, reusing the same\nweights multiple times to compute the innermost hidden layers.\n\n"
-]
+],
+"cell_type": "markdown"
 },
 {
+"outputs": [],
 "metadata": {
 "collapsed": false
 },
-"outputs": [],
-"cell_type": "code",
 "execution_count": null,
 "source": [
 "import random\nimport torch\nfrom torch.autograd import Variable\n\nclass DynamicNet(torch.nn.Module):\n def __init__(self, D_in, H, D_out):\n \"\"\"\n In the constructor we construct three nn.Linear instances that we will use\n in the forward pass.\n \"\"\"\n super(DynamicNet, self).__init__()\n self.input_linear = torch.nn.Linear(D_in, H)\n self.middle_linear = torch.nn.Linear(H, H)\n self.output_linear = torch.nn.Linear(H, D_out)\n\n def forward(self, x):\n \"\"\"\n For the forward pass of the model, we randomly choose either 0, 1, 2, or 3\n and reuse the middle_linear Module that many times to compute hidden layer\n representations.\n\n Since each forward pass builds a dynamic computation graph, we can use normal\n Python control-flow operators like loops or conditional statements when\n defining the forward pass of the model.\n\n Here we also see that it is perfectly safe to reuse the same Module many\n times when defining a computational graph. This is a big improvement from Lua\n Torch, where each Module could be used only once.\n \"\"\"\n h_relu = self.input_linear(x).clamp(min=0)\n for _ in range(random.randint(0, 3)):\n h_relu = self.middle_linear(h_relu).clamp(min=0)\n y_pred = self.output_linear(h_relu)\n return y_pred\n\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold inputs and outputs, and wrap them in Variables\nx = Variable(torch.randn(N, D_in))\ny = Variable(torch.randn(N, D_out), requires_grad=False)\n\n# Construct our model by instantiating the class defined above\nmodel = DynamicNet(D_in, H, D_out)\n\n# Construct our loss function and an Optimizer. Training this strange model with\n# vanilla stochastic gradient descent is tough, so we use momentum\ncriterion = torch.nn.MSELoss(size_average=False)\noptimizer = torch.optim.SGD(model.parameters(), lr=1e-4, momentum=0.9)\nfor t in range(500):\n # Forward pass: Compute predicted y by passing x to the model\n y_pred = model(x)\n\n # Compute and print loss\n loss = criterion(y_pred, y)\n print(t, loss.data[0])\n\n # Zero gradients, perform a backward pass, and update the weights.\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()"
-]
+],
+"cell_type": "code"
+}
+],
+"nbformat_minor": 0,
+"metadata": {
+"kernelspec": {
+"language": "python",
+"name": "python3",
+"display_name": "Python 3"
+},
+"language_info": {
+"nbconvert_exporter": "python",
+"name": "python",
+"pygments_lexer": "ipython3",
+"mimetype": "text/x-python",
+"version": "3.5.2",
+"file_extension": ".py",
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+}
 }
-]
+},
+"nbformat": 4
 }

_downloads/reinforcement_q_learning.ipynb

Lines changed: 60 additions & 60 deletions
Large diffs are not rendered by default.

_downloads/tf_two_layer_net.ipynb

Lines changed: 30 additions & 30 deletions
@@ -1,54 +1,54 @@
 {
-"metadata": {
-"language_info": {
-"nbconvert_exporter": "python",
-"mimetype": "text/x-python",
-"version": "3.5.2",
-"codemirror_mode": {
-"version": 3,
-"name": "ipython"
-},
-"name": "python",
-"pygments_lexer": "ipython3",
-"file_extension": ".py"
-},
-"kernelspec": {
-"language": "python",
-"display_name": "Python 3",
-"name": "python3"
-}
-},
-"nbformat_minor": 0,
-"nbformat": 4,
 "cells": [
 {
+"outputs": [],
 "metadata": {
 "collapsed": false
 },
-"outputs": [],
-"cell_type": "code",
 "execution_count": null,
 "source": [
 "%matplotlib inline"
-]
+],
+"cell_type": "code"
 },
 {
 "metadata": {},
-"cell_type": "markdown",
 "source": [
 "\nTensorFlow: Static Graphs\n-------------------------\n\nA fully-connected ReLU network with one hidden layer and no biases, trained to\npredict y from x by minimizing squared Euclidean distance.\n\nThis implementation uses basic TensorFlow operations to set up a computational\ngraph, then executes the graph many times to actually train the network.\n\nOne of the main differences between TensorFlow and PyTorch is that TensorFlow\nuses static computational graphs while PyTorch uses dynamic computational\ngraphs.\n\nIn TensorFlow we first set up the computational graph, then execute the same\ngraph many times.\n\n"
-]
+],
+"cell_type": "markdown"
 },
 {
+"outputs": [],
 "metadata": {
 "collapsed": false
 },
-"outputs": [],
-"cell_type": "code",
 "execution_count": null,
 "source": [
 "import tensorflow as tf\nimport numpy as np\n\n# First we set up the computational graph:\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create placeholders for the input and target data; these will be filled\n# with real data when we execute the graph.\nx = tf.placeholder(tf.float32, shape=(None, D_in))\ny = tf.placeholder(tf.float32, shape=(None, D_out))\n\n# Create Variables for the weights and initialize them with random data.\n# A TensorFlow Variable persists its value across executions of the graph.\nw1 = tf.Variable(tf.random_normal((D_in, H)))\nw2 = tf.Variable(tf.random_normal((H, D_out)))\n\n# Forward pass: Compute the predicted y using operations on TensorFlow Tensors.\n# Note that this code does not actually perform any numeric operations; it\n# merely sets up the computational graph that we will later execute.\nh = tf.matmul(x, w1)\nh_relu = tf.maximum(h, tf.zeros(1))\ny_pred = tf.matmul(h_relu, w2)\n\n# Compute loss using operations on TensorFlow Tensors\nloss = tf.reduce_sum((y - y_pred) ** 2.0)\n\n# Compute gradient of the loss with respect to w1 and w2.\ngrad_w1, grad_w2 = tf.gradients(loss, [w1, w2])\n\n# Update the weights using gradient descent. To actually update the weights\n# we need to evaluate new_w1 and new_w2 when executing the graph. Note that\n# in TensorFlow the the act of updating the value of the weights is part of\n# the computational graph; in PyTorch this happens outside the computational\n# graph.\nlearning_rate = 1e-6\nnew_w1 = w1.assign(w1 - learning_rate * grad_w1)\nnew_w2 = w2.assign(w2 - learning_rate * grad_w2)\n\n# Now we have built our computational graph, so we enter a TensorFlow session to\n# actually execute the graph.\nwith tf.Session() as sess:\n # Run the graph once to initialize the Variables w1 and w2.\n sess.run(tf.global_variables_initializer())\n\n # Create numpy arrays holding the actual data for the inputs x and targets y\n x_value = np.random.randn(N, D_in)\n y_value = np.random.randn(N, D_out)\n for _ in range(500):\n # Execute the graph many times. Each time it executes we want to bind\n # x_value to x and y_value to y, specified with the feed_dict argument.\n # Each time we execute the graph we want to compute the values for loss,\n # new_w1, and new_w2; the values of these Tensors are returned as numpy\n # arrays.\n loss_value, _, _ = sess.run([loss, new_w1, new_w2],\n feed_dict={x: x_value, y: y_value})\n print(loss_value)"
-]
+],
+"cell_type": "code"
+}
+],
+"nbformat_minor": 0,
+"metadata": {
+"kernelspec": {
+"language": "python",
+"name": "python3",
+"display_name": "Python 3"
+},
+"language_info": {
+"nbconvert_exporter": "python",
+"name": "python",
+"pygments_lexer": "ipython3",
+"mimetype": "text/x-python",
+"version": "3.5.2",
+"file_extension": ".py",
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+}
 }
-]
+},
+"nbformat": 4
 }
Lines changed: 31 additions & 31 deletions
@@ -1,54 +1,54 @@
 {
-"metadata": {
-"language_info": {
-"nbconvert_exporter": "python",
-"mimetype": "text/x-python",
-"version": "3.5.2",
-"codemirror_mode": {
-"version": 3,
-"name": "ipython"
-},
-"name": "python",
-"pygments_lexer": "ipython3",
-"file_extension": ".py"
-},
-"kernelspec": {
-"language": "python",
-"display_name": "Python 3",
-"name": "python3"
-}
-},
-"nbformat_minor": 0,
-"nbformat": 4,
 "cells": [
 {
+"outputs": [],
 "metadata": {
 "collapsed": false
 },
-"outputs": [],
-"cell_type": "code",
 "execution_count": null,
 "source": [
 "%matplotlib inline"
-]
+],
+"cell_type": "code"
 },
 {
 "metadata": {},
-"cell_type": "markdown",
 "source": [
 "\nPyTorch: Variables and autograd\n-------------------------------\n\nA fully-connected ReLU network with one hidden layer and no biases, trained to\npredict y from x by minimizing squared Euclidean distance.\n\nThis implementation computes the forward pass using operations on PyTorch\nVariables, and uses PyTorch autograd to compute gradients.\n\nA PyTorch Variable is a wrapper around a PyTorch Tensor, and represents a node\nin a computational graph. If x is a Variable then x.data is a Tensor giving its\nvalue, and x.grad is another Variable holding the gradient of x with respect to\nsome scalar value.\n\nPyTorch Variables have the same API as PyTorch tensors: (almost) any operation\nyou can do on a Tensor you can also do on a Variable; the difference is that\nautograd allows you to automatically compute gradients.\n\n"
-]
+],
+"cell_type": "markdown"
 },
 {
+"outputs": [],
 "metadata": {
 "collapsed": false
 },
-"outputs": [],
-"cell_type": "code",
 "execution_count": null,
 "source": [
-"import torch\nfrom torch.autograd import Variable\n\ndtype = torch.FloatTensor\n# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold input and outputs, and wrap them in Variables.\n# Setting requires_grad=False indicates that we do not need to compute gradients\n# with respect to these Variables during the backward pass.\nx = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)\ny = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)\n\n# Create random Tensors for weights, and wrap them in Variables.\n# Setting requires_grad=True indicates that we want to compute gradients with\n# respect to these Variables during the backward pass.\nw1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)\nw2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(500):\n # Forward pass: compute predicted y using operations on Variables; these\n # are exactly the same operations we used to compute the forward pass using\n # Tensors, but we do not need to keep references to intermediate values since\n # we are not implementing the backward pass by hand.\n y_pred = x.mm(w1).clamp(min=0).mm(w2)\n \n # Compute and print loss using operations on Variables.\n # Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape\n # (1,); loss.data[0] is a scalar value holding the loss.\n loss = (y_pred - y).pow(2).sum()\n print(t, loss.data[0])\n \n # Manually zero the gradients before running the backward pass\n w1.grad.data.zero_()\n w2.grad.data.zero_()\n\n # Use autograd to compute the backward pass. This call will compute the\n # gradient of loss with respect to all Variables with requires_grad=True.\n # After this call w1.grad and w2.grad will be Variables holding the gradient\n # of the loss with respect to w1 and w2 respectively.\n loss.backward()\n\n # Update weights using gradient descent; w1.data and w2.data are Tensors,\n # w1.grad and w2.grad are Variables and w1.grad.data and w2.grad.data are\n # Tensors.\n w1.data -= learning_rate * w1.grad.data\n w2.data -= learning_rate * w2.grad.data"
-]
+"import torch\nfrom torch.autograd import Variable\n\ndtype = torch.FloatTensor\n# dtype = torch.cuda.FloatTensor # Uncomment this to run on GPU\n\n# N is batch size; D_in is input dimension;\n# H is hidden dimension; D_out is output dimension.\nN, D_in, H, D_out = 64, 1000, 100, 10\n\n# Create random Tensors to hold input and outputs, and wrap them in Variables.\n# Setting requires_grad=False indicates that we do not need to compute gradients\n# with respect to these Variables during the backward pass.\nx = Variable(torch.randn(N, D_in).type(dtype), requires_grad=False)\ny = Variable(torch.randn(N, D_out).type(dtype), requires_grad=False)\n\n# Create random Tensors for weights, and wrap them in Variables.\n# Setting requires_grad=True indicates that we want to compute gradients with\n# respect to these Variables during the backward pass.\nw1 = Variable(torch.randn(D_in, H).type(dtype), requires_grad=True)\nw2 = Variable(torch.randn(H, D_out).type(dtype), requires_grad=True)\n\nlearning_rate = 1e-6\nfor t in range(500):\n # Forward pass: compute predicted y using operations on Variables; these\n # are exactly the same operations we used to compute the forward pass using\n # Tensors, but we do not need to keep references to intermediate values since\n # we are not implementing the backward pass by hand.\n y_pred = x.mm(w1).clamp(min=0).mm(w2)\n\n # Compute and print loss using operations on Variables.\n # Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape\n # (1,); loss.data[0] is a scalar value holding the loss.\n loss = (y_pred - y).pow(2).sum()\n print(t, loss.data[0])\n\n # Use autograd to compute the backward pass. This call will compute the\n # gradient of loss with respect to all Variables with requires_grad=True.\n # After this call w1.grad and w2.grad will be Variables holding the gradient\n # of the loss with respect to w1 and w2 respectively.\n loss.backward()\n\n # Update weights using gradient descent; w1.data and w2.data are Tensors,\n # w1.grad and w2.grad are Variables and w1.grad.data and w2.grad.data are\n # Tensors.\n w1.data -= learning_rate * w1.grad.data\n w2.data -= learning_rate * w2.grad.data\n\n # Manually zero the gradients after updating weights\n w1.grad.data.zero_()\n w2.grad.data.zero_()"
+],
+"cell_type": "code"
+}
+],
+"nbformat_minor": 0,
+"metadata": {
+"kernelspec": {
+"language": "python",
+"name": "python3",
+"display_name": "Python 3"
+},
+"language_info": {
+"nbconvert_exporter": "python",
+"name": "python",
+"pygments_lexer": "ipython3",
+"mimetype": "text/x-python",
+"version": "3.5.2",
+"file_extension": ".py",
+"codemirror_mode": {
+"name": "ipython",
+"version": 3
+}
 }
-]
+},
+"nbformat": 4
 }

_downloads/two_layer_net_autograd.py

Lines changed: 6 additions & 5 deletions
@@ -46,16 +46,12 @@
     # Tensors, but we do not need to keep references to intermediate values since
     # we are not implementing the backward pass by hand.
     y_pred = x.mm(w1).clamp(min=0).mm(w2)
-
+
     # Compute and print loss using operations on Variables.
     # Now loss is a Variable of shape (1,) and loss.data is a Tensor of shape
     # (1,); loss.data[0] is a scalar value holding the loss.
     loss = (y_pred - y).pow(2).sum()
     print(t, loss.data[0])
-
-    # Manually zero the gradients before running the backward pass
-    w1.grad.data.zero_()
-    w2.grad.data.zero_()
 
     # Use autograd to compute the backward pass. This call will compute the
     # gradient of loss with respect to all Variables with requires_grad=True.
@@ -68,3 +64,8 @@
     # Tensors.
     w1.data -= learning_rate * w1.grad.data
     w2.data -= learning_rate * w2.grad.data
+
+    # Manually zero the gradients after updating weights
+    w1.grad.data.zero_()
+    w2.grad.data.zero_()
+
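
The one behavioral change in this commit (visible in the autograd notebook diff above and in two_layer_net_autograd.py) is a reordering of the manual-gradient training loop: the .grad buffers are now zeroed after the weight update instead of before the backward pass, presumably so that w1.grad and w2.grad are only touched once backward() has populated them. A minimal sketch of the resulting loop, condensed from the updated file (same shapes and the same pre-0.4 Variable API that the tutorial uses):

import torch
from torch.autograd import Variable

# N is batch size; D_in is input dimension; H is hidden dimension; D_out is output dimension.
N, D_in, H, D_out = 64, 1000, 100, 10

x = Variable(torch.randn(N, D_in), requires_grad=False)
y = Variable(torch.randn(N, D_out), requires_grad=False)
w1 = Variable(torch.randn(D_in, H), requires_grad=True)
w2 = Variable(torch.randn(H, D_out), requires_grad=True)

learning_rate = 1e-6
for t in range(500):
    y_pred = x.mm(w1).clamp(min=0).mm(w2)    # forward pass
    loss = (y_pred - y).pow(2).sum()         # squared Euclidean distance
    print(t, loss.data[0])

    loss.backward()                          # populates w1.grad and w2.grad

    w1.data -= learning_rate * w1.grad.data  # gradient-descent step
    w2.data -= learning_rate * w2.grad.data

    # Zero the gradients only after the update, once they are known to exist.
    w1.grad.data.zero_()
    w2.grad.data.zero_()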

0 commit comments
