Commit c0d13c5: first commit
dragen1860 committed Jan 17, 2019 (0 parents)
Showing 91 changed files with 3,492 additions and 0 deletions.
14 changes: 14 additions & 0 deletions Lesson1-初见PyTorch/autograd_demo.py
@@ -0,0 +1,14 @@
import torch
from torch import autograd


x = torch.tensor(1.)
a = torch.tensor(1., requires_grad=True)
b = torch.tensor(2., requires_grad=True)
c = torch.tensor(3., requires_grad=True)

y = a**2 * x + b * x + c
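# Expected gradients at x = 1: dy/da = 2*a*x = 2, dy/db = x = 1, dy/dc = 1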

print('before:', a.grad, b.grad, c.grad)
grads = autograd.grad(y, [a, b, c])
print('after :', grads[0], grads[1], grads[2])
11 changes: 11 additions & 0 deletions Lesson1-初见PyTorch/demo.lua
@@ -0,0 +1,11 @@
require 'torch'
require 'nn'

torch.Tensor{1, 2, 3}

-- One SGD step for an nn module under a negative log-likelihood loss.
function gradUpdate(mlp, x, y, learningRate)
    local criterion = nn.ClassNLLCriterion()
    local pred = mlp:forward(x)
    local err = criterion:forward(pred, y)
    mlp:zeroGradParameters()
    local gradInput = criterion:backward(pred, y)
    mlp:backward(x, gradInput)
    mlp:updateParameters(learningRate)
end
Binary file added Lesson1-初见PyTorch/lesson1.pdf
29 changes: 29 additions & 0 deletions Lesson1-初见PyTorch/lesson1.py
@@ -0,0 +1,29 @@
import torch
import time
print(torch.__version__)
print(torch.cuda.is_available())
# print('hello, world.')


a = torch.randn(10000, 1000)
b = torch.randn(1000, 2000)
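# Benchmark the same matmul on CPU first, then twice on the GPU
# (the first GPU run is warm-up).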

t0 = time.time()
c = torch.matmul(a, b)
t1 = time.time()
print(a.device, t1 - t0, c.norm(2))

device = torch.device('cuda')
a = a.to(device)
b = b.to(device)

# The first GPU run includes CUDA initialization overhead.
t0 = time.time()
c = torch.matmul(a, b)
torch.cuda.synchronize()  # matmul launches asynchronously; wait before timing
t2 = time.time()
print(a.device, t2 - t0, c.norm(2))

# The second run reflects the actual compute time.
t0 = time.time()
c = torch.matmul(a, b)
torch.cuda.synchronize()
t2 = time.time()
print(a.device, t2 - t0, c.norm(2))

Binary file added Lesson2-开发环境准备/lesson2.pdf
4 changes: 4 additions & 0 deletions Lesson2-开发环境准备/main.py
@@ -0,0 +1,4 @@
import torch

print(torch.__version__)
print('gpu:', torch.cuda.is_available())
1 change: 1 addition & 0 deletions README.md
@@ -0,0 +1 @@
# DeepLearningTutorials
Binary file added lesson10-Broadcasting/Broadcasting.pdf
Empty file.
Binary file added lesson11-Tensor合并与分割/Concat.pdf
Empty file.
Binary file added lesson12-Tensor运算/math operation.pdf
Empty file.
Binary file added lesson13-Tensor统计/statisics.pdf
Empty file.
Binary file added lesson14-Tensor高阶/tensor advanced.pdf
Empty file.
Binary file added lesson16-什么是梯度/16 梯度.pdf
Binary file added lesson17-常见梯度/17 常见函数梯度.pdf
Binary file added lesson20-链式法则/20.pdf
Binary file added lesson21-MLP反向传播推导/21.pdf
Binary file added lesson22-优化小实例/22 Himmelblau.pdf
41 changes: 41 additions & 0 deletions lesson22-优化小实例/main.py
@@ -0,0 +1,41 @@
import numpy as np
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import pyplot as plt
import torch



def himmelblau(x):
    return (x[0] ** 2 + x[1] - 11) ** 2 + (x[0] + x[1] ** 2 - 7) ** 2
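# Himmelblau's function: a classic optimization test surface with four
# global minima, all with value 0.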


x = np.arange(-6, 6, 0.1)
y = np.arange(-6, 6, 0.1)
print('x,y range:', x.shape, y.shape)
X, Y = np.meshgrid(x, y)
print('X,Y maps:', X.shape, Y.shape)
Z = himmelblau([X, Y])

fig = plt.figure('himmelblau')
ax = fig.add_subplot(projection='3d')
ax.plot_surface(X, Y, Z)
ax.view_init(60, -30)
ax.set_xlabel('x')
ax.set_ylabel('y')
plt.show()


# try different starting points: [1., 0.], [-4., 0.], [4., 0.]
x = torch.tensor([-4., 0.], requires_grad=True)
optimizer = torch.optim.Adam([x], lr=1e-3)
for step in range(20000):

    pred = himmelblau(x)

    optimizer.zero_grad()
    pred.backward()
    optimizer.step()

    if step % 2000 == 0:
        print('step {}: x = {}, f(x) = {}'
              .format(step, x.tolist(), pred.item()))
Binary file added lesson24-Logistic Regression/24 LR.pdf
Binary file added lesson25-交叉熵/25 交叉熵.pdf
Binary file added lesson26-LR多分类实战/26.pdf
86 changes: 86 additions & 0 deletions lesson26-LR多分类实战/main.py
@@ -0,0 +1,86 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms


batch_size=200
learning_rate=0.01
epochs=10

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_size, shuffle=True)



w1, b1 = torch.randn(200, 784, requires_grad=True), \
         torch.zeros(200, requires_grad=True)
w2, b2 = torch.randn(200, 200, requires_grad=True), \
         torch.zeros(200, requires_grad=True)
w3, b3 = torch.randn(10, 200, requires_grad=True), \
         torch.zeros(10, requires_grad=True)

torch.nn.init.kaiming_normal_(w1)
torch.nn.init.kaiming_normal_(w2)
torch.nn.init.kaiming_normal_(w3)
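# He (Kaiming) initialization keeps activations well-scaled through the
# ReLU layers; plain randn weights can stall training at this depth.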


def forward(x):
    x = x @ w1.t() + b1
    x = F.relu(x)
    x = x @ w2.t() + b2
    x = F.relu(x)
    x = x @ w3.t() + b3
    x = F.relu(x)
    return x
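# NOTE: the final ReLU clips logits to be non-negative; CrossEntropyLoss
# normally expects raw logits, so a last activation is usually omitted.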



optimizer = optim.SGD([w1, b1, w2, b2, w3, b3], lr=learning_rate)
criteon = nn.CrossEntropyLoss()

for epoch in range(epochs):

    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)

        logits = forward(data)
        loss = criteon(logits, target)

        optimizer.zero_grad()
        loss.backward()
        # print(w1.grad.norm(), w2.grad.norm())
        optimizer.step()

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data, target in test_loader:
            data = data.view(-1, 28 * 28)
            logits = forward(data)
            test_loss += criteon(logits, target).item()

            pred = logits.argmax(dim=1)
            correct += pred.eq(target).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Binary file added lesson27-MLP网络层/27 全连接层.pdf
83 changes: 83 additions & 0 deletions lesson27-MLP网络层/main.py
@@ -0,0 +1,83 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms


batch_size=200
learning_rate=0.01
epochs=10

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_size, shuffle=True)



class MLP(nn.Module):

    def __init__(self):
        super(MLP, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.ReLU(inplace=True),
            nn.Linear(200, 200),
            nn.ReLU(inplace=True),
            nn.Linear(200, 10),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        x = self.model(x)

        return x
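# NOTE: as in lesson26, the trailing nn.ReLU keeps logits non-negative;
# CrossEntropyLoss usually takes the raw output of the last Linear layer.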

net = MLP()
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
criteon = nn.CrossEntropyLoss()

for epoch in range(epochs):

    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)

        logits = net(data)
        loss = criteon(logits, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data, target in test_loader:
            data = data.view(-1, 28 * 28)
            logits = net(data)
            test_loss += criteon(logits, target).item()

            pred = logits.argmax(dim=1)
            correct += pred.eq(target).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Binary file added lesson28-激活函数与GPU加速/28.pdf
89 changes: 89 additions & 0 deletions lesson28-激活函数与GPU加速/main.py
@@ -0,0 +1,89 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms


batch_size=200
learning_rate=0.01
epochs=10

train_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=True, download=True,
                   transform=transforms.Compose([
                       transforms.ToTensor(),
                       transforms.Normalize((0.1307,), (0.3081,))
                   ])),
    batch_size=batch_size, shuffle=True)
test_loader = torch.utils.data.DataLoader(
    datasets.MNIST('../data', train=False, transform=transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])),
    batch_size=batch_size, shuffle=True)






class MLP(nn.Module):

    def __init__(self):
        super(MLP, self).__init__()

        self.model = nn.Sequential(
            nn.Linear(784, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 200),
            nn.LeakyReLU(inplace=True),
            nn.Linear(200, 10),
            nn.LeakyReLU(inplace=True),
        )

    def forward(self, x):
        x = self.model(x)

        return x
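# LeakyReLU scales negative values by a small slope instead of zeroing them,
# which helps avoid "dead" units compared with plain ReLU.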

device = torch.device('cuda:0')
net = MLP().to(device)
optimizer = optim.SGD(net.parameters(), lr=learning_rate)
criteon = nn.CrossEntropyLoss().to(device)

for epoch in range(epochs):

    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.view(-1, 28*28)
        data, target = data.to(device), target.to(device)

        logits = net(data)
        loss = criteon(logits, target)

        optimizer.zero_grad()
        loss.backward()
        optimizer.step()

        if batch_idx % 100 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))

    test_loss = 0
    correct = 0
    with torch.no_grad():  # no gradients needed for evaluation
        for data, target in test_loader:
            data = data.view(-1, 28 * 28)
            data, target = data.to(device), target.to(device)
            logits = net(data)
            test_loss += criteon(logits, target).item()

            pred = logits.argmax(dim=1)
            correct += pred.eq(target).sum().item()

    test_loss /= len(test_loader.dataset)
    print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
        test_loss, correct, len(test_loader.dataset),
        100. * correct / len(test_loader.dataset)))
Binary file added lesson29-MNIST测试/29.pdf