Skip to content

Commit

Permalink
Initial commit
Browse files Browse the repository at this point in the history
  • Loading branch information
github-classroom[bot] authored Oct 10, 2024
0 parents commit c1bcec1
Show file tree
Hide file tree
Showing 6 changed files with 241 additions and 0 deletions.
33 changes: 33 additions & 0 deletions part1/classify.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,33 @@
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # noqa

import argparse
import tensorflow as tf
import imageio
import numpy as np


def load_image(path):
    """Load a 28x28 grayscale image file and return it as a (1, 28, 28)
    float32 tensor with pixel values scaled to [0, 1].

    Exits the process with status 1 if *path* does not exist.
    """
    if not os.path.exists(path):
        # BUG FIX: previously printed the global `args.input`, which is wrong
        # for any caller that passes a different path (and a NameError if the
        # function is used before argument parsing).
        print(f'File {path} does not exist!')
        raise SystemExit(1)

    # NOTE(review): assumes a single-channel 28x28 image — verify inputs.
    img = np.array(imageio.imread(path)).reshape((1, 28, 28)) / 255.0
    return tf.convert_to_tensor(img, tf.float32)


if __name__ == '__main__':
    # CLI: --input is the image to classify; --model-path is a TensorFlow
    # SavedModel directory produced by train.py.
    cli_parser = argparse.ArgumentParser()
    cli_parser.add_argument('--input', type=str, required=True)
    cli_parser.add_argument('--model-path', type=str,
                            required=False, default='/tmp/mnist_model/')
    # Keep the name `args`: load_image's error path reads this global.
    args = cli_parser.parse_args()

    print('Loading model from:', args.model_path)
    saved_model = tf.saved_model.load(args.model_path)

    print('Loading image from:', args.input)
    image_tensor = load_image(args.input)

    # The model outputs one score per digit class; pick the best one.
    predicted_digit = np.argmax(saved_model(image_tensor))
    print('\nPrediction:', predicted_digit)
Binary file added part1/images.tar.gz
Binary file not shown.
54 changes: 54 additions & 0 deletions part1/pt_classify.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,54 @@
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision.io import read_image
import sys
import argparse

class Net(nn.Module):
    """Small CNN for 28x28 single-channel MNIST digits.

    Two conv layers, max-pooling, dropout, and two fully connected layers;
    the output is log-probabilities over the 10 digit classes.
    """

    def __init__(self):
        super().__init__()
        # Attribute names must match the checkpoint format (pickled module).
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) log-probabilities."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.dropout1(F.max_pool2d(x, 2))
        x = torch.flatten(x, 1)  # (N, 64, 12, 12) -> (N, 9216)
        x = self.dropout2(F.relu(self.fc1(x)))
        return F.log_softmax(self.fc2(x), dim=1)


if __name__ == '__main__':
    # CLI: --input is an image file; --model-path is a fully pickled module
    # written by pt_train.py's torch.save(model, ...).
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, required=True)
    parser.add_argument('--model-path', type=str,
                        required=False, default='mnist_cnn.pt')
    args = parser.parse_args()

    print('Loading model from:', args.model_path)
    # weights_only=False is required: the checkpoint is a pickled nn.Module,
    # not a state_dict (torch >= 2.6 defaults to weights_only=True).
    model = torch.load(args.model_path, weights_only=False)
    # BUG FIX: the model was saved in train mode, so dropout stayed active
    # at inference and made predictions nondeterministic.
    model.eval()

    print('Loading image from:', args.input)

    device = torch.device("cpu")
    img = read_image(args.input).to(device).float()
    # BUG FIX: match the training preprocessing — ToTensor scales uint8
    # pixels to [0, 1], then Normalize((0.1307,), (0.3081,)) is applied.
    # Previously raw 0-255 values were fed to the network.
    img = (img / 255.0 - 0.1307) / 0.3081
    img = img[None]  # add batch dimension -> (1, C, 28, 28)
    # NOTE(review): assumes a single-channel (grayscale) input image — verify.

    with torch.no_grad():
        output = model(img)
    prediction = torch.argmax(output).item()
    print('\nPrediction:', prediction)
103 changes: 103 additions & 0 deletions part1/pt_train.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,103 @@
from __future__ import print_function
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import StepLR

# The following code is from: https://github.com/pytorch/examples/tree/main/mnist

class Net(nn.Module):
    """CNN for MNIST from the PyTorch examples repository.

    Architecture: conv(1->32) -> conv(32->64) -> maxpool -> dropout ->
    fc(9216->128) -> dropout -> fc(128->10), returning log-probabilities.
    """

    def __init__(self):
        super().__init__()
        # Keep these attribute names: the full module is pickled by main()
        # and restored by name in pt_classify.py.
        self.conv1 = nn.Conv2d(1, 32, 3, 1)
        self.conv2 = nn.Conv2d(32, 64, 3, 1)
        self.dropout1 = nn.Dropout(0.25)
        self.dropout2 = nn.Dropout(0.5)
        self.fc1 = nn.Linear(9216, 128)
        self.fc2 = nn.Linear(128, 10)

    def forward(self, x):
        """Map a (N, 1, 28, 28) batch to (N, 10) log-probabilities."""
        x = F.relu(self.conv1(x))
        x = F.relu(self.conv2(x))
        x = self.dropout1(F.max_pool2d(x, 2))
        x = torch.flatten(x, 1)  # (N, 64, 12, 12) -> (N, 9216)
        x = self.dropout2(F.relu(self.fc1(x)))
        return F.log_softmax(self.fc2(x), dim=1)


def train(model, device, train_loader, optimizer, epoch):
    """Run one training epoch over *train_loader*.

    Performs a standard NLL-loss SGD step per batch and logs progress
    every 10 batches. *epoch* is only used for the log message.
    """
    model.train()
    num_samples = len(train_loader.dataset)
    for batch_idx, (data, target) in enumerate(train_loader):
        data = data.to(device)
        target = target.to(device)
        optimizer.zero_grad()
        loss = F.nll_loss(model(data), target)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            seen = batch_idx * len(data)
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch, seen, num_samples,
                100. * batch_idx / len(train_loader), loss.item()))


def test(model, device, test_loader):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()

test_loss /= len(test_loader.dataset)

print('\nTest set: Average loss: {:.4f}, Accuracy: {}/{} ({:.0f}%)\n'.format(
test_loss, correct, len(test_loader.dataset),
100. * correct / len(test_loader.dataset)))


def main(epochs=1):
    """Train the MNIST CNN on CPU and save it as a pickled module.

    Args:
        epochs: number of training epochs (default 1, preserving the
            original single-pass behavior).

    Downloads MNIST into ../data on first run; writes mnist_cnn.pt.
    """
    device = "cpu"

    train_kwargs = {'batch_size': 64}
    test_kwargs = {'batch_size': 1000}

    # Standard MNIST mean/std — inference code must apply the same transform.
    transform = transforms.Compose([
        transforms.ToTensor(),
        transforms.Normalize((0.1307,), (0.3081,))
    ])
    dataset1 = datasets.MNIST('../data', train=True, download=True,
                              transform=transform)
    dataset2 = datasets.MNIST('../data', train=False,
                              transform=transform)
    train_loader = torch.utils.data.DataLoader(dataset1, **train_kwargs)
    test_loader = torch.utils.data.DataLoader(dataset2, **test_kwargs)

    model = Net().to(device)
    optimizer = optim.Adadelta(model.parameters(), lr=1.0)
    scheduler = StepLR(optimizer, step_size=1, gamma=0.7)

    for epoch in range(1, epochs + 1):
        train(model, device, train_loader, optimizer, epoch)
        # FIX: test_loader was constructed but never used (the evaluation
        # call was left commented out) — report held-out accuracy per epoch.
        test(model, device, test_loader)
        scheduler.step()

    # NOTE: saves the entire pickled module, not a state_dict —
    # pt_classify.py depends on this format.
    torch.save(model, "mnist_cnn.pt")


if __name__ == '__main__':
    main()
51 changes: 51 additions & 0 deletions part1/train.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,51 @@
import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3' # noqa

import argparse
import tensorflow as tf


def load_mnist_data():
    """Fetch MNIST via Keras and return ((x_train, y_train), (x_test, y_test)).

    Pixel values are scaled from uint8 0-255 to floats in [0, 1];
    labels are returned unchanged.
    """
    (x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()

    x_train = x_train / 255.0
    x_test = x_test / 255.0

    return (x_train, y_train), (x_test, y_test)


def train_mnist_model(epochs):
    """Build, train, and evaluate a simple dense MNIST classifier.

    Args:
        epochs: number of training epochs.

    Returns:
        The trained tf.keras model. Its outputs are raw logits over the
        10 digit classes (no softmax layer).
    """
    (x_train, y_train), (x_test, y_test) = load_mnist_data()

    model = tf.keras.models.Sequential([
        tf.keras.layers.Flatten(input_shape=(28, 28)),
        tf.keras.layers.Dense(128, activation='relu'),
        tf.keras.layers.Dropout(0.2),
        tf.keras.layers.Dense(10)  # logits — see from_logits=True below
    ])

    # DEAD CODE REMOVED: the tutorial's untrained-prediction probe
    # (model(x_train[:1]).numpy()) and sample-loss probe
    # (loss_fn(y_train[:1], predictions).numpy()) computed values
    # that were immediately discarded.
    loss_fn = tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
    model.compile(optimizer='adam', loss=loss_fn, metrics=['accuracy'])

    model.fit(x_train, y_train, epochs=epochs)
    model.evaluate(x_test, y_test, verbose=2)

    return model


if __name__ == '__main__':
    # CLI: --model-path is the SavedModel output directory; --epochs
    # controls training length (default 2).
    cli = argparse.ArgumentParser()
    cli.add_argument('--model-path', type=str,
                     required=False, default='/tmp/mnist_model/')
    cli.add_argument('--epochs', type=int, required=False, default=2)
    opts = cli.parse_args()

    print('Training MNIST model')
    trained = train_mnist_model(epochs=opts.epochs)

    print('Saving model to', opts.model_path)
    tf.saved_model.save(trained, opts.model_path)
Binary file added part2/titles.tar.gz
Binary file not shown.

0 comments on commit c1bcec1

Please sign in to comment.