1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16 17 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60
| import torch import torch.nn as nn import torch.optim as optim import torch.nn.functional as F from torchvision import datasets, transforms from torch.utils.data import DataLoader
class SoftmaxRegression(nn.Module):
    """One-hidden-layer MLP classifier: Flatten -> Linear -> ReLU -> Linear.

    Despite the name, this is a small MLP, not plain softmax regression.
    `forward` returns raw logits; pair it with ``nn.CrossEntropyLoss``,
    which applies log-softmax internally.

    Args:
        num_input: flattened input feature count (e.g. 28*28 for MNIST-style images).
        num_output: number of classes.
    """

    def __init__(self, num_input, num_output):
        super().__init__()
        self.flatten = nn.Flatten()
        self.linear1 = nn.Linear(num_input, 100)  # hidden width fixed at 100
        self.linear2 = nn.Linear(100, num_output)

    def forward(self, x):
        # Collapse all non-batch dims: (N, C, H, W) image batches become (N, C*H*W).
        x = self.flatten(x)
        x = self.linear1(x)
        x = F.relu(x)
        x = self.linear2(x)
        return x


def accuracy(data_iter, model):
    """Return the classification accuracy of `model` over `data_iter`.

    Args:
        data_iter: iterable of (X, y) batches; `model(X)` must yield per-class
            scores with classes on dim 1.
        model: callable mapping a batch X to logits.

    Returns:
        Fraction of correctly classified samples in [0, 1]; 0.0 for an
        empty iterator (fix: original raised ZeroDivisionError).
    """
    right_number = 0
    number = 0
    # fix: run under no_grad so evaluation does not build an autograd graph
    # (the original relied on every caller wrapping this in no_grad themselves).
    with torch.no_grad():
        for X, y in data_iter:
            y_hat = model(X).argmax(dim=1)
            right_number += (y_hat == y).sum().item()
            number += len(y)
    return right_number / number if number else 0.0
batch_size = 128

# FashionMNIST train/test splits, converted to float tensors in [0, 1].
# fix: download=True is a no-op when the files already exist under ./data,
# but prevents a crash on a machine where the dataset was never fetched
# (the original hard-coded download=False).
train_data = datasets.FashionMNIST(root='./data', transform=transforms.ToTensor(),
                                   train=True, download=True)
test_data = datasets.FashionMNIST(root='./data', transform=transforms.ToTensor(),
                                  train=False, download=True)

# NOTE(review): num_workers=4 spawns worker processes; on Windows/macOS spawn
# platforms this normally requires an `if __name__ == "__main__":` guard — confirm.
train_iter = DataLoader(dataset=train_data, shuffle=True,
                        batch_size=batch_size, num_workers=4)
test_iter = DataLoader(dataset=test_data, shuffle=False,
                       batch_size=batch_size, num_workers=4)
# Input/output dimensions: a flattened 28x28 grayscale image maps to
# one of 10 Fashion-MNIST classes.
num_input = 28 * 28
num_output = 10

# CrossEntropyLoss consumes raw logits directly (it applies log-softmax
# itself), matching the model's un-normalized output.
loss_function = nn.CrossEntropyLoss()
model = SoftmaxRegression(num_input, num_output)
# Plain mini-batch SGD; the learning rate is kept as a named constant so
# it is easy to tweak in one place.
lr = 0.01
optimizer = optim.SGD(model.parameters(), lr=lr)
num_epochs = 5
for epoch in range(num_epochs):
    # One full pass over the training set: forward, loss, backprop, step.
    for X, y in train_iter:
        y_hat = model(X)
        loss = loss_function(y_hat, y)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # Evaluate on the held-out test set without tracking gradients.
    with torch.no_grad():
        acc = accuracy(test_iter, model)
    # fix: original spec `{(acc*100):2f}` means min-width 2 with the default
    # 6 decimals; `:.2f` gives the intended two decimal places.
    print(f'epoch {epoch + 1}, Test Accuracy {acc * 100:.2f}%')
|