거의 알고리즘 일기장

optim & criterion 본문

pytorch 사용법

optim & criterion

건우권 2020. 9. 21. 16:31
Untitled1
In [3]:
%matplotlib inline
import matplotlib
import matplotlib.pyplot as plt

import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.utils.data import Dataset, DataLoader

import numpy as np

import torchvision 
import torchvision.transforms as transforms
In [4]:
transform = transforms.Compose([transforms.ToTensor(),
                                   transforms.Normalize((0.5,0.5,0.5), (0.5,0.5,0.5))])
In [5]:
trainset = torchvision.datasets.CIFAR10(root = './data',
                                          train = True,
                                          download = True,
                                          transform = transform)

testset = torchvision.datasets.CIFAR10(root = './data',
                                          train = False,
                                          download = True,
                                          transform = transform)
Files already downloaded and verified
Files already downloaded and verified
In [6]:
trainloader = DataLoader(trainset, batch_size=8, shuffle=True, num_workers=2)
testloader = DataLoader(testset, batch_size = 8, shuffle=False, num_workers=2)
In [22]:
class my_network(nn.Module):
    def __init__(self):
        super(my_network, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, 5)
        self.conv2 = nn.Conv2d(64, 30, 5)
        self.fc1 = nn.Linear(30 *5 *5, 128)
        self.fc2 = nn.Linear(128, 10)
        
    def forward(self, x):
        x = F.relu(self.conv1(x), inplace = True)
        x = F.max_pool2d(x, (2, 2))                
        x = F.relu(self.conv2(x), inplace = True)
        x = F.max_pool2d(x, (2, 2))
        x = x.view(x.shape[0], -1)
        x = F.relu(self.fc1(x), inplace = True)
        x = F.relu(self.fc2(x), inplace = True)
        
        return x
In [23]:
my_net = my_network()
In [24]:
optim = torch.optim.SGD(my_net.parameters(), lr = 0.001, momentum = 0.9)
loss_function = nn.CrossEntropyLoss()
In [27]:
epoch_num = 3
for epoch in range(epoch_num):
    for i, data in enumerate(trainloader):
        inputs, labels = data
        inputs, labels = Variable(inputs), Variable(labels)
        
        optim.zero_grad()
        out = my_net(inputs)
        loss = loss_function(out, labels)
        loss.backward()
        optim.step()
        
        if i% 1000== 0:
            print("%d=> loss : %.3f" %(i, loss))

print("train over")
0=> loss : 1.638
1000=> loss : 1.478
2000=> loss : 1.137
3000=> loss : 1.093
4000=> loss : 1.025
5000=> loss : 1.535
6000=> loss : 1.248
0=> loss : 1.047
1000=> loss : 1.042
2000=> loss : 0.914
3000=> loss : 0.866
4000=> loss : 0.866
5000=> loss : 0.576
6000=> loss : 1.239
0=> loss : 0.816
1000=> loss : 1.040
2000=> loss : 0.919
3000=> loss : 0.889
4000=> loss : 1.042
5000=> loss : 1.440
6000=> loss : 0.789
train over
In [29]:
#test
total = 0
correct = 0
for data in testloader:
    images, labels = data
    outputs = my_net(Variable(images))
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted == labels).sum()

print("Accuracy of the network on ther 10000 test images: %f" %(100*correct/total))
Accuracy of the network on ther 10000 test images: 66.000000
In [ ]:
 
반응형
Comments