boncey / Flickr4Java

Java API For Flickr. Fork of FlickrJ

How to use trained mnist to my own data? #654

Closed Nancyshehata closed 2 years ago

Nancyshehata commented 2 years ago

Hello everyone, I have successfully trained a model on the MNIST dataset in PyTorch, and now my question is: how do I use this trained model on my own data? Please let me know.

Thank you in advance

    import torch
    from torchvision import datasets
    from torchvision.transforms import ToTensor

    # Device configuration
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

    # Download MNIST and convert the images to tensors
    train_data = datasets.MNIST(
        root='data',
        train=True,
        transform=ToTensor(),
        download=True,
    )
    test_data = datasets.MNIST(
        root='data',
        train=False,
        transform=ToTensor(),
    )

    print(train_data)
    print(train_data.data.size())
    print(train_data.targets.size())
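To reuse this pipeline on your own images, the data only has to arrive in the same shape the network expects (1×28×28 grayscale tensors). A minimal sketch, assuming your pictures are sorted into one folder per class; the folder layout and path below are assumptions for illustration, not from the original post:

    import torch
    from torchvision import datasets, transforms

    own_transform = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),  # MNIST is single-channel
        transforms.Resize((28, 28)),                  # match MNIST resolution
        transforms.ToTensor(),                        # scale pixels to [0, 1]
    ])

    # Hypothetical layout: my_digits/0/*.png, my_digits/1/*.png, ...
    own_data = datasets.ImageFolder(root='my_digits', transform=own_transform)
    own_loader = torch.utils.data.DataLoader(own_data, batch_size=100, shuffle=False)

Note that MNIST digits are light-on-dark; if your images are dark-on-light, inverting them before prediction usually helps.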

    from torch.utils.data import DataLoader

    loaders = {
        'train': DataLoader(train_data,
                            batch_size=100,
                            shuffle=True,
                            num_workers=1),
        'test':  DataLoader(test_data,
                            batch_size=100,
                            shuffle=True,
                            num_workers=1),
    }

    import torch.nn as nn

    class CNN(nn.Module):
        def __init__(self):
            super(CNN, self).__init__()
            self.conv1 = nn.Sequential(
                nn.Conv2d(
                    in_channels=1,
                    out_channels=16,
                    kernel_size=5,
                    stride=1,
                    padding=2,
                ),
                nn.ReLU(),
                nn.MaxPool2d(kernel_size=2),
            )
            self.conv2 = nn.Sequential(
                nn.Conv2d(16, 32, 5, 1, 2),
                nn.ReLU(),
                nn.MaxPool2d(2),
            )
            # fully connected layer, output 10 classes
            self.out = nn.Linear(32 * 7 * 7, 10)

        def forward(self, x):
            x = self.conv1(x)
            x = self.conv2(x)
            # flatten the output of conv2 to (batch_size, 32 * 7 * 7)
            x = x.view(x.size(0), -1)
            output = self.out(x)
            return output, x

    cnn = CNN().to(device)  # move the model to the GPU if one is available
    print(cnn)
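As a sanity check on the `32 * 7 * 7` flatten size: each `MaxPool2d(2)` halves the spatial resolution, so the 28×28 input goes 28 → 14 → 7 while `conv2` produces 32 channels. A quick way to confirm the shapes (the dummy tensor below is only for illustration):

    # a dummy batch of one 28x28 grayscale image
    dummy = torch.zeros(1, 1, 28, 28, device=device)
    output, features = cnn(dummy)
    print(output.shape)    # torch.Size([1, 10])
    print(features.shape)  # torch.Size([1, 1568]) == 32 * 7 * 7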

    loss_func = nn.CrossEntropyLoss()

    from torch import optim
    optimizer = optim.Adam(cnn.parameters(), lr=0.01)

    num_epochs = 10

    def train(num_epochs, cnn, loaders):
        cnn.train()

        # Train the model
        total_step = len(loaders['train'])

        for epoch in range(num_epochs):
            for i, (images, labels) in enumerate(loaders['train']):

                # move the batch to the same device as the model
                b_x = images.to(device)   # batch x
                b_y = labels.to(device)   # batch y

                output = cnn(b_x)[0]
                loss = loss_func(output, b_y)

                # clear gradients, backpropagate, and update the weights
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()

                if (i + 1) % 100 == 0:
                    print('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                          .format(epoch + 1, num_epochs, i + 1, total_step, loss.item()))

    train(num_epochs, cnn, loaders)

    def test():
        cnn.eval()
        with torch.no_grad():
            correct = 0
            total = 0
            for images, labels in loaders['test']:
                test_output, last_layer = cnn(images.to(device))
                pred_y = torch.max(test_output, 1)[1]
                # accumulate over all batches instead of reporting only the last one
                correct += (pred_y == labels.to(device)).sum().item()
                total += labels.size(0)
            accuracy = correct / total
            print('Test Accuracy of the model on the 10000 test images: %.2f' % accuracy)
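To apply the trained network outside this script (e.g. to your own data), the weights have to be persisted and restored. A minimal sketch using PyTorch's `state_dict` mechanism; the file name `mnist_cnn.pt` is my own choice:

    # save after training
    torch.save(cnn.state_dict(), 'mnist_cnn.pt')

    # later, or in another script: rebuild the architecture, then load the weights
    model = CNN().to(device)
    model.load_state_dict(torch.load('mnist_cnn.pt', map_location=device))
    model.eval()  # switch to inference mode before predicting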

    test()

    # predict on the first ten test images and compare with the labels
    sample = next(iter(loaders['test']))
    imgs, lbls = sample
    actual_number = lbls[:10].numpy()

    test_output, last_layer = cnn(imgs[:10].to(device))
    pred_y = torch.max(test_output, 1)[1].cpu().numpy().squeeze()
    print(f'Prediction number: {pred_y}')
    print(f'Actual number: {actual_number}')
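And to answer the actual question: once the weights are loaded, running the model on one of your own pictures is just a matter of preprocessing it into the same 1×28×28 format the network was trained on. A sketch assuming a single image file; the path `my_digit.png` and the use of PIL are assumptions for illustration:

    from PIL import Image
    from torchvision import transforms

    preprocess = transforms.Compose([
        transforms.Grayscale(num_output_channels=1),  # one channel, like MNIST
        transforms.Resize((28, 28)),                  # MNIST resolution
        transforms.ToTensor(),                        # pixels in [0, 1]
    ])

    img = Image.open('my_digit.png')             # hypothetical input file
    x = preprocess(img).unsqueeze(0).to(device)  # add batch dimension: (1, 1, 28, 28)

    cnn.eval()
    with torch.no_grad():
        logits, _ = cnn(x)
        prediction = logits.argmax(dim=1).item()
    print(f'Predicted digit: {prediction}')

As noted above, MNIST digits are white on a black background, so a dark-on-light photo will usually need to be inverted before the prediction is reliable.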