stared / livelossplot

Live training loss plot in Jupyter Notebook for Keras, PyTorch and others
https://p.migdal.pl/livelossplot
MIT License
1.29k stars 143 forks source link

Plotting to two sets of boxes instead of one #79

Closed jbh1128d1 closed 4 years ago

jbh1128d1 commented 4 years ago

Hello,

I copied the script from this GIT account and end up getting testing and training in two separate plots instead of together and the legend in both say 'training'. See script and output below to spot any mistakes I'm missing:

max_trn_batch =  1000
print_interval = 100

def train_model(model, criterion, optimizer, num_epochs=10):
    liveloss = PlotLosses()

    for epoch in range(num_epochs):
        logs = {}
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for b, (image, label, policy, categorical_data) in enumerate(dataloaders[phase]):
                image = image.cuda()
                label = label.cuda()
                #numerical_data = numerical_data.cuda()
                categorical_data = categorical_data.cuda()

                outputs = model(image, categorical_data)
                loss = criterion(outputs, label)

                if phase == 'train':
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                _, preds = torch.max(outputs, 1)
                running_loss += loss.detach() * image.size(0)
                running_corrects += torch.sum(preds == label.data)

                #count batches
                b += 1
                #throttle the batches
                if b % print_interval == 0:
                    print(epoch, b)
                if b == max_trn_batch:
                    break

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.float() / len(dataloaders[phase].dataset)

            prefix = ''
            if phase == 'test':
                prefix = 'test_'

            logs[prefix + 'log loss'] = epoch_loss.item()
            logs[prefix + 'accuracy'] = epoch_acc.item()

        liveloss.update(logs)
        liveloss.draw()

train_model(model = combined_model, criterion = criterion, optimizer = optimizer, num_epochs=500)

image

stared commented 4 years ago

Use val_ or in creating PlotLosses object: PlotLosses(series_fmt={'training': '{}', 'test':'test_{}'}).

jbh1128d1 commented 4 years ago

I did the above:

def train_model(model, criterion, optimizer, num_epochs=10):
    liveloss = PlotLosses(series_fmt={'training': '{}', 'test':'test_{}'})
    max_accuracy = 0
    for epoch in range(num_epochs):
        logs = {}
        for phase in ['train', 'test']:
            if phase == 'train':
                model.train()
            else:
                model.eval()

            running_loss = 0.0
            running_corrects = 0

            for b, (image, label, policy, categorical_data) in enumerate(dataloaders[phase]):
                image = image.cuda()
                label = label.cuda()
                #numerical_data = numerical_data.cuda()
                categorical_data = categorical_data.cuda()

                outputs = model(image, categorical_data)
                loss = criterion(outputs, label)

                if phase == 'train':
                    optimizer.zero_grad()
                    loss.backward()
                    optimizer.step()

                _, preds = torch.max(outputs, 1)
                running_loss += loss.detach() * image.size(0)
                running_corrects += torch.sum(preds == label.data)

                b += 1
                if b % print_interval == 0:
                    print(epoch, b)

                if b == max_trn_batch:
                    break

            epoch_loss = running_loss / len(dataloaders[phase].dataset)
            epoch_acc = running_corrects.float() / len(dataloaders[phase].dataset)

            prefix = ''
            if phase == 'test':
                prefix = 'test_'
                if epoch_acc > max_accuracy:
                    max_accuracy = epoch_acc
                    torch.save(combined_model.state_dict()
                               , 'D:\\CIS inspection images 0318\\self_build\\combined_model_1.pt') 
                #if es.step(epoch_acc):
                    #break

            else:
                prefix = 'train_'

            logs[prefix + 'log loss'] = epoch_loss.item()
            logs[prefix + 'accuracy'] = epoch_acc.item()
            print(logs)

        liveloss.update(logs)
        liveloss.draw()

    print(max_accuracy)
    scheduler.step(loss)

Still the same issue. Same with `val_'