# Training loop: one full pass over `train_loader` per epoch, logging every
# 10 batches.  Assumes `model`, `optimizer`, `criterion`, `train_loader`,
# and `epoch` (the total number of epochs) are defined earlier in the file.
model.train()
for epoch_ in range(epoch):
    for batch_idx, (data, label) in enumerate(train_loader):
        # BUG FIX: gradients must be cleared before *every* backward pass.
        # The original called clear_grad() once per epoch (outside this
        # loop), so gradients silently accumulated across all batches.
        optimizer.clear_grad()
        outputs = model(data, label)
        # Make sure both operands are Paddle tensors before the loss call
        # (paddle.to_tensor is effectively a copy if they already are).
        outputs, label = paddle.to_tensor(outputs), paddle.to_tensor(label)
        loss = criterion(outputs, label)
        loss.backward()
        optimizer.step()
        if batch_idx % 10 == 0:
            print('Train Epoch: {} [{}/{} ({:.0f}%)]\tLoss: {:.6f}'.format(
                epoch_, batch_idx * len(data), len(train_loader.dataset),
                100. * batch_idx / len(train_loader), loss.item()))
def test(test_loader, model):
    """Evaluate `model` on `test_loader` and print the average loss.

    Uses the module-level `criterion` as the loss function.  Prints the
    per-batch loss every 10 batches and the overall average at the end.

    Args:
        test_loader: iterable yielding (data, label) batches.
        model: the Paddle model to evaluate (switched to eval mode here).
    """
    model.eval()
    losses = []
    # BUG FIX: evaluation must not build or propagate gradients.  The
    # original called loss.backward() in eval mode, accumulating gradients
    # into the parameters without ever clearing them.  paddle.no_grad()
    # disables autograd for the whole pass.
    with paddle.no_grad():
        for batch_idx, (data, label) in enumerate(test_loader):
            outputs = model(data, label)
            outputs, label = paddle.to_tensor(outputs), paddle.to_tensor(label)
            loss = criterion(outputs, label)
            # Record every batch so the reported average covers the whole
            # test set, not just the every-10th batches that get printed.
            losses.append(loss.item())
            if batch_idx % 10 == 0:
                print("Batch {}: loss {}".format(batch_idx, loss.item()))
    # Guard against an empty loader (ZeroDivisionError in the original).
    avg = sum(losses) / len(losses) if losses else float('nan')
    # BUG FIX: removed a stray '"' that the original had inside this string.
    print('Test set: Average loss: {:.4f}'.format(avg))
How can I apply this inside the training loop above? Could you give more detail, ideally with a pointer to the relevant section of the documentation?