torch / cunn

Get NaN when training with multiple GPUs #396

Open foelin opened 7 years ago

foelin commented 7 years ago

Hi All,

I get NaN in gradParameters when training with multiple GPUs. I have tried this on both CUDA 7.5 (two K80s) and CUDA 8.0 (two 1080P) and got a similar error. Any suggestion would be greatly appreciated! Thanks

Code:

require 'torch'
require 'cutorch'
require 'nn'
require 'optim'
require 'paths'
require 'cunn'
require 'cudnn'

local ngf = 64
local ndf = 64
local nz = 128
local nc = 3
local batchSize = 48
local loadSize = 64

function makeDataParallel(model, nGPU)
  if nGPU > 1 then

    print('converting module to nn.DataParallelTable')
    assert(nGPU <= cutorch.getDeviceCount(), 'nGPU specified exceeds the number of available GPUs')

    local gpus = torch.range(1, nGPU):totable()
    local fastest, benchmark = cudnn.fastest, cudnn.benchmark

    -- DataParallelTable(dim, flattenParams, useNCCL): split minibatches along
    -- dimension 1, flatten parameters, and use NCCL when available
    local dpt = nn.DataParallelTable(1, true, true)
    :add(model, gpus)
    :threads(function()
        local cudnn = require 'cudnn'
        cudnn.fastest, cudnn.benchmark = fastest, benchmark
      end)
    --dpt.gradInput = nil

    model = dpt:cuda()
  end
  return model
end

function createModel()

  local netD = nn.Sequential()
  -- input is (nc) x 64 x 64
  netD:add(nn.SpatialConvolution(nc, ndf, 4, 4, 2, 2, 1, 1))
  netD:add(nn.LeakyReLU(0.2, true))
  -- state size: (ndf) x 32 x 32
  netD:add(nn.SpatialConvolution(ndf, ndf * 2, 4, 4, 2, 2, 1, 1))
  netD:add(nn.SpatialBatchNormalization(ndf * 2)):add(nn.LeakyReLU(0.2, true))
  -- state size: (ndf*2) x 16 x 16
  netD:add(nn.SpatialConvolution(ndf * 2, ndf * 4, 4, 4, 2, 2, 1, 1))
  netD:add(nn.SpatialBatchNormalization(ndf * 4)):add(nn.LeakyReLU(0.2, true))
  -- state size: (ndf*4) x 8 x 8
  netD:add(nn.SpatialConvolution(ndf * 4, ndf * 8, 4, 4, 2, 2, 1, 1))
  netD:add(nn.SpatialBatchNormalization(ndf * 8)):add(nn.LeakyReLU(0.2, true))
  -- state size: (ndf*8) x 4 x 4
  netD:add(nn.SpatialConvolution(ndf * 8, 1, 4, 4))
  netD:add(nn.Sigmoid())
  -- state size: 1 x 1 x 1
  netD:add(nn.View(1):setNumInputDims(3))
  -- state size: 1

  cudnn.convert(netD, cudnn)
  netD:cuda()

  return netD
end

torch.manualSeed(torch.random(1, 10000))
torch.setnumthreads(1)
torch.setdefaulttensortype('torch.FloatTensor')
cutorch.setDevice(1) -- by default, use GPU 1

model_single = createModel()
model_multi = makeDataParallel(model_single:clone(), 2)

-- 2. Create Criterion
criterion = nn.BCECriterion()
criterion:cuda()

collectgarbage()

--[[
   Verifying
]]--

local optimState_single = { learningRate = 0.0001,  weightDecay=5e-4 }
local optimState_multi = { learningRate = 0.0001,  weightDecay=5e-4 }

-- GPU inputs (preallocate)
local input = torch.CudaTensor(batchSize, nc, loadSize, loadSize)
input:uniform(-1,1)

local gt = torch.CudaTensor(batchSize, 1)

-- parameters and gradient parameters

local params_single, gradParams_single = model_single:getParameters()
local params_multi, gradParams_multi = model_multi:getParameters()

local loss_iter = 0
local real_label = 1
local fake_label = 0

local output_single 
local output_multi

local fevalNetD_single = function (x)
  gradParams_single:zero()

  gt:fill(real_label)
  output_single = model_single:forward(input)
  local real_err = criterion:forward(output_single, gt)
  local df_dd = criterion:backward(output_single, gt)
  model_single:backward(input, df_dd)

  loss_iter = real_err
  print("gradParams_single:sum(): ", gradParams_single:sum())
  return loss_iter, gradParams_single
end

local fevalNetD_multi = function (x)
  gradParams_multi:zero()

  -- train real
  gt:fill(real_label)
  output_multi = model_multi:forward(input)
  local real_err = criterion:forward(output_multi, gt)
  local df_dd = criterion:backward(output_multi, gt)
  model_multi:backward(input, df_dd)

  loss_iter = real_err
  print("gradParams_multi:sum(): ", gradParams_multi:sum())
  return loss_iter, gradParams_multi
end

-- Testing
print('netD params error before optim ', (params_single - params_multi):abs():max())

optim.adam(fevalNetD_single, params_single, optimState_single)
optim.adam(fevalNetD_multi, params_multi, optimState_multi)

print('netD params error after optim: ', (params_single - params_multi):abs():max())
print('netD gradParams error: ', (gradParams_single - gradParams_multi):abs():max())
print('netD output error: ', (output_single - output_multi):abs():max())

print('End of Testing!')

output:

converting module to nn.DataParallelTable
netD params error before optim 0
gradParams_single:sum(): -658.27288818359
gradParams_multi:sum(): nan
netD params error after optim: 0.00020003318786621
netD gradParams error: 28949807628288
netD output error: 0.0060847103595734
End of Testing!

fmassa commented 7 years ago

Instead of calling gradParams:zero(), try calling model:zeroGradParameters() instead.
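
For reference, a minimal sketch of that change applied to the multi-GPU closure from the script above; only the gradient-zeroing call differs, everything else is taken from the original post:

local fevalNetD_multi = function (x)
  -- zero gradients through the container so every replica is reset,
  -- rather than zeroing only the flattened storage
  model_multi:zeroGradParameters()

  gt:fill(real_label)
  output_multi = model_multi:forward(input)
  local real_err = criterion:forward(output_multi, gt)
  local df_dd = criterion:backward(output_multi, gt)
  model_multi:backward(input, df_dd)

  return real_err, gradParams_multi
end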

foelin commented 7 years ago

Thanks a lot for your help! It works on one machine with nccl installed. The output:

converting module to nn.DataParallelTable
netD params error before optim  0
gradParams_single:sum():        -658.81219482422
gradParams_multi:sum():         -660.19378662109
netD params error after optim:  0.00019995868206024
netD gradParams error:  0.0081400275230408
netD output error:      0.0074694454669952
End of Testing!

However, on another machine without nccl, it fails. The output:

converting module to nn.DataParallelTable
warning: could not load nccl, falling back to default communication
converting module to nn.DataParallelTable
warning: could not load nccl, falling back to default communication
netD params error before optim 0
gradParams_single:sum(): -453.74481201172
gradParams_multi:sum(): -280.88250732422
netD params error after optim: 0.00019999733194709
netD gradParams error: 0.62991511821747
netD output error: 5.8884222854298e+23
End of Testing!
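
For context, the warning above comes from nn.DataParallelTable falling back to its default communication path when it cannot load the nccl Lua bindings. A quick sanity check, sketched here assuming the nccl.torch bindings are the ones intended, is to try loading them before constructing the model:

-- sketch: confirm the nccl bindings are visible to Lua before training,
-- so DataParallelTable does not silently fall back to default communication
local ok, err = pcall(require, 'nccl')
if not ok then
  print('nccl bindings not found, DataParallelTable will fall back: ' .. tostring(err))
end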

arunmallya commented 7 years ago

Same issue: https://github.com/torch/cunn/issues/457

Why does this not cause an issue with optim, which directly operates on the gradParams?