ruby-numo / numo-narray

Ruby/Numo::NArray - New NArray class library
http://ruby-numo.github.io/narray/
BSD 3-Clause "New" or "Revised" License

All Values Converge To "NaN" #75

Closed ghost closed 6 years ago

ghost commented 6 years ago

OS: Ubuntu 17.10 x86_64
ruby -v: ruby 2.3.3p222

Hi. For some reason, after some iterations of train! all my weight values are equal to "-NaN".
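
A quick way to narrow down where things go wrong is to test the weights for NaN after each update, e.g. at the end of the train! loop. A minimal sketch (isnan on float NArrays and any? on the resulting Numo::Bit mask are assumed to behave as below in this Numo version):

require "numo/narray"

w = Numo::Float32.new(3, 3).rand(-1, 1)
p w.isnan.any?    # => false, freshly initialized weights are finite

bad = Numo::NMath.log(Numo::Float32[-1.0])   # log of a negative value is NaN
p bad.isnan.any?  # => true, so this check flags the first poisoned update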

require "numo/narray"

class NeuralNetwork

  def initialize(inputNodes:0,hiddenNodes:[],outputNodes:0,learningRate: 0.01)
    @nInputs = inputNodes
    @nHidden = hiddenNodes
    @nHiddenLayers = hiddenNodes.length
    @nOutputs = outputNodes
    @learningRate = learningRate
    @hiddenWeights = []
    @hiddenBias = []
    tmp1,tmp2 = @nInputs,@nHidden[0]
    @hiddenWeights[0] = Numo::Float32.new(tmp2,tmp1).rand(-1,1)
    @hiddenBias[0] = Numo::Float32.new(tmp2,1).rand(-1,1)

    for i in (1...@nHiddenLayers)
      tmp1,tmp2 = @nHidden[i-1],@nHidden[i]
      @hiddenWeights[i] = Numo::Float32.new(tmp2,tmp1).rand(-1,1)
      @hiddenBias[i] = Numo::Float32.new(tmp2,1).rand(-1,1)
    end

    @outputWeights = Numo::Float32.new(@nOutputs,@nHidden[@nHiddenLayers-1]).rand(-1,1)
    @outputBias =  Numo::Float32.new(@nOutputs,1).rand(-1,1)

  end

  def train!(data,labels)
    data = data.map{|d| Numo::Float32.cast([d]).transpose}
    data.each.with_index do |x,ind|
      layers_inputs = [x]
      #feed forward
      for i in (0...@nHiddenLayers)
        x1 = relu(@hiddenWeights[i].dot(layers_inputs[-1])+@hiddenBias[i])
        layers_inputs.push(x1)
      end
      output = softmax(@outputWeights.dot(layers_inputs[-1])+@outputBias)

      #backpropagation
      diff = output.clone
      diff[labels[ind]]-= 1
      outdelta = softmax_prime(output).dot(diff) * @learningRate
      @outputBias = @outputBias - outdelta
      @outputWeights = @outputBias - outdelta.dot(layers_inputs[-1].transpose)
      delta = @outputWeights.transpose.dot(outdelta)
      (@nHiddenLayers-1).downto(0) do |i|
        delta = delta*relu_prime(layers_inputs[i+1])
        @hiddenWeights[i] = @hiddenWeights[i] - delta.dot(layers_inputs[i].transpose)
        @hiddenBias[i] = @hiddenBias[i] - delta
        delta = @hiddenWeights[i].transpose.dot(delta)
      end
    end
  end

  def test(data)
    data = data.map{|d| Numo::Float32.cast([d]).transpose}
    out = []
    data.each do |x|
      layers_inputs = [x]
      #feed forward
      for i in (0...@nHiddenLayers)
        x1 = relu(@hiddenWeights[i].dot(layers_inputs[-1])+@hiddenBias[i])
        layers_inputs.push(x1)
      end
      out.push(softmax(@outputWeights.dot(layers_inputs[-1])+@outputBias))
    end
    return out.map{|e| e.max_index}
  end

  private
  def relu(x)
    return (x > 0).cast_to(Numo::Float32) * x
  end

  def relu_prime(x)
    return (x > 0).cast_to(Numo::Float32)
  end

  def softmax(x)
    v = Numo::NMath.exp(x)
    return (v*(v.sum**-1))
  end
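
  # Side note on the softmax above: it computes exp(x) / sum(exp(x)) directly.
  # Numo::NMath.exp overflows to Infinity for large inputs, and Infinity divided
  # by Infinity gives NaN. A shifted variant (a sketch, mathematically equivalent)
  # keeps every exponent argument <= 0:
  #
  #   def softmax(x)
  #     v = Numo::NMath.exp(x - x.max)
  #     v * (v.sum**-1)
  #   end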

  def softmax_prime(y)
    return y.flatten.diag - y.dot(y.transpose)
  end

  def toOneHot(x)
    a = Numo::Float32.zeros(@nOutputs,1)
    a[x] = 1
    return a
  end
  def sigmoid(x)
    return (1+Numo::NMath.exp(-x))**-1
  end

  def sigmoid_prime(y)
    return y*(1-y)
  end
end
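
For completeness, roughly how the class is driven (the toy data below is just a stand-in so the snippet runs on its own; any list of feature vectors plus integer labels in 0...outputNodes would do):

nn = NeuralNetwork.new(inputNodes: 2, hiddenNodes: [8, 8], outputNodes: 2, learningRate: 0.01)

# toy stand-in data: label 1 when both inputs have the same sign
data   = Array.new(200) { [rand(-1.0..1.0), rand(-1.0..1.0)] }
labels = data.map { |a, b| a * b > 0 ? 1 : 0 }

100.times { nn.train!(data, labels) }
p nn.test(data.first(5))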