guillaume-chevalier / LSTM-Human-Activity-Recognition

Human Activity Recognition example using TensorFlow on a smartphone sensors dataset and an LSTM RNN, classifying the type of movement amongst six activity categories - Guillaume Chevalier

ValueError: Variable rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel already exists #14

Closed: AllenCoder closed this issue 5 years ago

AllenCoder commented 6 years ago

When I run this code:

# Graph input/output
x = tf.placeholder(tf.float32, [None, n_steps, n_input])
y = tf.placeholder(tf.float32, [None, n_classes])

# Graph weights
weights = {
    'hidden': tf.Variable(tf.random_normal([n_input, n_hidden])), # Hidden layer weights
    'out': tf.Variable(tf.random_normal([n_hidden, n_classes], mean=1.0))
}
biases = {
    'hidden': tf.Variable(tf.random_normal([n_hidden])),
    'out': tf.Variable(tf.random_normal([n_classes]))
}

pred = LSTM_RNN(x, weights, biases)

# Loss, optimizer and evaluation
l2 = lambda_loss_amount * sum(
    tf.nn.l2_loss(tf_var) for tf_var in tf.trainable_variables()
) # L2 loss prevents this overkill neural network from overfitting the data
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=y, logits=pred)) + l2 # Softmax loss
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate).minimize(cost) # Adam Optimizer

correct_pred = tf.equal(tf.argmax(pred,1), tf.argmax(y,1))
accuracy = tf.reduce_mean(tf.cast(correct_pred, tf.float32))

I get this error:

---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-20-7963db4edbf4> in <module>()
     14 }
     15 
---> 16 pred = LSTM_RNN(x, weights, biases)
     17 
     18 # Loss, optimizer and evaluation

<ipython-input-13-1da1ce9bcbd5> in LSTM_RNN(_X, _weights, _biases)
     24     lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2], state_is_tuple=True)
     25     # Get LSTM cell output
---> 26     outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, _X, dtype=tf.float32)
     27 
     28     # Get last time step's output feature for a "many to one" style classifier,

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn.py in static_rnn(cell, inputs, initial_state, dtype, sequence_length, scope)
   1235             state_size=cell.state_size)
   1236       else:
-> 1237         (output, state) = call_cell()
   1238 
   1239       outputs.append(output)

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn.py in <lambda>()
   1222         varscope.reuse_variables()
   1223       # pylint: disable=cell-var-from-loop
-> 1224       call_cell = lambda: cell(input_, state)
   1225       # pylint: enable=cell-var-from-loop
   1226       if sequence_length is not None:

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
    178       with vs.variable_scope(vs.get_variable_scope(),
    179                              custom_getter=self._rnn_get_variable):
--> 180         return super(RNNCell, self).__call__(inputs, state)
    181 
    182   def _rnn_get_variable(self, getter, *args, **kwargs):

E:\Anaconda\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
    448         # Check input assumptions set after layer building, e.g. input shape.
    449         self._assert_input_compatibility(inputs)
--> 450         outputs = self.call(inputs, *args, **kwargs)
    451 
    452         # Apply activity regularization.

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in call(self, inputs, state)
    936                                       [-1, cell.state_size])
    937           cur_state_pos += cell.state_size
--> 938         cur_inp, new_state = cell(cur_inp, cur_state)
    939         new_states.append(new_state)
    940 

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
    178       with vs.variable_scope(vs.get_variable_scope(),
    179                              custom_getter=self._rnn_get_variable):
--> 180         return super(RNNCell, self).__call__(inputs, state)
    181 
    182   def _rnn_get_variable(self, getter, *args, **kwargs):

E:\Anaconda\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
    448         # Check input assumptions set after layer building, e.g. input shape.
    449         self._assert_input_compatibility(inputs)
--> 450         outputs = self.call(inputs, *args, **kwargs)
    451 
    452         # Apply activity regularization.

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in call(self, inputs, state)
    399       c, h = array_ops.split(value=state, num_or_size_splits=2, axis=1)
    400 
--> 401     concat = _linear([inputs, h], 4 * self._num_units, True)
    402 
    403     # i = input_gate, j = new_input, f = forget_gate, o = output_gate

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in _linear(args, output_size, bias, bias_initializer, kernel_initializer)
   1037         _WEIGHTS_VARIABLE_NAME, [total_arg_size, output_size],
   1038         dtype=dtype,
-> 1039         initializer=kernel_initializer)
   1040     if len(args) == 1:
   1041       res = math_ops.matmul(args[0], weights)

E:\Anaconda\lib\site-packages\tensorflow\python\ops\variable_scope.py in get_variable(name, shape, dtype, initializer, regularizer, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter)
   1063       collections=collections, caching_device=caching_device,
   1064       partitioner=partitioner, validate_shape=validate_shape,
-> 1065       use_resource=use_resource, custom_getter=custom_getter)
   1066 get_variable_or_local_docstring = (
   1067     """%s

E:\Anaconda\lib\site-packages\tensorflow\python\ops\variable_scope.py in get_variable(self, var_store, name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter)
    960           collections=collections, caching_device=caching_device,
    961           partitioner=partitioner, validate_shape=validate_shape,
--> 962           use_resource=use_resource, custom_getter=custom_getter)
    963 
    964   def _get_partitioned_variable(self,

E:\Anaconda\lib\site-packages\tensorflow\python\ops\variable_scope.py in get_variable(self, name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource, custom_getter)
    358           reuse=reuse, trainable=trainable, collections=collections,
    359           caching_device=caching_device, partitioner=partitioner,
--> 360           validate_shape=validate_shape, use_resource=use_resource)
    361     else:
    362       return _true_getter(

E:\Anaconda\lib\site-packages\tensorflow\python\ops\variable_scope.py in wrapped_custom_getter(getter, *args, **kwargs)
   1403     return custom_getter(
   1404         functools.partial(old_getter, getter),
-> 1405         *args, **kwargs)
   1406   return wrapped_custom_getter
   1407 

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in _rnn_get_variable(self, getter, *args, **kwargs)
    181 
    182   def _rnn_get_variable(self, getter, *args, **kwargs):
--> 183     variable = getter(*args, **kwargs)
    184     trainable = (variable in tf_variables.trainable_variables() or
    185                  (isinstance(variable, tf_variables.PartitionedVariable) and

E:\Anaconda\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in _rnn_get_variable(self, getter, *args, **kwargs)
    181 
    182   def _rnn_get_variable(self, getter, *args, **kwargs):
--> 183     variable = getter(*args, **kwargs)
    184     trainable = (variable in tf_variables.trainable_variables() or
    185                  (isinstance(variable, tf_variables.PartitionedVariable) and

E:\Anaconda\lib\site-packages\tensorflow\python\ops\variable_scope.py in _true_getter(name, shape, dtype, initializer, regularizer, reuse, trainable, collections, caching_device, partitioner, validate_shape, use_resource)
    350           trainable=trainable, collections=collections,
    351           caching_device=caching_device, validate_shape=validate_shape,
--> 352           use_resource=use_resource)
    353 
    354     if custom_getter is not None:

E:\Anaconda\lib\site-packages\tensorflow\python\ops\variable_scope.py in _get_single_variable(self, name, shape, dtype, initializer, regularizer, partition_info, reuse, trainable, collections, caching_device, validate_shape, use_resource)
    662                          " Did you mean to set reuse=True in VarScope? "
    663                          "Originally defined at:\n\n%s" % (
--> 664                              name, "".join(traceback.format_list(tb))))
    665       found_var = self._vars[name]
    666       if not shape.is_compatible_with(found_var.get_shape()):

ValueError: Variable rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel already exists, disallowed. Did you mean to set reuse=True in VarScope? Originally defined at:

  File "E:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py", line 1204, in __init__
    self._traceback = self._graph._extract_stack()  # pylint: disable=protected-access
  File "E:\Anaconda\lib\site-packages\tensorflow\python\framework\ops.py", line 2630, in create_op
    original_op=self._default_original_op, op_def=op_def)
  File "E:\Anaconda\lib\site-packages\tensorflow\python\framework\op_def_library.py", line 767, in apply_op
    op_def=op_def)
guillaume-chevalier commented 6 years ago

You probably executed the IPython cell twice without resetting the default TensorFlow graph in between. Each run adds ops to the same graph, so the second run tries to recreate variables (such as rnn/multi_rnn_cell/cell_0/basic_lstm_cell/kernel) that the first run already created.
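
For example, a minimal sketch of the fix (TF 1.x; the two-cell structure mirrors LSTM_RNN, but the sizes and input shapes here are made up for illustration, not the notebook's actual hyperparameters):

import tensorflow as tf

def build_pred():
    # Same structure as LSTM_RNN: two stacked BasicLSTMCells.
    lstm_cell_1 = tf.contrib.rnn.BasicLSTMCell(32, forget_bias=1.0)
    lstm_cell_2 = tf.contrib.rnn.BasicLSTMCell(32, forget_bias=1.0)
    lstm_cells = tf.contrib.rnn.MultiRNNCell([lstm_cell_1, lstm_cell_2],
                                             state_is_tuple=True)
    inputs = [tf.placeholder(tf.float32, [None, 9]) for _ in range(5)]
    outputs, states = tf.contrib.rnn.static_rnn(lstm_cells, inputs,
                                                dtype=tf.float32)
    return outputs[-1]

pred = build_pred()       # first run: creates rnn/multi_rnn_cell/... variables
# pred = build_pred()     # a second call in the same graph raises the ValueError
tf.reset_default_graph()  # what re-running the cell should do first
pred = build_pred()       # fine again: the graph is empty, so no name clash

In a notebook, putting tf.reset_default_graph() at the top of the graph-building cell makes the cell safe to re-run. Note that any existing tf.Session is tied to the old graph and must be recreated as well.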

Wentong-DST commented 5 years ago

Agreed. Please also see https://github.com/kratzert/finetune_alexnet_with_tensorflow/issues/8#issuecomment-312211170, which explains the exact cause: in TF 1.x graph mode, every run of the cell keeps adding ops to the same default graph, so the second run tries to create variables whose names are already taken. If the graph-building code genuinely needs to run more than once in the same graph, there is also the reuse route the error message hints at; a sketch follows.
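
A minimal sketch of that alternative (assuming TF >= 1.4, where tf.AUTO_REUSE is available; the scope name lstm_rnn and the sizes are illustrative, not from the notebook):

import tensorflow as tf

def build_pred(inputs):
    # AUTO_REUSE creates the variables on the first call and silently
    # reuses the same kernels on every later call, instead of raising
    # "Variable ... already exists".
    with tf.variable_scope("lstm_rnn", reuse=tf.AUTO_REUSE):
        cells = tf.contrib.rnn.MultiRNNCell(
            [tf.contrib.rnn.BasicLSTMCell(32) for _ in range(2)],
            state_is_tuple=True)
        outputs, _ = tf.contrib.rnn.static_rnn(cells, inputs,
                                               dtype=tf.float32)
    return outputs[-1]

inputs = [tf.placeholder(tf.float32, [None, 9]) for _ in range(5)]
pred_a = build_pred(inputs)  # creates lstm_rnn/rnn/... variables
pred_b = build_pred(inputs)  # reuses them, no ValueError

For the original notebook, though, tf.reset_default_graph() before rebuilding is the simpler fix, since the duplicate graph was never intended in the first place.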