Implementations for my blog post [here](https://medium.com/@TalPerry/deep-learning-the-stock-market-df853d139e02#.flflpo3xf)
MIT License
260 stars · 169 forks · source link
Tensorflow v1.4 compatibility: ValueError: Dimensions must be equal, but are 200 and 2428 for 'rnn/rnn/attention_cell_wrapper/attention_cell_wrapper_1/multi_rnn_cell/cell_0/cell_0/gru_cell/MatMul_2' (op: 'MatMul') with input shapes: [1,200], [2428,200] #9
Attempting to run this in Tensorflow v1.4, and running into the issue below when replacing tf.pack/unpack with tf.stack/unstack. Any thoughts on what's going on here? I attached full stack from the run in question.
Thanks!
Jim
WARNING:tensorflow:<tensorflow.contrib.rnn.python.ops.rnn_cell.AttentionCellWrapper object at 0x0000000048BDA588>: Using a concatenated state is slower and will soon be deprecated. Use state_is_tuple=True.
Tensor("Shape:0", shape=(3,), dtype=int32)
Tensor("Shape_1:0", shape=(3,), dtype=int32)
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
472 compat.as_text(c_api.TF_Message(self.status.status)),
--> 473 c_api.TF_GetCode(self.status.status))
474 # Delete the underlying status object from memory otherwise it stays alive
WARNING:tensorflow:<tensorflow.contrib.rnn.python.ops.rnn_cell.AttentionCellWrapper object at 0x0000000048BDA588>: Using a concatenated state is slower and will soon be deprecated. Use state_is_tuple=True.
Tensor("Shape:0", shape=(3,), dtype=int32)
Tensor("Shape_1:0", shape=(3,), dtype=int32)
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg)
472 compat.as_text(c_api.TF_Message(self.status.status)),
--> 473 c_api.TF_GetCode(self.status.status))
474 # Delete the underlying status object from memory otherwise it stays alive
InvalidArgumentError: Dimensions must be equal, but are 200 and 2428 for 'rnn/rnn/attention_cell_wrapper/attention_cell_wrapper_1/multi_rnn_cell/cell_0/cell_0/gru_cell/MatMul_2' (op: 'MatMul') with input shapes: [1,200], [2428,200].
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)
in ()
1 with tf.Graph().as_default():
----> 2 model = RNNModel()
3 input_ = train[0]
4 target = train[1]
5 with tf.Session() as sess:
in __init__(self)
41 scope.reuse_variables()
42
---> 43 output, state = self.gru_cell(inp, state)
44 states.append(state)
45 outputs.append(output)
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
181 with vs.variable_scope(vs.get_variable_scope(),
182 custom_getter=self._rnn_get_variable):
--> 183 return super(RNNCell, self).__call__(inputs, state)
184
185 def _rnn_get_variable(self, getter, *args, **kwargs):
C:\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
573 if in_graph_mode:
574 self._assert_input_compatibility(inputs)
--> 575 outputs = self.call(inputs, *args, **kwargs)
576
577 if outputs is None:
C:\Anaconda3\lib\site-packages\tensorflow\contrib\rnn\python\ops\rnn_cell.py in call(self, inputs, state)
1117 self._linear1 = _Linear([inputs, attns], input_size, True)
1118 inputs = self._linear1([inputs, attns])
-> 1119 cell_output, new_state = self._cell(inputs, state)
1120 if self._state_is_tuple:
1121 new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
181 with vs.variable_scope(vs.get_variable_scope(),
182 custom_getter=self._rnn_get_variable):
--> 183 return super(RNNCell, self).__call__(inputs, state)
184
185 def _rnn_get_variable(self, getter, *args, **kwargs):
C:\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
573 if in_graph_mode:
574 self._assert_input_compatibility(inputs)
--> 575 outputs = self.call(inputs, *args, **kwargs)
576
577 if outputs is None:
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in call(self, inputs, state)
1064 [-1, cell.state_size])
1065 cur_state_pos += cell.state_size
-> 1066 cur_inp, new_state = cell(cur_inp, cur_state)
1067 new_states.append(new_state)
1068
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
181 with vs.variable_scope(vs.get_variable_scope(),
182 custom_getter=self._rnn_get_variable):
--> 183 return super(RNNCell, self).__call__(inputs, state)
184
185 def _rnn_get_variable(self, getter, *args, **kwargs):
C:\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
573 if in_graph_mode:
574 self._assert_input_compatibility(inputs)
--> 575 outputs = self.call(inputs, *args, **kwargs)
576
577 if outputs is None:
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in call(self, inputs, state)
320 kernel_initializer=self._kernel_initializer)
321
--> 322 value = math_ops.sigmoid(self._gate_linear([inputs, state]))
323 r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
324
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, args)
1187 res = math_ops.matmul(args[0], self._weights)
1188 else:
-> 1189 res = math_ops.matmul(array_ops.concat(args, 1), self._weights)
1190 if self._build_bias:
1191 res = nn_ops.bias_add(res, self._biases)
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py in matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b, a_is_sparse, b_is_sparse, name)
1889 else:
1890 return gen_math_ops._mat_mul(
-> 1891 a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
1892
1893
C:\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_math_ops.py in _mat_mul(a, b, transpose_a, transpose_b, name)
2434 _, _, _op = _op_def_lib._apply_op_helper(
2435 "MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b,
-> 2436 name=name)
2437 _result = _op.outputs[:]
2438 _inputs_flat = _op.inputs
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
785 op = g.create_op(op_type_name, inputs, output_types, name=scope,
786 input_types=input_types, attrs=attr_protos,
--> 787 op_def=op_def)
788 return output_structure, op_def.is_stateful, op
789
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
2956 op_def=op_def)
2957 if compute_shapes:
-> 2958 set_shapes_for_outputs(ret)
2959 self._add_op(ret)
2960 self._record_op_seen_by_control_dependencies(ret)
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in set_shapes_for_outputs(op)
2207 shape_func = _call_cpp_shape_fn_and_require_op
2208
-> 2209 shapes = shape_func(op)
2210 if shapes is None:
2211 raise RuntimeError(
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in call_with_requiring(op)
2157
2158 def call_with_requiring(op):
-> 2159 return call_cpp_shape_fn(op, require_shape_fn=True)
2160
2161 _call_cpp_shape_fn_and_require_op = call_with_requiring
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\common_shapes.py in call_cpp_shape_fn(op, require_shape_fn)
625 res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
626 input_tensors_as_shapes_needed,
--> 627 require_shape_fn)
628 if not isinstance(res, dict):
629 # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn)
689 missing_shape_fn = True
690 else:
--> 691 raise ValueError(err.message)
692
693 if missing_shape_fn:
ValueError: Dimensions must be equal, but are 200 and 2428 for 'rnn/rnn/attention_cell_wrapper/attention_cell_wrapper_1/multi_rnn_cell/cell_0/cell_0/gru_cell/MatMul_2' (op: 'MatMul') with input shapes: [1,200], [2428,200].
>
> During handling of the above exception, another exception occurred:
>
> ValueError Traceback (most recent call last)
> in ()
> 1 with tf.Graph().as_default():
> ----> 2 model = RNNModel()
> 3 input_ = train[0]
> 4 target = train[1]
> 5 with tf.Session() as sess:
>
> in __init__(self)
> 41 scope.reuse_variables()
> 42
> ---> 43 output, state = self.gru_cell(inp, state)
> 44 states.append(state)
> 45 outputs.append(output)
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
> 181 with vs.variable_scope(vs.get_variable_scope(),
> 182 custom_getter=self._rnn_get_variable):
> --> 183 return super(RNNCell, self).__call__(inputs, state)
> 184
> 185 def _rnn_get_variable(self, getter, *args, **kwargs):
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
> 573 if in_graph_mode:
> 574 self._assert_input_compatibility(inputs)
> --> 575 outputs = self.call(inputs, *args, **kwargs)
> 576
> 577 if outputs is None:
>
> C:\Anaconda3\lib\site-packages\tensorflow\contrib\rnn\python\ops\rnn_cell.py in call(self, inputs, state)
> 1117 self._linear1 = _Linear([inputs, attns], input_size, True)
> 1118 inputs = self._linear1([inputs, attns])
> -> 1119 cell_output, new_state = self._cell(inputs, state)
> 1120 if self._state_is_tuple:
> 1121 new_state_cat = array_ops.concat(nest.flatten(new_state), 1)
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
> 181 with vs.variable_scope(vs.get_variable_scope(),
> 182 custom_getter=self._rnn_get_variable):
> --> 183 return super(RNNCell, self).__call__(inputs, state)
> 184
> 185 def _rnn_get_variable(self, getter, *args, **kwargs):
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
> 573 if in_graph_mode:
> 574 self._assert_input_compatibility(inputs)
> --> 575 outputs = self.call(inputs, *args, **kwargs)
> 576
> 577 if outputs is None:
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in call(self, inputs, state)
> 1064 [-1, cell.state_size])
> 1065 cur_state_pos += cell.state_size
> -> 1066 cur_inp, new_state = cell(cur_inp, cur_state)
> 1067 new_states.append(new_state)
> 1068
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, inputs, state, scope)
> 181 with vs.variable_scope(vs.get_variable_scope(),
> 182 custom_getter=self._rnn_get_variable):
> --> 183 return super(RNNCell, self).__call__(inputs, state)
> 184
> 185 def _rnn_get_variable(self, getter, *args, **kwargs):
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\layers\base.py in __call__(self, inputs, *args, **kwargs)
> 573 if in_graph_mode:
> 574 self._assert_input_compatibility(inputs)
> --> 575 outputs = self.call(inputs, *args, **kwargs)
> 576
> 577 if outputs is None:
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in call(self, inputs, state)
> 320 kernel_initializer=self._kernel_initializer)
> 321
> --> 322 value = math_ops.sigmoid(self._gate_linear([inputs, state]))
> 323 r, u = array_ops.split(value=value, num_or_size_splits=2, axis=1)
> 324
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\rnn_cell_impl.py in __call__(self, args)
> 1187 res = math_ops.matmul(args[0], self._weights)
> 1188 else:
> -> 1189 res = math_ops.matmul(array_ops.concat(args, 1), self._weights)
> 1190 if self._build_bias:
> 1191 res = nn_ops.bias_add(res, self._biases)
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\math_ops.py in matmul(a, b, transpose_a, transpose_b, adjoint_a, adjoint_b, a_is_sparse, b_is_sparse, name)
> 1889 else:
> 1890 return gen_math_ops._mat_mul(
> -> 1891 a, b, transpose_a=transpose_a, transpose_b=transpose_b, name=name)
> 1892
> 1893
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\ops\gen_math_ops.py in _mat_mul(a, b, transpose_a, transpose_b, name)
> 2434 _, _, _op = _op_def_lib._apply_op_helper(
> 2435 "MatMul", a=a, b=b, transpose_a=transpose_a, transpose_b=transpose_b,
> -> 2436 name=name)
> 2437 _result = _op.outputs[:]
> 2438 _inputs_flat = _op.inputs
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\framework\op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
> 785 op = g.create_op(op_type_name, inputs, output_types, name=scope,
> 786 input_types=input_types, attrs=attr_protos,
> --> 787 op_def=op_def)
> 788 return output_structure, op_def.is_stateful, op
> 789
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in create_op(self, op_type, inputs, dtypes, input_types, name, attrs, op_def, compute_shapes, compute_device)
> 2956 op_def=op_def)
> 2957 if compute_shapes:
> -> 2958 set_shapes_for_outputs(ret)
> 2959 self._add_op(ret)
> 2960 self._record_op_seen_by_control_dependencies(ret)
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in set_shapes_for_outputs(op)
> 2207 shape_func = _call_cpp_shape_fn_and_require_op
> 2208
> -> 2209 shapes = shape_func(op)
> 2210 if shapes is None:
> 2211 raise RuntimeError(
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\framework\ops.py in call_with_requiring(op)
> 2157
> 2158 def call_with_requiring(op):
> -> 2159 return call_cpp_shape_fn(op, require_shape_fn=True)
> 2160
> 2161 _call_cpp_shape_fn_and_require_op = call_with_requiring
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\framework\common_shapes.py in call_cpp_shape_fn(op, require_shape_fn)
> 625 res = _call_cpp_shape_fn_impl(op, input_tensors_needed,
> 626 input_tensors_as_shapes_needed,
> --> 627 require_shape_fn)
> 628 if not isinstance(res, dict):
> 629 # Handles the case where _call_cpp_shape_fn_impl calls unknown_shape(op).
>
> C:\Anaconda3\lib\site-packages\tensorflow\python\framework\common_shapes.py in _call_cpp_shape_fn_impl(op, input_tensors_needed, input_tensors_as_shapes_needed, require_shape_fn)
> 689 missing_shape_fn = True
> 690 else:
> --> 691 raise ValueError(err.message)
> 692
> 693 if missing_shape_fn:
>
> **ValueError: Dimensions must be equal, but are 200 and 2428 for 'rnn/rnn/attention_cell_wrapper/attention_cell_wrapper_1/multi_rnn_cell/cell_0/cell_0/gru_cell/MatMul_2' (op: 'MatMul') with input shapes: [1,200], [2428,200].**
Howdy Tal,
Attempting to run this in Tensorflow v1.4, and running into the issue below when replacing tf.pack/unpack with tf.stack/unstack. Any thoughts on what's going on here? I attached full stack from the run in question.
Thanks! Jim
C:\Anaconda3\lib\site-packages\tensorflow\python\framework\errors_impl.py in __exit__(self, type_arg, value_arg, traceback_arg) 472 compat.as_text(c_api.TF_Message(self.status.status)), --> 473 c_api.TF_GetCode(self.status.status)) 474 # Delete the underlying status object from memory otherwise it stays alive
InvalidArgumentError: Dimensions must be equal, but are 200 and 2428 for 'rnn/rnn/attention_cell_wrapper/attention_cell_wrapper_1/multi_rnn_cell/cell_0/cell_0/gru_cell/MatMul_2' (op: 'MatMul') with input shapes: [1,200], [2428,200].
During handling of the above exception, another exception occurred:
ValueError Traceback (most recent call last)