tflearn / tflearn

Deep learning library featuring a higher-level API for TensorFlow.
http://tflearn.org

ValueError: List argument 'inputs' to 'MergeSummary' Op with length 0 shorter than minimum length 1 #731

Open jdvala opened 7 years ago

jdvala commented 7 years ago

While working on an autoencoder, I started implementing the pretraining phase. Everything looked good for quite a while, and the weights and biases were properly shared among the layers, but when I started training the whole network it raised a ValueError. I have spent two days tweaking and twisting the code, but I am unable to come up with an explanation.
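For context, the sharing between the pretraining layers and the final network relies on tflearn's scope and reuse arguments. A minimal sketch of the pattern (the names here are illustrative, not my actual code):

import tensorflow as tf
import tflearn

x = tf.placeholder("float", [None, 41])
# The first call under a scope creates the variables.
fc_a = tflearn.fully_connected(x, 41, activation='relu', scope='shared')
# reuse=True makes the second call bind to the same W and b.
fc_b = tflearn.fully_connected(x, 41, activation='relu', scope='shared', reuse=True)
assert fc_a.W is fc_b.W  # both layers point at one weight variable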

Here is the full traceback:

Traceback (most recent call last):
  File "1.py", line 118, in <module>
    model = tflearn.DNN(net, tensorboard_verbose=0,best_val_accuracy=0.1)
  File "/usr/local/lib/python3.5/site-packages/tflearn/models/dnn.py", line 64, in __init__
    best_val_accuracy=best_val_accuracy)
  File "/usr/local/lib/python3.5/site-packages/tflearn/helpers/trainer.py", line 131, in __init__
    clip_gradients)
  File "/usr/local/lib/python3.5/site-packages/tflearn/helpers/trainer.py", line 668, in initialize_training_ops
    self.create_summaries(tensorboard_verbose)
  File "/usr/local/lib/python3.5/site-packages/tflearn/helpers/trainer.py", line 864, in create_summaries
    self.summ_op = merge_summary(tf.get_collection(summ_collection))
  File "/usr/local/lib/python3.5/site-packages/tensorflow/python/summary/summary.py", line 296, in merge
    val = _gen_logging_ops._merge_summary(inputs=inputs, name=name)
  File "/usr/local/lib/python3.5/site-packages/tensorflow/python/ops/gen_logging_ops.py", line 227, in _merge_summary
    result = _op_def_lib.apply_op("MergeSummary", inputs=inputs, name=name)
  File "/usr/local/lib/python3.5/site-packages/tensorflow/python/framework/op_def_library.py", line 551, in apply_op
    (input_name, op_type_name, len(values), num_attr.minimum))
ValueError: List argument 'inputs' to 'MergeSummary' Op with length 0 shorter than minimum length 1.
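
If I am reading the last frame right, the Trainer passes whatever tf.get_collection returns for its summary collection straight into merge_summary, so the collection must be coming back empty. TensorFlow raises exactly this error for an empty merge (a minimal sketch, assuming TF 1.x):

import tensorflow as tf

# merge requires at least one summary; an empty list reproduces the error:
# ValueError: List argument 'inputs' to 'MergeSummary' Op with length 0
# shorter than minimum length 1.
tf.summary.merge([])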

Here is the code:

import tensorflow as tf
import tflearn

x = tf.placeholder("float", [None, 41])

# First layer
encoder_layer1 = tflearn.fully_connected(x, 41, activation='relu', bias=True, scope='layer1')

loss = tflearn.losses.L2(encoder_layer1, wd=0.001)

# Optimizer that will try to minimize the loss function
sgd = tflearn.optimizers.SGD(learning_rate=0.01, name='SGD').get_tensor()

# Define the training operation we want to perform on the data
train_ops = tflearn.helpers.trainer.TrainOp(loss, sgd, batch_size=64)

# Define the trainer, which takes the train op and trains the network;
# this is our model.
trainer = tflearn.helpers.trainer.Trainer(train_ops, tensorboard_verbose=0)
layer1_weights = encoder_layer1.W.eval(session=trainer.session)
layer1_bias = encoder_layer1.b.eval(session=trainer.session)
# Use the fit method to train the network with our data.
trainer.fit(feed_dicts={x: train_set}, n_epoch=1, show_metric=False, run_id='firstlayer_pretraining')
print("Saving weights and biases...")
trainer.save('layer1')

# Layer two pre-training
encoder_layer2 = tflearn.fully_connected(encoder_layer1, 30, bias=True, scope='layer2')
# Restore the layer-one model
trainer.restore('layer1', trainable_variable_only=True)
layer2_weights = encoder_layer2.W.eval(session=trainer.session)
layer2_bias = encoder_layer2.b.eval(session=trainer.session)
print("shape of weights layer 2:", tf.shape(layer2_weights).eval(session=trainer.session))
trainer.fit(feed_dicts={x: train_set}, n_epoch=1, show_metric=False, run_id='secondlayer_pretraining')
# Saving the second layer's model
print("Saving weights and biases...")
trainer.save('layer2')

# Layer three pre-training
encoder_layer3 = tflearn.fully_connected(encoder_layer2, 20, bias=True, scope='layer3')
trainer.restore('layer2', trainable_variable_only=True)
layer3_weights = encoder_layer3.W.eval(session=trainer.session)
layer3_bias = encoder_layer3.b.eval(session=trainer.session)
print("shape of weights layer 3:", tf.shape(layer3_weights).eval(session=trainer.session))

trainer.fit(feed_dicts={x: train_set}, n_epoch=1, show_metric=False, run_id='Thirdlayer_pretraining')
print("Saving the weights and biases...")
trainer.save('layer3')

# Layer 4
encoder_layer4 = tflearn.fully_connected(encoder_layer3, 10, scope='layer4')
# Restoring the model
trainer.restore('layer3', trainable_variable_only=True)
layer4_weights = encoder_layer4.W.eval(session=trainer.session)
layer4_bias = encoder_layer4.b.eval(session=trainer.session)
trainer.fit(feed_dicts={x: train_set}, n_epoch=1, show_metric=False, run_id='Fourthlayer_pretraining')
print("Saving the weights and biases...")
trainer.save('layer4')

# Layer 5 pre-training
encoder_layer5 = tflearn.fully_connected(encoder_layer4, 5, activation='softmax', scope='layer5')
# Restoring the model
trainer.restore('layer4', trainable_variable_only=True)
layer5_weights = encoder_layer5.W.eval(session=trainer.session)
layer5_bias = encoder_layer5.b.eval(session=trainer.session)
trainer.fit(feed_dicts={x: train_set}, n_epoch=1, show_metric=False, run_id='Fifthlayer_pretraining')

## Building the full network
encoder = tflearn.input_data(shape=[None, 41])
encoder = tflearn.fully_connected(encoder, 41, trainable=True, restore=False, scope='layer1', reuse=True)
encoder = tflearn.fully_connected(encoder, 30, trainable=True, restore=False, scope='layer2', reuse=True)
encoder = tflearn.fully_connected(encoder, 20, trainable=True, restore=False, scope='layer3', reuse=True)
encoder = tflearn.fully_connected(encoder, 10, trainable=True, restore=False, scope='layer4', reuse=True)
encoder = tflearn.fully_connected(encoder, 5, activation='softmax', trainable=True, restore=False, scope='layer5', reuse=True)

acc = tflearn.metrics.Accuracy()

net = tflearn.regression(encoder, optimizer='adam', learning_rate=0.01,
                         loss='mean_square', metric=acc, shuffle_batches=True)
# Modeling the neural network (for details http://tflearn.org/models/dnn/)
model = tflearn.DNN(net, tensorboard_verbose=0)
# Training the Neural Network (for details http://tflearn.org/models/dnn/)
model.fit(test_set, test_labels_set, n_epoch=1, validation_set=(valid_set, valid_labels_set),
          run_id="auto_encoder", batch_size=100, show_metric=True, snapshot_epoch=False)
jdvala commented 7 years ago

@aymericdamien Could you please look into this?