infocusp / tf_cnnvis

CNN visualization tool in TensorFlow
MIT License

Fetch argument None has invalid type <class 'NoneType'> #12

Closed · jubjamie closed this 6 years ago

jubjamie commented 7 years ago

Hi. I'm having a lot of trouble trying to get your function to work. I'm getting an error now that looks like it is coming from your deconvolution functions. Could you please let me know what you think the solution is?

Code I used:

is_loaded=tf_cnnvis.deconv_visualization(graph_or_path=tf.get_default_graph(),
                                         value_feed_dict={x_pl:x_batch, y_gt:targets_batch, is_training:False, valid_eval_accs:1000.0, valid_xent:1000.0},
                                         layers='r',
                                         path_logdir="c:/p17/logs/tf_cnnvis/flowers/1/logs",
                                         path_outdir="c:/p17/logs/tf_cnnvis/flowers/1/out")

Thanks.

INFO:tensorflow:Restoring parameters from model\tmp-model
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-13-41761d870143> in <module>()
      3                                          layers='r',
      4                                          path_logdir="c:/p17/logs/tf_cnnvis/flowers/1/logs",
----> 5                                          path_outdir="c:/p17/logs/tf_cnnvis/flowers/1/out")

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tf_cnnvis-1.0.0-py3.5.egg\tf_cnnvis\tf_cnnvis.py in deconv_visualization(graph_or_path, value_feed_dict, input_tensor, layers, path_logdir, path_outdir)
    381 def deconv_visualization(graph_or_path, value_feed_dict, input_tensor = None, layers = 'r', path_logdir = './Log', path_outdir = "./Output"):
    382     is_success = _get_visualization(graph_or_path, value_feed_dict, input_tensor = input_tensor, layers = layers, method = "deconv", 
--> 383         path_logdir = path_logdir, path_outdir = path_outdir)
    384         return is_success
    385 def deepdream_visualization(graph_or_path, value_feed_dict, layer, classes, input_tensor = None, path_logdir = './Log', path_outdir = "./Output"):

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tf_cnnvis-1.0.0-py3.5.egg\tf_cnnvis\tf_cnnvis.py in _get_visualization(graph_or_path, value_feed_dict, input_tensor, layers, path_logdir, path_outdir, method)
    149                         elif layers != None and layers.lower() in dict_layer.keys():
    150                                 layer_type = dict_layer[layers.lower()]
--> 151                                 is_success = _visualization_by_layer_type(g, value_feed_dict, input_tensor, layer_type, method, path_logdir, path_outdir)
    152                         else:
    153                                 is_success = False

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tf_cnnvis-1.0.0-py3.5.egg\tf_cnnvis\tf_cnnvis.py in _visualization_by_layer_type(graph, value_feed_dict, input_tensor, layer_type, method, path_logdir, path_outdir)
    202 
    203         for layer in layers:
--> 204                 is_success = _visualization_by_layer_name(graph, value_feed_dict, input_tensor, layer, method, path_logdir, path_outdir)
    205         return is_success
    206 

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tf_cnnvis-1.0.0-py3.5.egg\tf_cnnvis\tf_cnnvis.py in _visualization_by_layer_name(graph, value_feed_dict, input_tensor, layer_name, method, path_logdir, path_outdir)
    263                         elif method == "deconv":
    264                                 # deconvolution
--> 265                                 results = _deconvolution(graph, sess, op_tensor, X, feed_dict)
    266                         elif method == "deepdream":
    267                                 # deepdream

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tf_cnnvis-1.0.0-py3.5.egg\tf_cnnvis\tf_cnnvis.py in _deconvolution(graph, sess, op_tensor, X, feed_dict)
    310                                                 c += 1
    311                                 if c > 0:
--> 312                                         out.extend(sess.run(reconstruct[:c], feed_dict = feed_dict))
    313         return out
    314 def _deepdream(graph, sess, op_tensor, X, feed_dict, layer, path_outdir, path_logdir):

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tensorflow\python\client\session.py in run(self, fetches, feed_dict, options, run_metadata)
    787     try:
    788       result = self._run(None, fetches, feed_dict, options_ptr,
--> 789                          run_metadata_ptr)
    790       if run_metadata:
    791         proto_data = tf_session.TF_GetBuffer(run_metadata_ptr)

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tensorflow\python\client\session.py in _run(self, handle, fetches, feed_dict, options, run_metadata)
    982     # Create a fetch handler to take care of the structure of fetches.
    983     fetch_handler = _FetchHandler(
--> 984         self._graph, fetches, feed_dict_string, feed_handles=feed_handles)
    985 
    986     # Run request and get response.

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tensorflow\python\client\session.py in __init__(self, graph, fetches, feeds, feed_handles)
    408     """
    409     with graph.as_default():
--> 410       self._fetch_mapper = _FetchMapper.for_fetch(fetches)
    411     self._fetches = []
    412     self._targets = []

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tensorflow\python\client\session.py in for_fetch(fetch)
    228     elif isinstance(fetch, (list, tuple)):
    229       # NOTE(touts): This is also the code path for namedtuples.
--> 230       return _ListFetchMapper(fetch)
    231     elif isinstance(fetch, dict):
    232       return _DictFetchMapper(fetch)

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tensorflow\python\client\session.py in __init__(self, fetches)
    335     """
    336     self._fetch_type = type(fetches)
--> 337     self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    338     self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
    339 

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tensorflow\python\client\session.py in <listcomp>(.0)
    335     """
    336     self._fetch_type = type(fetches)
--> 337     self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
    338     self._unique_fetches, self._value_indices = _uniquify_fetches(self._mappers)
    339 

C:\Users\username\Anaconda3\envs\tfGPU\lib\site-packages\tensorflow\python\client\session.py in for_fetch(fetch)
    225     if fetch is None:
    226       raise TypeError('Fetch argument %r has invalid type %r' %
--> 227                       (fetch, type(fetch)))
    228     elif isinstance(fetch, (list, tuple)):
    229       # NOTE(touts): This is also the code path for namedtuples.

TypeError: Fetch argument None has invalid type <class 'NoneType'>

falaktheoptimist commented 7 years ago

From the error message it seems that the feed_dict going in has some element of type "None". Could you please recheck the value_feed_dict that you're passing in?
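
For example, a quick sanity scan over the dictionary before the call (a minimal sketch; value_feed_dict stands for whatever dict you are passing in):

for key, value in value_feed_dict.items():
    if key is None or value is None:
        print("None entry found:", key, "->", value)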

jubjamie commented 7 years ago

I've had a check and that feed_dict value is exactly what goes into one of my previous .run() statements. In fact it's just copied over from my testing code block, which functions fine. I've even tried putting the values into the feed_dict manually and it still fails. Is there anything that tf_cnnvis does behind the scenes that would make it fail here, even though the same feed_dict works perfectly fine in plain TensorFlow?

falaktheoptimist commented 7 years ago

There's a transfer of feed_dict elements happening inside tf_cnnvis. We cannot think of any case in which this would change a dictionary element you pass in to None, but yours could still be a corner case we overlooked. Could you add two print statements in tf_cnnvis/utils.py: in the function parse_tensors_dict, add print(X_in) and print(feed_dict[X_in]) at line 84, and post the results? It would help us debug what is causing the error.

Or, a simpler solution, if you don't mind- just send us the graph/ feed_dict and we'll check things at our end.

jubjamie commented 7 years ago

Hmm. So it prints out my image data like so:

Tensor("ImageIn:0", shape=(?, 224, 224, 3), dtype=float32)
[[[[ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   ..., 
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]]

  [[ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   ..., 
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]]

  [[ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   ..., 
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]]

  ..., 
  [[ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   ..., 
   [ 0.09019608  0.14117648  0.09019608]
   [ 0.07450981  0.12156863  0.08235294]
   [ 0.07843138  0.1254902   0.08627451]]

  [[ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   ..., 
   [ 0.08627451  0.1254902   0.09019608]
   [ 0.07058824  0.11764706  0.08235294]
   [ 0.07450981  0.11764706  0.08627451]]

  [[ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   [ 0.          0.          0.        ]
   ..., 
   [ 0.08235294  0.12156863  0.09019608]
   [ 0.07058824  0.11372549  0.08235294]
   [ 0.07450981  0.11764706  0.08627451]]]]

And then it returns the error from my original problem. It doesn't look like it's printing out the second value, feed_dict[X_in]. But I know that stuff is being sent to it.

falaktheoptimist commented 7 years ago

No, actually it's printing out both: X_in is the Tensor("ImageIn:0", shape=(?, 224, 224, 3), dtype=float32) and feed_dict[X_in] is the array below it. So we can be sure there are no issues there, and that is the only element we modify a bit. One of the other feed_dict elements must be becoming None, then. Since you're not getting any issues running your test code, all the inputs at your end must be fine. I'm still scratching my head over how this error is popping up. BTW, does the activation visualization case work for you?

ghost commented 7 years ago

@falaktheoptimist I can reproduce the issue and have the same problem. I have implemented a standard CNN classifying MNIST numbers. I call tf_cnnvis in the following function:

def visualize_layers(feed_dict):
    # activation visualization
    layers = ['r', 'p', 'c']    # r = ReLU, p = pooling, c = convolutional layers

    start = time.time()
    is_success = activation_visualization(graph_or_path=tf.get_default_graph(), value_feed_dict=feed_dict,
                                          layers=layers, path_logdir="C:/Users/bucpau/PycharmProjects/Academy/Logs/", path_outdir="C:/Users/bucpau/PycharmProjects/Academy/Visualization/")
    start = time.time() - start
    print("Total time for neuron activation visualization : {} Success: {}".format(start, is_success))
    # deconv visualization
    layers = ['r', 'p', 'c']

    start = time.time()
    is_success = deconv_visualization(graph_or_path=tf.get_default_graph(), value_feed_dict=feed_dict,
                                      layers=layers, path_logdir="C:/Users/bucpau/PycharmProjects/Academy/Logs/", path_outdir="C:/Users/bucpau/PycharmProjects/Academy/Visualization/")
    start = time.time() - start
    print("Total time for deconvolution visualization: {} Success: {}".format(start, is_success))
    return None

The feed_dict provided as input is the same one I use for testing, but with only one image; see here:

    random_representation = mnist_data.test.next_batch(1)
    ran_x = preprocess_batch(random_representation[0], mean, std)
    ran_y = random_representation[1]
    test_dict = {x: ran_x, y: ran_y, is_training: False, keep_probability: 1}
    visualize_layers(test_dict)

The exact error reads:

Traceback (most recent call last):
  File "C:/Users/bucpau/PycharmProjects/Academy/CNN_MNIST.py", line 139, in <module>
    main()
  File "C:/Users/bucpau/PycharmProjects/Academy/CNN_MNIST.py", line 136, in main
    visualize_layers(test_dict)
  File "C:/Users/bucpau/PycharmProjects/Academy/CNN_MNIST.py", line 68, in visualize_layers
    layers=layers, path_logdir="C:/Users/bucpau/PycharmProjects/Academy/Logs/", path_outdir="C:/Users/bucpau/PycharmProjects/Academy/Visualization/")
  File "C:\ProgramData\Anaconda3\lib\site-packages\tf_cnnvis-1.0.0-py3.6.egg\tf_cnnvis\tf_cnnvis.py", line 379, in deconv_visualization
  File "C:\ProgramData\Anaconda3\lib\site-packages\tf_cnnvis-1.0.0-py3.6.egg\tf_cnnvis\tf_cnnvis.py", line 141, in _get_visualization
  File "C:\ProgramData\Anaconda3\lib\site-packages\tf_cnnvis-1.0.0-py3.6.egg\tf_cnnvis\tf_cnnvis.py", line 200, in _visualization_by_layer_type
  File "C:\ProgramData\Anaconda3\lib\site-packages\tf_cnnvis-1.0.0-py3.6.egg\tf_cnnvis\tf_cnnvis.py", line 261, in _visualization_by_layer_name
  File "C:\ProgramData\Anaconda3\lib\site-packages\tf_cnnvis-1.0.0-py3.6.egg\tf_cnnvis\tf_cnnvis.py", line 308, in _deconvolution
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 895, in run
    run_metadata_ptr)
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 1109, in _run
    self._graph, fetches, feed_dict_tensor, feed_handles=feed_handles)
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 413, in __init__
    self._fetch_mapper = _FetchMapper.for_fetch(fetches)
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 233, in for_fetch
    return _ListFetchMapper(fetch)
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 340, in __init__
    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 340, in <listcomp>
    self._mappers = [_FetchMapper.for_fetch(fetch) for fetch in fetches]
  File "C:\ProgramData\Anaconda3\lib\site-packages\tensorflow\python\client\session.py", line 230, in for_fetch
    (fetch, type(fetch)))
TypeError: Fetch argument None has invalid type <class 'NoneType'>

What puzzles me is that the activation visualization works perfectly with the same arguments. Any ideas on what is happening here?

falaktheoptimist commented 7 years ago

We were finally able to replicate when this happens. It occurs for the flattened-input case. Passing the input_tensor argument as the reshaped (i.e. unflattened) tensor should resolve this.
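
For example, for an MNIST-style model whose placeholder takes flattened images (a minimal sketch; x_flat, x_batch and the 28x28 shape are hypothetical stand-ins for your own names):

import tensorflow as tf
import tf_cnnvis

x_flat = tf.placeholder(tf.float32, shape=[None, 784])  # flattened input
x_image = tf.reshape(x_flat, [-1, 28, 28, 1])           # the unflattened tensor the conv layers see

# ... build the model on x_image and restore its weights ...

is_success = tf_cnnvis.deconv_visualization(graph_or_path=tf.get_default_graph(),
                                            value_feed_dict={x_flat: x_batch},
                                            input_tensor=x_image,  # key change: pass the reshaped tensor
                                            layers='r')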

jubjamie commented 7 years ago

I've been so busy I've not had time to look at this since filing the issue! Do you mean feed the image as a rectangle instead of a flattened tensor? Because I'm pretty sure that's what I was doing!

ghost commented 7 years ago

@falaktheoptimist I've tried it out, and now the deconvolution works!

falaktheoptimist commented 7 years ago

@jubjamie Then we have yet to figure out your particular corner case. I see from your output above that it was an RGB image and not a flattened one.

fqfqiumou commented 7 years ago

@falaktheoptimist how do I reshape the input_tensor?

falaktheoptimist commented 7 years ago

Use tf.reshape
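
For example, to go from a flattened 784-vector placeholder back to an image tensor (a sketch; x_flat is a hypothetical flattened placeholder):

x_image = tf.reshape(x_flat, [-1, 28, 28, 1])  # NHWC shape for 28x28 grayscale images

and then pass x_image as input_tensor.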

fqfqiumou commented 7 years ago

@falaktheoptimist below is my code; I use the InceptionResnetV2 model. The activation visualization works perfectly, but deconv_visualization doesn't work (Fetch argument None has invalid type <class 'NoneType'>). Can you help me?


import os
import sys
import tensorflow as tf
import urllib2

from datasets import imagenet
from nets import vgg
from nets import inception_v4
from nets import inception_resnet_v2
from preprocessing import vgg_preprocessing
from preprocessing import inception_preprocessing

import time
import copy
import h5py
import numpy as np
from scipy.misc import imread, imresize
from tf_cnnvis import *

def get_img(src, img_size=False):
    img = imread(src, mode='RGB')
    if not (len(img.shape) == 3 and img.shape[2] == 3):
        img = np.dstack((img, img, img))
    if img_size != False:
        img = imresize(img, img_size)
    return img

checkpoints_dir = '/home/tfs/workspace/train_inception_resnet'
checkpoint = 'model.ckpt-326664'
slim = tf.contrib.slim
#image_size=vgg.vgg_16.default_image_size
image_size=inception_v4.inception_v4.default_image_size

with tf.Graph().as_default():
  url=("http://img6.cache.netease.com/photo/0001/2017-05-12/CK8G865719BR0001.jpg")
  if (len(sys.argv) > 1):
    url = (sys.argv[1])

  print(url)

  image_string=urllib2.urlopen(url).read()
  image = tf.image.decode_jpeg(image_string,channels=3)

  processed_image=inception_preprocessing.preprocess_image(image,image_size,image_size,is_training=False)
  processed_images=tf.expand_dims(processed_image,0)

  with slim.arg_scope(inception_resnet_v2.inception_resnet_v2_arg_scope()):

    logits,_=inception_resnet_v2.inception_resnet_v2(processed_images,num_classes=6,is_training=False)
    probabilities = tf.nn.softmax(logits)

    init_fn = slim.assign_from_checkpoint_fn(os.path.join(checkpoints_dir,checkpoint),slim.get_model_variables('InceptionResnetV2'))

    with tf.Session() as sess:
      init_fn(sess)

      X = tf.placeholder(tf.float32, shape = [None, image_size, image_size, 3]) # placeholder for input images

      im = get_img(os.path.join("./sample_images", "images.jpg"), (image_size,image_size,3)).astype(np.float32)
      im = np.expand_dims(im, axis = 0)

      # deconv visualization
      layers = ['r', 'p', 'c']

      start = time.time()
      is_success = deconv_visualization(graph_or_path = tf.get_default_graph(), value_feed_dict = {X : im}, 
                                  layers=layers, path_logdir="./Log/AlexNet", path_outdir="./Output/AlexNet")
      start = time.time() - start
      print("Total Time = %f" % (start))

res=slim.get_model_variables()

sconeyard commented 7 years ago

Same error:

with tf.Session() as sess:
    X = tf.placeholder(tf.float32, shape = [None, 400, 600, 3])
    input_tensor = {X: np.array([image_data])}
    vis.deconv_visualization(sess.graph, input_tensor, sess.graph.get_tensor_by_name('sab/DecodeJpeg:0'), layers=layers)

I use the inception_v3 network, after retraining. Activation visualization works, though it shows a whole lot of pandas; no idea if retraining just failed or if the code is broken. Deconv fails.

BhagyeshVikani commented 7 years ago

Hello @sconeyard, can you please change your code as shown below and post the error here?

with tf.Session() as sess:
    X = tf.placeholder(tf.float32, shape = [None, 400, 600, 3])
    input_tensor = {X: np.array([image_data])}
    vis.deconv_visualization(sess.graph, input_tensor, X, layers=layers)

BhagyeshVikani commented 7 years ago

Hiii @fqfqiumou,

Can you make the following change and re-run your experiment? Replace this

is_success = deconv_visualization(graph_or_path = tf.get_default_graph(), value_feed_dict = {X : im}, layers=layers, path_logdir="./Log/AlexNet", path_outdir="./Output/AlexNet")

with this

is_success = deconv_visualization(graph_or_path = tf.get_default_graph(), value_feed_dict = {X : im}, input_tensor=processed_images, layers=layers, path_logdir="./Log/AlexNet", path_outdir="./Output/AlexNet")

Thank You

fqeqiq168 commented 7 years ago

@BhagyeshVikani, thank you very much, it works. Because I use slim, I don't need to feed value_feed_dict; I just pass the image tensor to input_tensor.

BhagyeshVikani commented 7 years ago

Hello,

Clarification regarding the input_tensor argument in all three visualization functions: input_tensor (tf.Tensor object, default = None) is the input tensor to the model, i.e. the tensor where images enter the model. Note: this is not a standalone tensor/placeholder separate from the model.

Thank You
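
In other words (a sketch reusing the names from the InceptionResnetV2 example above):

# WRONG: a brand-new placeholder that is not connected to the model's graph
# input_tensor = tf.placeholder(tf.float32, shape=[None, 299, 299, 3])

# RIGHT: the tensor through which images actually flow into the model
is_success = deconv_visualization(graph_or_path=tf.get_default_graph(),
                                  value_feed_dict={X: im},
                                  input_tensor=processed_images,  # part of the model graph
                                  layers=['r', 'p', 'c'])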

BhagyeshVikani commented 6 years ago

Closing this now, as the issue seems to have been resolved. Reopen if needed.

ishwara-bhat commented 6 years ago

@BhagyeshVikani I'm having this trouble too. I followed all of the answers here and still could not get it working.

Here is my scenario. I have a class that opens a model created with the estimator API. It is not a frozen graph; it comes from .meta, .index, checkpoint, and .data-xx files. The CNN input shape is [864, 480], and I load the model in the class constructor.

def __init__(self, frozen_model_filename):
    tf.reset_default_graph()
    self.session = tf.Session()
    self.input_placeholder = tf.placeholder(tf.float32, shape=[1, 864, 480, 3])
    self.model = self.load_model_from_meta(vad_model_file_fullpath)

def load_model_from_meta(self, vad_model_file_name):
    saver = tf.train.import_meta_graph(vad_model_file_name)
    saver.restore(self.session,tf.train.latest_checkpoint(vad_model_dir))
    print("restore successful")

def visualize(self):
    im = np.expand_dims(imresize(imresize(imread(os.path.join("sample_images", "flicker1.jpg")), (256, 256)) - mean, (864, 480)), axis=0)

    layers = ['r', 'p', 'c']
    start = time.time()
    print("before session")
    self.session.run(tf.global_variables_initializer())
    is_success = activation_visualization(sess_graph_path=self.session, value_feed_dict={self.input_placeholder: im},
                                          layers=layers, path_logdir=os.path.join("Log", "MyModelfromMeta"),
                                          path_outdir=os.path.join("Output", "MyModelfromMeta"))

    layers = ['r', 'p', 'c']
    is_success = deconv_visualization(sess_graph_path=self.session, value_feed_dict={self.input_placeholder: im},
                                      # input_tensor="softmax_tensor",  # tried this with and without input_tensor
                                      layers=layers, path_logdir=os.path.join("Log", "MyModelfromMeta"),
                                      path_outdir=os.path.join("Output", "MyModelfromMeta"))

Main code:

mean = np.load("./img_mean.npy").transpose((1, 2, 0)) # load mean image of imagenet dataset
Myclassifier = MyModel(unfrozen_model_Metafile_name)

print(model.predict(images))

Myclassifier.visualize()

Like with others, it works for activation_visualization API, but fails with deconv_visualization(). It gives the same error - TypeError: Fetch argument None has invalid type <class 'NoneType'>

Please suggest the way out.

jaspreet-sambee commented 5 years ago

@ishwara-bhat The above-mentioned solution worked for me too. Could you specify what exactly the "softmax tensor" is? Secondly, you might want to check your reconstruction tensor as well.
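
Also note that, per the input_tensor clarification above, input_tensor must be the actual tf.Tensor through which images enter the graph, not a string name and not the softmax output. A sketch (the tensor name "input_images:0" is hypothetical; look up the real name in your imported graph):

input_t = self.session.graph.get_tensor_by_name("input_images:0")  # the model's image input
is_success = deconv_visualization(sess_graph_path=self.session,
                                  value_feed_dict={self.input_placeholder: im},
                                  input_tensor=input_t,
                                  layers=['r', 'p', 'c'])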

sidharthnpisharody commented 4 years ago

        for epoch in range(training_epochs):
            avg_cost = 0.
            total_batch = int(mnist.train.num_examples / batch_size)

            for i in range(total_batch):
                minibatch_x, minibatch_y = mnist.train.next_batch(batch_size)
                sess.run(train_op, feed_dict={x: minibatch_x, y: minibatch_y})
                # Compute average loss
                avg_cost += sess.run(cost, feed_dict={x: minibatch_x, y: minibatch_y}) / total_batch

            # Display logs per epoch step
            if epoch % display_step == 0:
                print("Epoch:", '%04d' % (epoch + 1), "cost =", "{:.9f}".format(avg_cost))
                accuracy = sess.run(eval_op, feed_dict={x: mnist.validation.images, y: mnist.validation.labels})
                print("Validation Error:", (1 - accuracy))

                summary_str = sess.run(summary_op, feed_dict={x: minibatch_x, y: minibatch_y})
                summary_writer.add_summary(summary_str, sess.run(global_step))

                saver.save(sess, "mlp_logs/model-checkpoint", global_step=global_step)

        print("Optimization Finished!")
        accuracy = sess.run(eval_op, feed_dict={x: mnist.test.images, y: mnist.test.labels})
        print("Test Accuracy: ", accuracy)

The same error is coming up here as well; can someone help?

    (fetch, type(fetch)))
TypeError: Fetch argument None has invalid type <class 'NoneType'>