robisz1911 / lucidfinetuning


can't render finetuned InceptionV1 #1

Open · virilo opened this issue 4 years ago

virilo commented 4 years ago

I finetuned InceptionV1 on the flowers17 dataset.

At the very end, the log said:

saving model.pb model.ckpt googlenetLucid.pb with top node Mixed_5c_Concatenated/concat
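(For reference, a frozen model.pb like this is typically produced by folding the checkpoint variables into constants up to that top node. A minimal TF1-style sketch; only the file names and the output node come from the log above, everything else is assumed:)

```python
import tensorflow as tf

# restore the finetuned weights, then freeze the graph up to the top node
with tf.compat.v1.Session() as sess:
    saver = tf.compat.v1.train.import_meta_graph('model.ckpt.meta')
    saver.restore(sess, 'model.ckpt')
    frozen = tf.compat.v1.graph_util.convert_variables_to_constants(
        sess, sess.graph_def, ['Mixed_5c_Concatenated/concat'])

with tf.io.gfile.GFile('model.pb', 'wb') as f:
    f.write(frozen.SerializeToString())
```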

Then I ran a copy of activation-atlas-simple.ipynb (https://colab.research.google.com/github/tensorflow/lucid/blob/master/notebooks/activation-atlas/activation-atlas-simple.ipynb)

and added these lines to set up the newly trained network:

```python
model = models.InceptionV1()
model.model_path = 'model.pb'  # 'googlenetLucid.pb'
model.labels_path = 'flowers17.txt'  # sorted flower directory names
model.load_graphdef()
```

It was able to render the UMAP 2D scatterplot.

But it fails when trying to create the canvas:

ValueError: Attempted to map inputs that were not found in graph_def: [input:0]

attempt:  0
---------------------------------------------------------------------------
ValueError                                Traceback (most recent call last)
<ipython-input-38-ebcaa285565a> in <module>()
      4 xs = layout[:, 0]
      5 ys = layout[:, 1]
----> 6 canvas = render_layout(model, layer, S, xs, ys, raw_activations, n_steps=512, grid_size=(20, 20), n_attempts=1)
      7 show(canvas)

7 frames
<ipython-input-37-4ab50294c46c> in render_layout(model, layer, S, xs, ys, activ, n_steps, n_attempts, min_density, grid_size, icon_size, x_extent, y_extent)
    173 
    174     icons = np.asarray(icons)
--> 175     icon_batch, losses = render_icons(icons[:,0], model, alpha=False, layer=layer, S=S, n_steps=n_steps, size=icon_size, num_attempts=n_attempts)
    176 
    177     canvas = np.ones((icon_size * grid_size[0], icon_size * grid_size[1], 3))

<ipython-input-37-4ab50294c46c> in render_icons(directions, model, layer, size, n_steps, verbose, S, num_attempts, cossim, alpha)
     90           losses = []
     91           trainer = tf.train.AdamOptimizer(learning_rate)
---> 92           T = render.make_vis_T(model, obj, param_f, trainer, transforms)
     93           loss_t, vis_op, t_image = T("loss"), T("vis_op"), T("input")
     94           losses_ = [obj_part(T) for obj_part in obj_list]

/usr/local/lib/python3.6/dist-packages/lucid/optvis/render.py in make_vis_T(model, objective_f, param_f, optimizer, transforms, relu_gradient_override)
    176       T = import_model(model, transform_f(t_image), t_image)
    177   else:
--> 178     T = import_model(model, transform_f(t_image), t_image)
    179   loss = objective_f(T)
    180 

/usr/local/lib/python3.6/dist-packages/lucid/optvis/render.py in import_model(model, t_image, t_image_raw)
    252 def import_model(model, t_image, t_image_raw):
    253 
--> 254   model.import_graph(t_image, scope="import", forget_xy_shape=True)
    255 
    256   def T(layer):

/usr/local/lib/python3.6/dist-packages/lucid/modelzoo/vision_base.py in import_graph(self, t_input, scope, forget_xy_shape)
    140     t_input, t_prep_input = self.create_input(t_input, forget_xy_shape)
    141     tf.import_graph_def(
--> 142         self.graph_def, {self.input_name: t_prep_input}, name=scope)
    143     self.post_import(scope)
    144 

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/util/deprecation.py in new_func(*args, **kwargs)
    505                 'in a future version' if date is None else ('after %s' % date),
    506                 instructions)
--> 507       return func(*args, **kwargs)
    508 
    509     doc = _add_deprecated_arg_notice_to_docstring(

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/importer.py in import_graph_def(graph_def, input_map, return_elements, name, op_dict, producer_op_list)
    403       name=name,
    404       op_dict=op_dict,
--> 405       producer_op_list=producer_op_list)
    406 
    407 

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/importer.py in _import_graph_def_internal(graph_def, input_map, return_elements, validate_colocation_constraints, name, op_dict, producer_op_list)
    533     raise ValueError(
    534         'Attempted to map inputs that were not found in graph_def: [%s]' %
--> 535         ', '.join(missing_unused_input_keys))
    536 
    537   if return_elements is None:

ValueError: Attempted to map inputs that were not found in graph_def: [input:0]
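The error means that the imported graph_def contains no tensor named input:0: lucid's stock InceptionV1 class assumes input_name = 'input', which does not match the placeholder in the finetuned graph. The actual input node can be listed directly from the frozen file (a sketch, assuming the model.pb above):

```python
import tensorflow as tf

graph_def = tf.compat.v1.GraphDef()
with tf.io.gfile.GFile('model.pb', 'rb') as f:
    graph_def.ParseFromString(f.read())

# the graph's real input is whatever Placeholder node(s) it contains
for node in graph_def.node:
    if node.op == 'Placeholder':
        print(node.name)
```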

Is this version of lucidfinetuning compatible with activation-atlas-simple.ipynb? Is there anything else I should configure?

Thanks in advance

virilo commented 4 years ago

I configured the input name as shown in your example vis.py:

```python
model = models.InceptionV1()
model.model_path = 'model.pb'  # 'googlenetLucid.pb'
model.labels_path = 'flowers17.txt'
model.input_name = 'input_1'
model.image_shape = [224, 224, 3]  # [299, 299, 3]
model.image_value_range = (0, 1)
model.load_graphdef()
```

Now it has stopped complaining about input:0, but a new error has arisen:

TypeError: Tensors in list passed to 'values' of 'ConcatV2' Op have types [float32, float32, float32, int32] that don't all match.

---------------------------------------------------------------------------

ValueError                                Traceback (most recent call last)

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
    471                 preferred_dtype=default_dtype,
--> 472                 as_ref=input_arg.is_ref)
    473             if input_arg.number_attr and len(

13 frames

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in internal_convert_n_to_tensor(values, dtype, name, as_ref, preferred_dtype, ctx)
   1361             preferred_dtype=preferred_dtype,
-> 1362             ctx=ctx))
   1363   return ret

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/ops.py in internal_convert_to_tensor(value, dtype, name, as_ref, preferred_dtype, ctx, accepted_result_types)
   1272           "Tensor conversion requested dtype %s for Tensor with dtype %s: %r" %
-> 1273           (dtype.name, value.dtype.name, value))
   1274     return value

ValueError: Tensor conversion requested dtype float32 for Tensor with dtype int32: <tf.Tensor 'import/Mixed_3b_Concatenated/concat/axis:0' shape=() dtype=int32>

During handling of the above exception, another exception occurred:

TypeError                                 Traceback (most recent call last)

<ipython-input-15-ebcaa285565a> in <module>()
      4 xs = layout[:, 0]
      5 ys = layout[:, 1]
----> 6 canvas = render_layout(model, layer, S, xs, ys, raw_activations, n_steps=512, grid_size=(20, 20), n_attempts=1)
      7 show(canvas)

<ipython-input-14-4ab50294c46c> in render_layout(model, layer, S, xs, ys, activ, n_steps, n_attempts, min_density, grid_size, icon_size, x_extent, y_extent)
    173 
    174     icons = np.asarray(icons)
--> 175     icon_batch, losses = render_icons(icons[:,0], model, alpha=False, layer=layer, S=S, n_steps=n_steps, size=icon_size, num_attempts=n_attempts)
    176 
    177     canvas = np.ones((icon_size * grid_size[0], icon_size * grid_size[1], 3))

<ipython-input-14-4ab50294c46c> in render_icons(directions, model, layer, size, n_steps, verbose, S, num_attempts, cossim, alpha)
     90           losses = []
     91           trainer = tf.train.AdamOptimizer(learning_rate)
---> 92           T = render.make_vis_T(model, obj, param_f, trainer, transforms)
     93           loss_t, vis_op, t_image = T("loss"), T("vis_op"), T("input")
     94           losses_ = [obj_part(T) for obj_part in obj_list]

/usr/local/lib/python3.6/dist-packages/lucid/optvis/render.py in make_vis_T(model, objective_f, param_f, optimizer, transforms, relu_gradient_override)
    176       T = import_model(model, transform_f(t_image), t_image)
    177   else:
--> 178     T = import_model(model, transform_f(t_image), t_image)
    179   loss = objective_f(T)
    180 

/usr/local/lib/python3.6/dist-packages/lucid/optvis/render.py in import_model(model, t_image, t_image_raw)
    252 def import_model(model, t_image, t_image_raw):
    253 
--> 254   model.import_graph(t_image, scope="import", forget_xy_shape=True)
    255 
    256   def T(layer):

/usr/local/lib/python3.6/dist-packages/lucid/modelzoo/vision_base.py in import_graph(self, t_input, scope, forget_xy_shape)
    141     tf.import_graph_def(
    142         self.graph_def, {self.input_name: t_prep_input}, name=scope)
--> 143     self.post_import(scope)
    144 
    145   def show_graph(self):

/usr/local/lib/python3.6/dist-packages/lucid/modelzoo/other_models/InceptionV1.py in post_import(self, scope)
     57 
     58   def post_import(self, scope):
---> 59     _populate_inception_bottlenecks(scope)
     60 
     61 InceptionV1.layers = _layers_from_list_of_dicts(InceptionV1, [

/usr/local/lib/python3.6/dist-packages/lucid/modelzoo/other_models/InceptionV1.py in _populate_inception_bottlenecks(scope)
     32         pre_relus.append(tower)
     33       concat_name = scope + '/' + name + '_pre_relu'
---> 34       _ = tf.concat(pre_relus, -1, name=concat_name)
     35 
     36 

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/util/dispatch.py in wrapper(*args, **kwargs)
    178     """Call target, and fall back on dispatchers if there is a TypeError."""
    179     try:
--> 180       return target(*args, **kwargs)
    181     except (TypeError, ValueError):
    182       # Note: convert_to_eager_tensor currently raises a ValueError, not a

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/array_ops.py in concat(values, axis, name)
   1418           dtype=dtypes.int32).get_shape().assert_has_rank(0)
   1419       return identity(values[0], name=name)
-> 1420   return gen_array_ops.concat_v2(values=values, axis=axis, name=name)
   1421 
   1422 

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/ops/gen_array_ops.py in concat_v2(values, axis, name)
   1255   _attr_N = len(values)
   1256   _, _, _op = _op_def_lib._apply_op_helper(
-> 1257         "ConcatV2", values=values, axis=axis, name=name)
   1258   _result = _op.outputs[:]
   1259   _inputs_flat = _op.inputs

/usr/local/lib/python3.6/dist-packages/tensorflow_core/python/framework/op_def_library.py in _apply_op_helper(self, op_type_name, name, **keywords)
    498                                 (prefix, dtype.name))
    499               else:
--> 500                 raise TypeError("%s that don't all match." % prefix)
    501             else:
    502               raise TypeError(

TypeError: Tensors in list passed to 'values' of 'ConcatV2' Op have types [float32, float32, float32, int32] that don't all match.
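For what it's worth, this second failure looks like it comes from lucid's InceptionV1-specific post_import hook rather than from the model itself: _populate_inception_bottlenecks walks every op with 'Concat' in its type and concatenates op.inputs[1:], which is correct for the original GoogLeNet graph (old-style Concat, with the axis as input 0) but not for a retrained graph that uses ConcatV2, where the axis is the last input. That is why the int32 import/Mixed_3b_Concatenated/concat/axis:0 tensor ends up in the float32 list. One way to sidestep the hook is to load the frozen graph through lucid's generic Model base class, whose post_import is a no-op (a sketch; the class name is ours, attribute values copied from the config above):

```python
from lucid.modelzoo.vision_base import Model

class FinetunedInceptionV1(Model):
    # plain Model subclass: no InceptionV1-specific post_import hook runs
    model_path = 'model.pb'
    labels_path = 'flowers17.txt'
    input_name = 'input_1'
    image_shape = [224, 224, 3]
    image_value_range = (0, 1)

model = FinetunedInceptionV1()
model.load_graphdef()
```

Any layer names passed to the atlas code then have to match the node names in the finetuned graph (e.g. the Mixed_5c_Concatenated/concat node from the training log), since the *_pre_relu bottleneck aliases are no longer created.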
virilo commented 4 years ago

I shared the code here:

https://colab.research.google.com/drive/1ngs5lmxzbacx48QXWpsVkqd9ULdrEtr7

Cell [7] contains the model-loading code.