balancap / SSD-Tensorflow

Single Shot MultiBox Detector in TensorFlow

ipynb notebook test error #50


freaad commented 7 years ago

I am running the ipynb notebook on Windows, with the Anaconda build of TensorFlow. I hit the error below in the 6th cell and don't understand it. I also want to run the model on my own images. Can you recommend a tool for creating Pascal VOC-style annotations? I have already collected the images; I just need to annotate them.

TypeError                                 Traceback (most recent call last)
<ipython-input-...> in <module>()
     11 reuse = True if 'ssd_net' in locals() else None
     12 ssd_net = ssd_vgg_300.SSDNet()
---> 13 with slim.arg_scope(ssd_net.arg_scope(data_format=data_format)):
     14     predictions, localisations, _, _ = ssd_net.net(image_4d, is_training=False, reuse=reuse)
     15

C:\Users\kangd345\Desktop\New folder (3)\SSD-Tensorflow-master\nets\ssd_vgg_300.py in arg_scope(self, weight_decay, data_format)
    163         """Network arg_scope.
    164         """
--> 165         return ssd_arg_scope(weight_decay, data_format=data_format)
    166
    167     def arg_scope_caffe(self, caffe_scope):

C:\Users\kangd345\Desktop\New folder (3)\SSD-Tensorflow-master\nets\ssd_vgg_300.py in ssd_arg_scope(weight_decay, data_format)
    534                         weights_regularizer=slim.l2_regularizer(weight_decay),
    535                         weights_initializer=tf.contrib.layers.xavier_initializer(),
--> 536                         biases_initializer=tf.zeros_initializer()):
    537         with slim.arg_scope([slim.conv2d, slim.max_pool2d],
    538                             padding='SAME',

TypeError: zeros_initializer() missing 1 required positional argument: 'shape'
balancap commented 7 years ago

Can you check which version of TensorFlow you are using, by typing tensorflow.__version__? It looks very similar to errors seen with pre-1.0 TF versions.
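
A minimal way to check, assuming TensorFlow is importable in the same environment the notebook runs in:

import tensorflow as tf

# Prints the installed version string, e.g. '0.12.1' or '1.0.0';
# the error above is typical of pre-1.0 releases.
print(tf.__version__)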

freaad commented 7 years ago

Mine is 0.12.

benn94 commented 7 years ago

I also encountered an error in this notebook. I ran this:

from datasets import pascalvoc_2007
from datasets import pascalvoc_2012

DATASET_DIR = '/media/paul/DataExt4/PascalVOC/dataset/'
SPLIT_NAME = 'test'
BATCH_SIZE = 16

# Dataset provider loading data from the dataset.
dataset = pascalvoc_2007.get_split(SPLIT_NAME, DATASET_DIR)
provider = slim.dataset_data_provider.DatasetDataProvider(dataset,
                                                          shuffle=False,
                                                          num_epochs=1,
                                                          common_queue_capacity=2 * BATCH_SIZE,
                                                          common_queue_min=BATCH_SIZE)
[image, shape, bboxes, labels] = provider.get(['image', 'shape', 'object/bbox', 'object/label'])
print('Dataset:', dataset.data_sources, '|', dataset.num_samples)

and here is the output:

NotFoundError                             Traceback (most recent call last)
<ipython-input-16-4ead4da6636f> in <module>()
     12                                                            num_epochs=1,
     13                                                           common_queue_capacity=2 * BATCH_SIZE,
---> 14                                                           common_queue_min=BATCH_SIZE)
     15 [image, shape, bboxes, labels] = provider.get(['image', 'shape', 'object/bbox', 'object/label'])
     16 print('Dataset:', dataset.data_sources, '|', dataset.num_samples)

/usr/local/lib/python3.5/dist-packages/tensorflow/contrib/slim/python/slim/data/dataset_data_provider.py in __init__(self, dataset, num_readers, reader_kwargs, shuffle, num_epochs, common_queue_capacity, common_queue_min, record_key, seed)
     89         capacity=common_queue_capacity,
     90         min_after_dequeue=common_queue_min,
---> 91         seed=seed)
     92 
     93     items = dataset.decoder.list_items()

/usr/local/lib/python3.5/dist-packages/tensorflow/contrib/slim/python/slim/data/parallel_reader.py in parallel_read(data_sources, reader_class, num_epochs, num_readers, reader_kwargs, shuffle, dtypes, capacity, min_after_dequeue, seed, scope)
    208     key, value: a tuple of keys and values from the data_source.
    209   """
--> 210   data_files = get_data_files(data_sources)
    211   with ops.name_scope(scope, 'parallel_read'):
    212     filename_queue = tf_input.string_input_producer(

/usr/local/lib/python3.5/dist-packages/tensorflow/contrib/slim/python/slim/data/parallel_reader.py in get_data_files(data_sources)
    277   else:
    278     if '*' in data_sources or '?' in data_sources or '[' in data_sources:
--> 279       data_files = gfile.Glob(data_sources)
    280     else:
    281       data_files = [data_sources]

/usr/local/lib/python3.5/dist-packages/tensorflow/python/lib/io/file_io.py in get_matching_files(filename)
    280     return [compat.as_str_any(matching_filename)
    281             for matching_filename in pywrap_tensorflow.GetMatchingFiles(
--> 282                 compat.as_bytes(filename), status)]
    283 
    284 

/usr/lib/python3.5/contextlib.py in __exit__(self, type, value, traceback)
     64         if type is None:
     65             try:
---> 66                 next(self.gen)
     67             except StopIteration:
     68                 return

/usr/local/lib/python3.5/dist-packages/tensorflow/python/framework/errors_impl.py in raise_exception_on_not_ok_status()
    464           None, None,
    465           compat.as_text(pywrap_tensorflow.TF_Message(status)),
--> 466           pywrap_tensorflow.TF_GetCode(status))
    467   finally:
    468     pywrap_tensorflow.TF_DeleteStatus(status)

NotFoundError: /media/paul/DataExt4/PascalVOC/dataset
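
The NotFoundError above is most likely because DATASET_DIR is the original notebook author's local path, which does not exist on other machines. A sketch of the fix, assuming the Pascal VOC TFRecords were generated with the repo's tf_convert_data.py script (the './tfrecords/' path is hypothetical):

# Point DATASET_DIR at the directory that actually contains your
# converted Pascal VOC TFRecord files (hypothetical local path).
DATASET_DIR = './tfrecords/'
dataset = pascalvoc_2007.get_split(SPLIT_NAME, DATASET_DIR)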
shartoo commented 7 years ago

Changing biases_initializer=tf.zeros_initializer() to biases_initializer=tf.zeros_initializer works for me.
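
For context: in pre-1.0 TensorFlow, tf.zeros_initializer is itself the initializer callable (it expects a shape when called), while from 1.0 onward it is a class whose instance tf.zeros_initializer() is the initializer, which is why calling it with no arguments fails on 0.12. A version-compatible sketch for the relevant line in nets/ssd_vgg_300.py's ssd_arg_scope (the helper name zeros_init is mine):

import tensorflow as tf

# TF >= 1.0: tf.zeros_initializer() returns an initializer object.
# TF <  1.0: tf.zeros_initializer is already the initializer callable.
major_version = int(tf.__version__.split('.')[0])
zeros_init = tf.zeros_initializer() if major_version >= 1 else tf.zeros_initializer

# ... then inside ssd_arg_scope, pass:
#     biases_initializer=zeros_init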