BVLC / caffe

Caffe: a fast open framework for deep learning.
http://caffe.berkeleyvision.org/

F0707 15:46:19.091634 13341 io.cpp:325] Check failed: 'anno' Must be non NULL #6955

Open Mrsherlock1996 opened 4 years ago

Mrsherlock1996 commented 4 years ago

Hello, I successfully compiled your caffe-ssd, but I get the following error while creating the LMDB files with create_data.sh:

```
sherlock@Y50-70:~/Downloads/myproject$ ./create_data.sh
root directory ok
list_file OK
img_file,anno,have been read
text_format has been used
anno_type is detection
/home/sherlock/Documents/caffe/build/tools/convert_annoset --anno_type=detection --label_type=xml --label_map_file=/home/sherlock/Downloads/myproject/labelmap_voc.prototxt --check_label=True --min_dim=0 --max_dim=0 --resize_height=256 --resize_width=256 --backend=lmdb --shuffle=False --check_size=False --encode_type=jpg --encoded=True --gray=False /home/sherlock/Downloads/myproject/ /home/sherlock/Downloads/myproject/trainval.txt /home/sherlock/Downloads/myproject/wider_face_LMDB/lmdb/wider_face_LMDB_trainval_lmdb
I0707 15:46:19.063556 13341 convert_annoset.cpp:122] A total of 12876 images.
I0707 15:46:19.063902 13341 db_lmdb.cpp:35] Opened lmdb /home/sherlock/Downloads/myproject/wider_face_LMDB/lmdb/wider_face_LMDB_trainval_lmdb
F0707 15:46:19.091634 13341 io.cpp:325] Check failed: 'anno' Must be non NULL
*** Check failure stack trace: ***
    @     0x7f4ff26160cd  google::LogMessage::Fail()
    @     0x7f4ff2617f33  google::LogMessage::SendToLog()
    @     0x7f4ff2615c28  google::LogMessage::Flush()
    @     0x7f4ff2618999  google::LogMessageFatal::~LogMessageFatal()
    @     0x7f4ff2a6ad45  google::CheckNotNull<>()
    @     0x7f4ff2a653cf  caffe::ReadXMLToAnnotatedDatum()
    @     0x7f4ff2a6715e  caffe::ReadRichImageToAnnotatedDatum()
    @     0x55c155bb1341  (unknown)
    @     0x7f4ff1416b97  __libc_start_main
    @     0x55c155bb1dfa  (unknown)
```

Could you help me solve this problem? Thanks!
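
For what it's worth, the stack trace points at caffe::ReadXMLToAnnotatedDatum, so the failing CHECK_NOTNULL seems to be hit while one of the annotation XML files is being parsed, not while the list file itself is read; one trigger that would fit is an `<object>` entry whose `<name>` element is missing or empty. Because the create_annoset.py helper posted below only validates the first entry of trainval.txt, a standalone scan of every annotation may narrow the problem down. The following is a minimal sketch, not part of the SSD tooling; it assumes each line of trainval.txt is `image_path annotation_path` (space separated, relative to the data root), which is what the helper script expects:

```python
#!/usr/bin/env python3
# Hypothetical helper (not part of caffe or the SSD scripts): scan every
# annotation XML referenced by a list file and report entries that are
# missing, unparsable, or contain an <object> without a usable <name>.
import os
import sys
import xml.etree.ElementTree as ET


def scan(root_dir, list_file):
    if not root_dir.endswith("/"):
        root_dir += "/"
    problems = 0
    with open(list_file) as lf:
        for lineno, line in enumerate(lf, 1):
            line = line.strip()
            if not line:
                continue
            parts = line.split(" ")
            if len(parts) != 2:
                print("line {}: expected 'image annotation', got: {!r}".format(lineno, line))
                problems += 1
                continue
            img_file, anno_file = parts
            anno_path = root_dir + anno_file
            if not os.path.exists(anno_path):
                print("line {}: annotation file missing: {}".format(lineno, anno_path))
                problems += 1
                continue
            try:
                root = ET.parse(anno_path).getroot()
            except ET.ParseError as err:
                print("line {}: cannot parse {}: {}".format(lineno, anno_path, err))
                problems += 1
                continue
            for obj in root.findall("object"):
                name = obj.findtext("name")
                if name is None or not name.strip():
                    print("line {}: <object> without <name> in {}".format(lineno, anno_path))
                    problems += 1
    print("done, {} problem(s) found".format(problems))


if __name__ == "__main__":
    # e.g. python3 scan_annotations.py /home/sherlock/Downloads/myproject/ trainval.txt
    scan(sys.argv[1], sys.argv[2])
```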

Mrsherlock1996 commented 4 years ago

This is create_data.sh:

```bash
cur_dir=$(cd $( dirname ${BASH_SOURCE[0]} ) && pwd )
root_dir=$cur_dir

cd $root_dir

redo=1
data_root_dir="$root_dir"
dataset_name="wider_face_LMDB"
mapfile="$root_dir/labelmap_voc.prototxt"
anno_type="detection"
db="lmdb"
min_dim=0
max_dim=0
width=256
height=256

extra_cmd="--encode-type=jpg --encoded"
if [ $redo ]
then
  extra_cmd="$extra_cmd --redo"
fi
for subset in trainval
do
  python3 $root_dir/create_annoset.py --anno-type=$anno_type --label-map-file=$mapfile --min-dim=$min_dim --max-dim=$max_dim --resize-width=$width --resize-height=$height --check-label $extra_cmd $data_root_dir /home/sherlock/Downloads/myproject/trainval.txt $data_root_dir/$dataset_name/$db/$dataset_name"_"$subset"_"$db examples/$dataset_name
done
```

Mrsherlock1996 commented 4 years ago

This is create_annoset.py:

```python
import argparse
import os
import shutil
import subprocess
import sys

from caffe.proto import caffe_pb2
from google.protobuf import text_format

if __name__ == "__main__":
    parser = argparse.ArgumentParser(description="Create AnnotatedDatum database")
    parser.add_argument("root",
                        help="The root directory which contains the images and annotations.")
    parser.add_argument("listfile",
                        help="The file which contains image paths and annotation info.")
    parser.add_argument("outdir",
                        help="The output directory which stores the database file.")
    parser.add_argument("exampledir",
                        help="The directory to store the link of the database files.")
    parser.add_argument("--redo", default=False, action="store_true",
                        help="Recreate the database.")
    parser.add_argument("--anno-type", default="classification",
                        help="The type of annotation {classification, detection}.")
    parser.add_argument("--label-type", default="xml",
                        help="The type of label file format for detection {xml, json, txt}.")
    parser.add_argument("--backend", default="lmdb",
                        help="The backend {lmdb, leveldb} for storing the result")
    parser.add_argument("--check-size", default=False, action="store_true",
                        help="Check that all the datum have the same size.")
    parser.add_argument("--encode-type", default="",
                        help="What type should we encode the image as ('png','jpg',...).")
    parser.add_argument("--encoded", default=False, action="store_true",
                        help="The encoded image will be saved in datum.")
    parser.add_argument("--gray", default=False, action="store_true",
                        help="Treat images as grayscale ones.")
    parser.add_argument("--label-map-file", default="",
                        help="A file with LabelMap protobuf message.")
    parser.add_argument("--min-dim", default=0, type=int,
                        help="Minimum dimension images are resized to.")
    parser.add_argument("--max-dim", default=0, type=int,
                        help="Maximum dimension images are resized to.")
    parser.add_argument("--resize-height", default=0, type=int,
                        help="Height images are resized to.")
    parser.add_argument("--resize-width", default=0, type=int,
                        help="Width images are resized to.")
    parser.add_argument("--shuffle", default=False, action="store_true",
                        help="Randomly shuffle the order of images and their labels.")
    parser.add_argument("--check-label", default=False, action="store_true",
                        help="Check that there is no duplicated name/label.")

    args = parser.parse_args()
    root_dir = args.root
    list_file = args.listfile
    out_dir = args.outdir
    example_dir = args.exampledir

    redo = args.redo
    anno_type = args.anno_type
    label_type = args.label_type
    backend = args.backend
    check_size = args.check_size
    encode_type = args.encode_type
    encoded = args.encoded
    gray = args.gray
    label_map_file = args.label_map_file
    min_dim = args.min_dim
    max_dim = args.max_dim
    resize_height = args.resize_height
    resize_width = args.resize_width
    shuffle = args.shuffle
    check_label = args.check_label

    # check if root directory exists
    if not os.path.exists(root_dir):
        print("root directory: {} does not exist".format(root_dir))
        sys.exit()
    print("root directory ok")
    # add "/" to root directory if needed
    if root_dir[-1] != "/":
        root_dir += "/"
    # check if list file exists
    if not os.path.exists(list_file):
        print("list file: {} does not exist".format(list_file))
        sys.exit()
    print("list_file OK")
    # check list file format is correct
    with open(list_file, "r") as lf:
        for line in lf.readlines():
            img_file, anno = line.strip("\n").split(" ")
            if not os.path.exists(root_dir + img_file):
                print("image file: {} does not exist".format(root_dir + img_file))
            if anno_type == "classification":
                if not anno.isdigit():
                    print("annotation: {} is not an integer".format(anno))
            elif anno_type == "detection":
                if not os.path.exists(root_dir + anno):
                    print("annotation file: {} does not exist".format(root_dir + anno))
                    sys.exit()
            print("img_file,anno,have been read")
            break
    # check if label map file exists
    if anno_type == "detection":
        if not os.path.exists(label_map_file):
            print("label map file: {} does not exist".format(label_map_file))
            sys.exit()
        label_map = caffe_pb2.LabelMap()
        lmf = open(label_map_file, "r")
        try:
            text_format.Merge(str(lmf.read()), label_map)
            print("text_format has been used")
        except:
            print("Cannot parse label map file: {}".format(label_map_file))
            sys.exit()
        print("anno_type is detection")
    out_parent_dir = os.path.dirname(out_dir)
    if not os.path.exists(out_parent_dir):
        os.makedirs(out_parent_dir)
    if os.path.exists(out_dir) and not redo:
        print("{} already exists and I do not hear redo".format(out_dir))
        sys.exit()
    if os.path.exists(out_dir):
        shutil.rmtree(out_dir)
        print("finished shutil")

    # get caffe root directory
    caffe_root = "/home/sherlock/Documents/caffe"
    if anno_type == "detection":
        cmd = "{}/build/tools/convert_annoset" \
              " --anno_type={}" \
              " --label_type={}" \
              " --label_map_file={}" \
              " --check_label={}" \
              " --min_dim={}" \
              " --max_dim={}" \
              " --resize_height={}" \
              " --resize_width={}" \
              " --backend={}" \
              " --shuffle={}" \
              " --check_size={}" \
              " --encode_type={}" \
              " --encoded={}" \
              " --gray={}" \
              " {} {} {}" \
              .format(caffe_root, anno_type, label_type, label_map_file, check_label,
                      min_dim, max_dim, resize_height, resize_width, backend, shuffle,
                      check_size, encode_type, encoded, gray, root_dir, list_file, out_dir)
    elif anno_type == "classification":
        cmd = "{}/build/tools/convert_annoset" \
              " --anno_type={}" \
              " --min_dim={}" \
              " --max_dim={}" \
              " --resize_height={}" \
              " --resize_width={}" \
              " --backend={}" \
              " --shuffle={}" \
              " --check_size={}" \
              " --encode_type={}" \
              " --encoded={}" \
              " --gray={}" \
              " {} {} {}" \
              .format(caffe_root, anno_type, min_dim, max_dim, resize_height, resize_width,
                      backend, shuffle, check_size, encode_type, encoded, gray,
                      root_dir, list_file, out_dir)
    print(cmd)
    process = subprocess.Popen(cmd.split(), stdout=subprocess.PIPE)
    output = process.communicate()[0]

    if not os.path.exists(example_dir):
        os.makedirs(example_dir)
    link_dir = os.path.join(example_dir, os.path.basename(out_dir))
    if os.path.exists(link_dir):
        os.unlink(link_dir)
    os.symlink(out_dir, link_dir)
```
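
One more sanity check that may be worth doing while the label map is already being parsed here: if I read the SSD converter correctly, every `<name>` used in the annotation XMLs also has to appear in labelmap_voc.prototxt, otherwise convert_annoset aborts as well (with an "Unknown name" message rather than this one). Below is a hedged sketch, not part of the SSD scripts, that reuses the same caffe_pb2 / text_format calls as the script above to cross-check the XMLs against the label map; the script name and invocation are hypothetical:

```python
# Hedged cross-check (not part of the SSD scripts): verify that every
# <name> used in the annotation XMLs is present in the label map.
import sys
import xml.etree.ElementTree as ET

from caffe.proto import caffe_pb2
from google.protobuf import text_format


def known_names(label_map_file):
    label_map = caffe_pb2.LabelMap()
    with open(label_map_file) as f:
        text_format.Merge(f.read(), label_map)
    return {item.name for item in label_map.item}


def check_names(root_dir, list_file, label_map_file):
    names = known_names(label_map_file)
    if not root_dir.endswith("/"):
        root_dir += "/"
    with open(list_file) as lf:
        for line in lf:
            line = line.strip()
            if not line:
                continue
            _, anno = line.split(" ", 1)
            try:
                root = ET.parse(root_dir + anno).getroot()
            except (OSError, ET.ParseError) as err:
                print("cannot read {}: {}".format(anno, err))
                continue
            for obj in root.findall("object"):
                name = (obj.findtext("name") or "").strip()
                if name not in names:
                    print("{}: object name {!r} not in label map".format(anno, name))


if __name__ == "__main__":
    # e.g. python3 check_label_names.py /home/sherlock/Downloads/myproject/ trainval.txt labelmap_voc.prototxt
    check_names(sys.argv[1], sys.argv[2], sys.argv[3])
```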