summary, _ = sess.run([merged, train_step],
                      feed_dict=feed_dict(True),
                      options=run_options,
                      run_metadata=run_metadata)
train_writer.add_run_metadata(run_metadata, 'step%d' % i)
train_writer.add_summary(summary, i)
Operations in TensorFlow don't do anything until you run them, or an op that depends on their output. And the summary nodes that we've just created are peripheral to your graph: none of the ops you are currently running depend on them. So, to generate summaries, we need to run all of these summary nodes. Managing them by hand would be tedious, so use tf.summary.merge_all to combine them into a single op that generates all the summary data.
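A minimal, self-contained sketch of that pattern (the toy loss, log directory and feed values below are illustrative, not taken from the snippet above):

import tensorflow as tf

x = tf.placeholder(tf.float32, shape=[])
loss = tf.square(x - 1.0)

tf.summary.scalar('loss', loss)   # registers a summary node in the graph
merged = tf.summary.merge_all()   # one op that evaluates every registered summary

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./logs/train', sess.graph)
    for step in range(3):
        summary, _ = sess.run([merged, loss], feed_dict={x: float(step)})
        writer.add_summary(summary, step)
    writer.close()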
Why does it read the whole directory, rather than an individual file? You might have been using supervisor.py to run your model, in which case if TensorFlow crashes, the supervisor will restart it from a checkpoint. When it restarts, it will start writing to a new events file, and TensorBoard will stitch the various event files together to produce a consistent history of what happened.
TensorBoard Text: Markdown Support
The Text Dashboard displays text snippets saved via tf.summary.text. Markdown features including hyperlinks, lists, and tables are all supported.
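A hedged sketch of logging a Markdown table with tf.summary.text (the tag name, table contents and log directory are made up for illustration):

import tensorflow as tf

markdown = tf.constant(
    "**Run config**\n\n"
    "| param | value |\n"
    "| ----- | ----- |\n"
    "| lr    | 1e-3  |\n"
    "| batch | 32    |\n")
text_summary = tf.summary.text('run_config', markdown)

with tf.Session() as sess:
    writer = tf.summary.FileWriter('./logs/text', sess.graph)
    writer.add_summary(sess.run(text_summary), global_step=0)
    writer.close()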
# use the accuracy as a metric
accuracy = tf.metrics.accuracy(
    labels=labels,
    predictions=tf.argmax(logits, axis=1))

# create an estimator spec with the loss and accuracy
estimator_spec = tf.estimator.EstimatorSpec(
    mode=tf.estimator.ModeKeys.EVAL,
    loss=loss,
    eval_metric_ops={
        'accuracy': accuracy
    })
This is the right way to use metrics. Note that tf.metrics.accuracy returns a (value_op, update_op) pair: if the update op accuracy[1] is never run, the value op accuracy[0] stays at 0 and is never updated, so it will not match the metric the Estimator reports through eval_metric_ops['accuracy'].
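A minimal sketch of that behaviour with toy labels and predictions (not taken from the snippets here): the value op stays at 0 until the update op has been run at least once.

import tensorflow as tf

labels = tf.constant([1, 0, 1, 1])
predictions = tf.constant([1, 0, 0, 1])
acc_value, acc_update = tf.metrics.accuracy(labels=labels, predictions=predictions)

with tf.Session() as sess:
    sess.run(tf.local_variables_initializer())  # metric counters live in local variables
    print(sess.run(acc_value))   # 0.0 -- nothing has been accumulated yet
    sess.run(acc_update)         # run the update op once
    print(sess.run(acc_value))   # 0.75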
metric_map = {}
metric_map['miou'] = tf.metrics.mean_iou(
    labels=labels, predictions=predictions, num_classes=num_classes, weights=weights)
metric_map['acc'] = tf.metrics.accuracy(
    labels=labels, predictions=predictions, weights=tf.reshape(weights, shape=[-1]))

# each entry is a (value_op, update_op) pair; only the value op goes into the summary
metrics = {}
metrics['miou'] = tf.identity(metric_map['miou'][0], name='hello')
metrics['acc'] = tf.identity(metric_map['acc'][0], name='world')
for x in ['miou', 'acc']:
    tf.summary.scalar('metrics/%s' % x, metrics[x])
summary_op = tf.summary.merge_all()

sess = tf.Session()
sess.run(tf.global_variables_initializer())
sess.run(tf.local_variables_initializer())
for i in trange(5):
    # running metric_map executes the update ops as well, so the values keep accumulating
    np_img, np_seg, np_edge, img_name, np_height, np_width, summary, metrics_np = sess.run(
        [img, seg, edge, img_filename, height, width, summary_op, metric_map])
    print(img_name, np_height, np_width)
    print(metrics_np['miou'][0], metrics_np['acc'][0])
    writer.add_summary(summary, i)
# build the model graph and serialize its GraphDef to FLAGS.output_file
with tf.Graph().as_default() as graph:
    dataset = dataset_factory.get_dataset(FLAGS.dataset_name, 'train',
                                          FLAGS.dataset_dir)
    network_fn = nets_factory.get_network_fn(
        FLAGS.model_name,
        num_classes=(dataset.num_classes - FLAGS.labels_offset),
        is_training=FLAGS.is_training)
    image_size = FLAGS.image_size or network_fn.default_image_size
    placeholder = tf.placeholder(name='input', dtype=tf.float32,
                                 shape=[FLAGS.batch_size, image_size,
                                        image_size, 3])
    network_fn(placeholder)
    graph_def = graph.as_graph_def()
    with gfile.GFile(FLAGS.output_file, 'wb') as f:
        f.write(graph_def.SerializeToString())
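As a follow-up sketch, the exported GraphDef can be loaded back for a quick sanity check (the .pb path below is a placeholder):

import tensorflow as tf

graph_def = tf.GraphDef()
with tf.gfile.GFile('inference_graph.pb', 'rb') as f:  # placeholder path
    graph_def.ParseFromString(f.read())

with tf.Graph().as_default() as g:
    tf.import_graph_def(graph_def, name='')
    print([op.name for op in g.get_operations()][:10])  # first few op names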
# Define the metrics:
names_to_values, names_to_updates = slim.metrics.aggregate_metric_map({
    'Accuracy': slim.metrics.streaming_accuracy(predictions, labels),
    'Recall_5': slim.metrics.streaming_recall_at_k(
        logits, labels, 5),
})
for name, value in names_to_values.items():
    summary_name = 'eval/%s' % name
    op = tf.summary.scalar(summary_name, value, collections=[])
    op = tf.Print(op, [value], summary_name)
    tf.add_to_collection(tf.GraphKeys.SUMMARIES, op)
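These are streaming metrics, so the update ops in names_to_updates must be run over the evaluation data; a hedged sketch using slim's evaluation loop (checkpoint_path, logdir and num_batches are placeholders):

import tensorflow.contrib.slim as slim

slim.evaluation.evaluate_once(
    master='',
    checkpoint_path=checkpoint_path,   # placeholder checkpoint to evaluate
    logdir=logdir,                     # placeholder directory for eval summaries
    num_evals=num_batches,             # placeholder: number of eval batches
    eval_op=list(names_to_updates.values()))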
# slim: initialize model weights from a checkpoint path, skipping excluded scopes
variables_to_restore = []
for var in slim.get_model_variables():
    for exclusion in exclusions:
        if var.op.name.startswith(exclusion):
            break
    else:
        variables_to_restore.append(var)
init_fn = slim.assign_from_checkpoint_fn(
    checkpoint_path,
    variables_to_restore,
    ignore_missing_vars=FLAGS.ignore_missing_vars)
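A sketch of how the resulting init_fn is typically consumed: pass it to slim.learning.train so the checkpoint restore runs once after variables are initialized (train_op and FLAGS.train_dir are whatever the training script defines):

slim.learning.train(
    train_op,                 # training op defined elsewhere
    logdir=FLAGS.train_dir,   # placeholder log directory
    init_fn=init_fn)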
TODO:
- ignore label for edge (orange line for psp_edge, green line for pspnet)