C4K-TensorFlow / C4K-DWTF

TensorBoard #1

Open dkmin opened 7 years ago

dkmin commented 7 years ago

(image attachment)

sonkwon commented 7 years ago

Code List

with tf.name_scope("test") as scope:
    tf.histogram_summary("weights", W)
    tf.scalar_summary("accuracy", accuracy)

merged = tf.merge_all_summaries()
writer = tf.train.SummaryWriter("/tmp/mnist_logs", sess.graph_def)
summary = sess.run(merged, ...)
writer.add_summary(summary)

# tensorboard --logdir=/tmp/mnist_logs

sonkwon commented 7 years ago
# -*- coding: utf-8 -*-
# Practice setting up and logging the values needed to use TensorBoard.

import tensorflow as tf
import numpy as np

data = np.loadtxt('./data.csv', delimiter=',',
                  unpack=True, dtype='float32')

x_data = np.transpose(data[0:2])
y_data = np.transpose(data[2:])

#########
# Build the neural network model
######
X = tf.placeholder(tf.float32)
Y = tf.placeholder(tf.float32)

# Blocks wrapped in a tf.name_scope are drawn as a single node in the TensorBoard graph.
with tf.name_scope('layer1'):
    W1 = tf.Variable(tf.random_uniform([2, 10], -1., 1.), name="W1")
    L1 = tf.nn.relu(tf.matmul(X, W1))

with tf.name_scope('layer2'):
    W2 = tf.Variable(tf.random_uniform([10, 20], -1., 1.), name="W2")
    L2 = tf.nn.relu(tf.matmul(L1, W2))

with tf.name_scope('output'):
    W3 = tf.Variable(tf.random_uniform([20, 3], -1., 1.), name="W3")
    model = tf.matmul(L2, W3)

with tf.name_scope('optimizer'):
    cost = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=model, labels=Y))

    optimizer = tf.train.AdamOptimizer(learning_rate=0.01)
    train_op = optimizer.minimize(cost)

    # tf.summary.scalar registers the values you want to collect.
    tf.summary.scalar('cost', cost)

#########
# Train the neural network model
######
init = tf.global_variables_initializer()
sess = tf.Session()
sess.run(init)

# Collect the values to be displayed in TensorBoard.
merged = tf.summary.merge_all()
# Set the graph to save and the directory for the log files.
writer = tf.summary.FileWriter('./logs', sess.graph)
# After training, start the TensorBoard web server on these logs with:
# tensorboard --logdir=./logs
# and open it in a browser at:
# http://localhost:6006

for step in range(100):
    sess.run(train_op, feed_dict={X: x_data, Y: y_data})

    # Collect and record the summary values at the appropriate points.
    summary = sess.run(merged, feed_dict={X: x_data, Y: y_data})
    writer.add_summary(summary, step)

#########
# Check the results
# 0: other, 1: mammal, 2: bird
######
prediction = tf.argmax(model, 1)
target = tf.argmax(Y, 1)
print('Prediction:', sess.run(prediction, feed_dict={X: x_data}))
print('Ground truth:', sess.run(target, feed_dict={Y: y_data}))

check_prediction = tf.equal(prediction, target)
accuracy = tf.reduce_mean(tf.cast(check_prediction, tf.float32))
print('Accuracy: %.2f' % sess.run(accuracy * 100, feed_dict={X: x_data, Y: y_data}))
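This script only registers the scalar cost. If you also want the weight histograms from the first snippet, a minimal addition would be the following (assuming the W1/W2/W3 variables defined above; the calls must run before tf.summary.merge_all() so they end up in merged):

# Sketch: register weight histograms before tf.summary.merge_all() is called.
tf.summary.histogram('W1', W1)
tf.summary.histogram('W2', W2)
tf.summary.histogram('W3', W3)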
dkmin commented 7 years ago
import tensorflow as tf
from tensorflow.examples.tutorials.mnist import input_data

def init_weights(shape, name):
    return tf.Variable(tf.random_normal(shape, stddev=0.01), name=name)

# This network is the same as the previous one except with an extra hidden layer + dropout
def model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden):
    # Add layer name scopes for better graph visualization
    with tf.name_scope("layer1"):
        X = tf.nn.dropout(X, p_keep_input)
        h = tf.nn.relu(tf.matmul(X, w_h))
    with tf.name_scope("layer2"):
        h = tf.nn.dropout(h, p_keep_hidden)
        h2 = tf.nn.relu(tf.matmul(h, w_h2))
    with tf.name_scope("layer3"):
        h2 = tf.nn.dropout(h2, p_keep_hidden)
        return tf.matmul(h2, w_o)

#Step 1 - Get Input Data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images, mnist.test.labels

#Step 2 - Create input and output placeholders for data
X = tf.placeholder("float", [None, 784], name="X")
Y = tf.placeholder("float", [None, 10], name="Y")

#Step 3 - Initialize weights
w_h = init_weights([784, 625], "w_h")
w_h2 = init_weights([625, 625], "w_h2")
w_o = init_weights([625, 10], "w_o")

#Step 4 - Add histogram summaries for weights
tf.histogram_summary("w_h_summ", w_h)
tf.histogram_summary("w_h2_summ", w_h2)
tf.histogram_summary("w_o_summ", w_o)

#Step 5 - Add dropout to input and hidden layers
p_keep_input = tf.placeholder("float", name="p_keep_input")
p_keep_hidden = tf.placeholder("float", name="p_keep_hidden")

#Step 6 - Create Model
py_x = model(X, w_h, w_h2, w_o, p_keep_input, p_keep_hidden)

#Step 7 Create cost function
with tf.name_scope("cost"):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(py_x, Y))
    train_op = tf.train.RMSPropOptimizer(0.001, 0.9).minimize(cost)
    # Add scalar summary for cost tensor
    tf.scalar_summary("cost", cost)

#Step 8 Measure accuracy
with tf.name_scope("accuracy"):
    correct_pred = tf.equal(tf.argmax(Y, 1), tf.argmax(py_x, 1)) # Count correct predictions
    acc_op = tf.reduce_mean(tf.cast(correct_pred, "float")) # Cast boolean to float to average
    # Add scalar summary for accuracy tensor
    tf.scalar_summary("accuracy", acc_op)

#Step 9 Create a session
with tf.Session() as sess:
    # Step 10 create a log writer. run 'tensorboard --logdir=./logs/nn_logs'
    writer = tf.train.SummaryWriter("./logs/nn_logs", sess.graph) # for 0.8
    merged = tf.merge_all_summaries()

    # Step 11 you need to initialize all variables
    tf.initialize_all_variables().run()

    #Step 12 - Train the model
    for i in range(20):
        for start, end in zip(range(0, len(trX), 128), range(128, len(trX)+1, 128)):
            sess.run(train_op, feed_dict={X: trX[start:end], Y: trY[start:end],
                                          p_keep_input: 0.8, p_keep_hidden: 0.5})
        summary, acc = sess.run([merged, acc_op], feed_dict={X: teX, Y: teY,
                                          p_keep_input: 1.0, p_keep_hidden: 1.0})
        writer.add_summary(summary, i)  # Write summary
        print(i, acc)                   # Report the accuracy
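Note that this listing deliberately targets the pre-1.0 API (the Step 10 comment says "for 0.8"). Beyond the summary renames shown after the first snippet, two more calls are spelled differently on TensorFlow 1.x; a hedged sketch of the 1.x replacements, reusing the names from the listing:

# TF 1.x spellings of the remaining 0.8-era calls (sketch, untested here):
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=py_x, labels=Y))
tf.global_variables_initializer().run()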
sonkwon commented 7 years ago
# Import data
from tensorflow.examples.tutorials.mnist import input_data
import tensorflow as tf

mnist = input_data.read_data_sets('/tmp/tensorflow/mnist/input_data', one_hot=True)

# Create the model
x = tf.placeholder(tf.float32, [None, 784], name="x_ph")
with tf.name_scope('layer1'):
    W = tf.Variable(tf.zeros([784, 10]), name="W_var")
    b = tf.Variable(tf.zeros([10]), name="b_var")
    tf.summary.histogram('Weights', W)
    tf.summary.histogram('Biases', b)
    k = tf.matmul(x, W) + b
    y = tf.nn.softmax(k)

# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10], name="y_ph")
learning_rate = 0.5

with tf.name_scope('optimizer'):
    cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(logits=k, labels=y_))
    train_step = tf.train.GradientDescentOptimizer(learning_rate).minimize(cost)
    tf.summary.scalar('cost', cost)

print ("Training")
sess = tf.Session()
init = tf.global_variables_initializer() #.run()
sess.run(init)

# Collect the values to be displayed in TensorBoard.
merged = tf.summary.merge_all()
# Set the graph to save and the directory for the log files.
writer = tf.summary.FileWriter('./mnist_logs', sess.graph)

for step in range(1000):
    # Train for 1000 steps, drawing a batch of 100 examples from the full data each step.
    batch_xs, batch_ys = mnist.train.next_batch(100)
    sess.run(train_step, feed_dict={x: batch_xs, y_: batch_ys})
    # Collect and record the summary values at the appropriate points.
    summary = sess.run(merged, feed_dict={x: batch_xs, y_: batch_ys})
    writer.add_summary(summary, step)

print('b is', sess.run(b))
print('W is', sess.run(W))
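The script prints the learned parameters but never measures accuracy. Following the pattern of the earlier comments, test-set accuracy could be evaluated with a sketch like this (reusing the x, y, y_, sess, and mnist objects defined above):

# Sketch: evaluate accuracy on the MNIST test set.
correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
print('Accuracy:', sess.run(accuracy,
                            feed_dict={x: mnist.test.images, y_: mnist.test.labels}))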