omerarshad opened this issue 6 years ago
I haven't tried this before, but I would imagine you can do it by reusing variables:
```python
with tf.variable_scope("siamese") as scope:
    emb1 = encode(x1)
    scope.reuse_variables()
    emb2 = encode(x2)
```
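For reference, here is a self-contained toy version of that pattern (the dense `encode` below is a hypothetical stand-in for the actual sentence encoder): after `scope.reuse_variables()`, the second call creates no new variables and shares the first call's weights.

```python
import tensorflow as tf

def encode(x):
    # toy encoder: one dense layer; the explicit name is what makes reuse work
    return tf.layers.dense(x, 128, activation=tf.nn.relu, name="enc")

x1 = tf.placeholder(tf.float32, shape=[None, 512])
x2 = tf.placeholder(tf.float32, shape=[None, 512])

with tf.variable_scope("siamese") as scope:
    emb1 = encode(x1)
    scope.reuse_variables()   # everything under "siamese/" is reused from here on
    emb2 = encode(x2)         # shares the "siamese/enc" weights with emb1
```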
Can you please guide me on where to use the code above? Below is a snippet taken from the example; X is the first input and X2 is the second input.
```python
with tf.Session(graph=graph) as sess:
    # universal sentence encoder input/output tensors
    in_tensor_1 = tf.get_default_graph().get_tensor_by_name(scope + '/module/fed_input_values:0')
    ou_tensor1 = tf.get_default_graph().get_tensor_by_name(scope + '/module/Encoder_en/hidden_layers/l2_normalize:0')
    # NOTE: in_tensor_2 and ou_tensor2 are used below but never defined --
    # wiring up this second input branch is exactly what I am asking about.

    # a simple softmax classification head on top of the universal sentence encoder
    input_y = tf.placeholder(tf.int32, shape=[None])
    labels = tf.one_hot(input_y, 2)
    print("ou_tensor1.shape", ou_tensor1.shape)

    out = tf.concat([ou_tensor1, ou_tensor2], 0)   # stacks the two batches along axis 0
    print(out.shape)
    out1 = tf.reduce_mean(out, 0)                  # averages over that same axis
    print(out1.shape)
    out1 = tf.expand_dims(out1, 0)
    print(out1.shape)

    logits = tf.layers.dense(out1, 2)
    loss = tf.reduce_mean(
        tf.nn.softmax_cross_entropy_with_logits(logits=logits, labels=labels))
    optimizer = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

    sess.run(tf.global_variables_initializer())
    sess.run(tf.get_default_graph().get_operation_by_name('finetune/init_all_tables'))

    for epoch in range(10):
        feed_dict = {
            in_tensor_1: X,
            in_tensor_2: X2,
            input_y: y,
        }
        sess.run(optimizer, feed_dict)
```
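A side note on the snippet above: `tf.concat([ou_tensor1, ou_tensor2], 0)` stacks the two embedding batches along the batch axis, and `tf.reduce_mean(out, 0)` then averages over that same axis, so the whole batch collapses into a single 512-dim vector. To keep one feature vector per sentence pair, the usual choice is to concatenate along the feature axis instead:

```python
# [batch, 512] + [batch, 512] -> [batch, 1024]: one row per sentence pair
out = tf.concat([ou_tensor1, ou_tensor2], axis=1)
logits = tf.layers.dense(out, 2)
```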
How can we use the sentence encoder in a siamese architecture to pass two sentences as input?
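One way is to load the encoder with `tensorflow_hub` and simply call the same module on both inputs: a `hub.Module` instantiates its variables once, so both calls share weights and no explicit `reuse_variables()` is needed. Below is a minimal sketch, assuming TF 1.x and the `universal-sentence-encoder/2` module (the snippet above loads the encoder from a frozen graph instead, so the tensor names there would differ).

```python
import tensorflow as tf
import tensorflow_hub as hub

# Load the encoder once; calling it twice below shares all of its weights.
embed = hub.Module("https://tfhub.dev/google/universal-sentence-encoder/2")

x1 = tf.placeholder(tf.string, shape=[None])     # first sentence of each pair
x2 = tf.placeholder(tf.string, shape=[None])     # second sentence of each pair
input_y = tf.placeholder(tf.int32, shape=[None]) # 0/1 pair labels

emb1 = embed(x1)   # [batch, 512]
emb2 = embed(x2)   # [batch, 512], same encoder weights as emb1

# One 1024-dim feature vector per pair, then a 2-way softmax head.
pair = tf.concat([emb1, emb2], axis=1)
logits = tf.layers.dense(pair, 2)
loss = tf.reduce_mean(
    tf.nn.softmax_cross_entropy_with_logits_v2(
        logits=logits, labels=tf.one_hot(input_y, 2)))
train_op = tf.train.AdamOptimizer(learning_rate=0.001).minimize(loss)

with tf.Session() as sess:
    # the hub module needs its lookup tables initialized as well
    sess.run([tf.global_variables_initializer(), tf.tables_initializer()])
    for epoch in range(10):
        # X, X2 are lists of sentence strings and y the labels, as in the question
        sess.run(train_op, {x1: X, x2: X2, input_y: y})
```

From there you can swap the concatenation head for a cosine-similarity or L1-distance head, which are the more common objectives for siamese sentence models.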