liuchuanloong opened this issue 6 years ago
When I define a new loss function like this:
```python
def batch_all_triplet_loss(X):
    # Get the pairwise distance matrix
    print('22')
    labels, embeddings = X
    print('1')
    margin = 1.0
    pairwise_dist = _pairwise_distances(embeddings, squared=False)

    # shape (batch_size, batch_size, 1)
    anchor_positive_dist = tf.expand_dims(pairwise_dist, 2)
    assert anchor_positive_dist.shape[2] == 1, "{}".format(anchor_positive_dist.shape)
    # shape (batch_size, 1, batch_size)
    anchor_negative_dist = tf.expand_dims(pairwise_dist, 1)
    assert anchor_negative_dist.shape[1] == 1, "{}".format(anchor_negative_dist.shape)

    # Compute a 3D tensor of size (batch_size, batch_size, batch_size)
    # triplet_loss[i, j, k] will contain the triplet loss of anchor=i, positive=j, negative=k
    # Uses broadcasting where the 1st argument has shape (batch_size, batch_size, 1)
    # and the 2nd (batch_size, 1, batch_size)
    triplet_loss = anchor_positive_dist - anchor_negative_dist + margin

    # Put to zero the invalid triplets
    # (where label(a) != label(p) or label(n) == label(a) or a == p)
    mask = _get_triplet_mask(labels)
    mask = tf.to_float(mask)
    triplet_loss = tf.multiply(mask, triplet_loss)

    # Remove negative losses (i.e. the easy triplets)
    triplet_loss = tf.maximum(triplet_loss, 0.0)

    # add my loss
    triplet_loss = tf.multiply(0.5, triplet_loss)

    # Count number of positive triplets (where triplet_loss > 0)
    valid_triplets = tf.to_float(tf.greater(triplet_loss, 1e-16))
    num_positive_triplets = tf.reduce_sum(valid_triplets)
    # num_valid_triplets = tf.reduce_sum(mask)
    # fraction_positive_triplets = num_positive_triplets / (num_valid_triplets + 1e-16)

    # Get final mean triplet loss over the positive valid triplets
    triplet_loss = tf.reduce_sum(triplet_loss) / (num_positive_triplets + 1e-16)

    # return triplet_loss, fraction_positive_triplets
    return triplet_loss
```
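For reference, `_pairwise_distances` and `_get_triplet_mask` are the helper functions from the TensorFlow triplet-loss code I am adapting. A minimal sketch of the distance helper, assuming it matches the standard TF 1.x implementation (not the exact code in my project):

```python
import tensorflow as tf

def _pairwise_distances(embeddings, squared=False):
    # Dot product between all embeddings: shape (batch_size, batch_size)
    dot_product = tf.matmul(embeddings, tf.transpose(embeddings))
    # Squared L2 norm of each embedding is on the diagonal of the dot product matrix
    square_norm = tf.diag_part(dot_product)
    # ||a - b||^2 = ||a||^2 - 2 <a, b> + ||b||^2
    distances = (tf.expand_dims(square_norm, 1)
                 - 2.0 * dot_product
                 + tf.expand_dims(square_norm, 0))
    # Guard against small negative values caused by floating point errors
    distances = tf.maximum(distances, 0.0)
    if not squared:
        # Add a small epsilon where the distance is exactly 0.0 so the sqrt gradient stays finite
        mask = tf.to_float(tf.equal(distances, 0.0))
        distances = tf.sqrt(distances + mask * 1e-16)
        # Set those entries back to exactly 0.0
        distances = distances * (1.0 - mask)
    return distances
```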
and then I merge it like this:
```python
triplet_losses = merge([label, final_rmac_a], mode=batch_all_triplet_loss,
                       name='loss', output_shape=(1,))
rmac_model = Model(inputs=[image_a, roi_a], outputs=triplet_losses)
```
where `label` and `final_rmac_a` are defined as:

```python
label = Input(shape=(batch_size,))
final_rmac_a = BatchNormalization()(rmac_a)
```

Why does this raise an error? I guess it is due to the Keras version.
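In case it really is a version issue: the functional `merge` with a callable `mode` only exists in Keras 1, so in Keras 2 I think the same idea would have to go through a `Lambda` layer. A rough sketch of what I mean (assuming Keras 2 on TF 1.x; the model inputs and the identity-loss compile are my assumptions, not tested):

```python
from keras.layers import Lambda
from keras.models import Model

# Same loss-as-a-layer idea, but wrapped in a Lambda instead of merge(mode=...)
triplet_losses = Lambda(batch_all_triplet_loss,
                        output_shape=(1,),
                        name='loss')([label, final_rmac_a])

# label is an Input tensor feeding the loss layer, so presumably it also has to be
# listed in the model inputs, otherwise Keras complains about a disconnected graph
rmac_model = Model(inputs=[image_a, roi_a, label], outputs=triplet_losses)

# The loss is already computed inside the graph, so compile with an identity loss
rmac_model.compile(optimizer='adam', loss=lambda y_true, y_pred: y_pred)
```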