Commit ae80945e authored by Yuxin Wu's avatar Yuxin Wu

fix build

parent 2488a34f
...@@ -243,11 +243,12 @@ class SiameseModel(EmbeddingModel):
     def build_graph(self, x, y, label):
         # embed them
+        single_input = x
         x, y = self.embed([x, y])
         # tag the embedding of 'input' with name 'emb', just for inference later on
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
-            tf.identity(self.embed(x), name="emb")
+            tf.identity(self.embed(single_input), name="emb")
         # compute the actual loss
         cost, pos_dist, neg_dist = contrastive_loss(x, y, label, 5., extra=True, scope="loss")
...@@ -260,10 +261,11 @@ class SiameseModel(EmbeddingModel):
 class CosineModel(SiameseModel):
     def build_graph(self, x, y, label):
+        single_input = x
         x, y = self.embed([x, y])
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
-            tf.identity(self.embed(x), name="emb")
+            tf.identity(self.embed(single_input), name="emb")
         cost = siamese_cosine_loss(x, y, label, scope="loss")
         cost = tf.identity(cost, name="cost")
...@@ -287,10 +289,11 @@ class TripletModel(EmbeddingModel):
         return triplet_loss(a, p, n, 5., extra=True, scope="loss")

     def build_graph(self, a, p, n):
+        single_input = a
         a, p, n = self.embed([a, p, n])
         with tf.variable_scope(tf.get_variable_scope(), reuse=True):
-            tf.identity(self.embed(a), name="emb")
+            tf.identity(self.embed(single_input), name="emb")
         cost, pos_dist, neg_dist = self.loss(a, p, n)
...@@ -318,10 +321,7 @@ class CenterModel(EmbeddingModel):
     def build_graph(self, x, label):
         # embed them
         x = self.embed(x)
+        x = tf.identity(x, name='emb')
-        # tag the embedding of 'input' with name 'emb', just for inference later on
-        with tf.variable_scope(tf.get_variable_scope(), reuse=True):
-            tf.identity(self.embed(x), name="emb")
         # compute the embedding loss
         emb_cost = center_loss(x, label, 10, 0.01)
......
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment