From 0f500526fb03d60f308608a4350921e640eb1036 Mon Sep 17 00:00:00 2001
From: Zain Ul Abidin
Date: Sun, 1 Mar 2020 10:01:01 +0100
Subject: [PATCH] updated sub to subtract so code run under tf=14+

---
 NeuralFM.py | 6 +++---
 1 file changed, 3 insertions(+), 3 deletions(-)

diff --git a/NeuralFM.py b/NeuralFM.py
index fe8d4e6..f77819d 100644
--- a/NeuralFM.py
+++ b/NeuralFM.py
@@ -118,7 +118,7 @@ def _init_graph(self):
             self.squared_sum_features_emb = tf.reduce_sum(self.squared_features_emb, 1)  # None * K
             # ________ FM __________
-            self.FM = 0.5 * tf.sub(self.summed_features_emb_square, self.squared_sum_features_emb)  # None * K
+            self.FM = 0.5 * tf.subtract(self.summed_features_emb_square, self.squared_sum_features_emb)  # None * K
             if self.batch_norm:
                 self.FM = self.batch_norm_layer(self.FM, train_phase=self.train_phase, scope_bn='bn_fm')
             self.FM = tf.nn.dropout(self.FM, self.dropout_keep[-1]) # dropout at the bilinear interactin layer
@@ -141,9 +141,9 @@ def _init_graph(self):
             # Compute the loss.
             if self.loss_type == 'square_loss':
                 if self.lamda_bilinear > 0:
-                    self.loss = tf.nn.l2_loss(tf.sub(self.train_labels, self.out)) + tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(self.weights['feature_embeddings'])  # regulizer
+                    self.loss = tf.nn.l2_loss(tf.subtract(self.train_labels, self.out)) + tf.contrib.layers.l2_regularizer(self.lamda_bilinear)(self.weights['feature_embeddings'])  # regulizer
                 else:
-                    self.loss = tf.nn.l2_loss(tf.sub(self.train_labels, self.out))
+                    self.loss = tf.nn.l2_loss(tf.subtract(self.train_labels, self.out))
             elif self.loss_type == 'log_loss':
                 self.out = tf.sigmoid(self.out)
                 if self.lambda_bilinear > 0:
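
Reviewer note (not part of the patch): tf.sub was renamed to tf.subtract in TensorFlow 1.0, so the one-to-one rename above is the only functional change. The patched FM line computes the standard factorization-machine identity 0.5 * ((sum_i v_i)^2 - sum_i v_i^2) per latent factor. The sketch below is a minimal, standalone illustration of that term written with tf.subtract; it assumes TensorFlow 2.x eager execution, and the tensor names are illustrative rather than taken from NeuralFM.py.

    import tensorflow as tf

    # Batch of non-zero feature embeddings, shape (batch, num_fields, K); random data for illustration
    v = tf.random.normal([2, 3, 4])

    # (sum_i v_i)^2 and sum_i v_i^2, each of shape (batch, K)
    summed_square = tf.square(tf.reduce_sum(v, axis=1))
    square_summed = tf.reduce_sum(tf.square(v), axis=1)

    # Bilinear FM interaction term per latent factor, mirroring the patched line
    fm_term = 0.5 * tf.subtract(summed_square, square_summed)  # shape (batch, K)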