From 6374cea0040124e9da1fea100acefff10423d1ad Mon Sep 17 00:00:00 2001
From: m-ochi
Date: Thu, 11 Jan 2018 18:29:15 +0900
Subject: [PATCH] updated for keras2.1.2 and tensorflow1.4.1

---
 GMF.py   | 17 +++++++++--------
 MLP.py   | 15 +++++----------
 NeuMF.py | 19 ++++++-------------
 3 files changed, 20 insertions(+), 31 deletions(-)

diff --git a/GMF.py b/GMF.py
index 8471310..6faf84e 100644
--- a/GMF.py
+++ b/GMF.py
@@ -10,7 +10,7 @@ import theano.tensor as T
 import keras
 from keras import backend as K
-from keras import initializations
+from keras import initializers
 from keras.models import Sequential, Model, load_model, save_model
 from keras.layers.core import Dense, Lambda, Activation
 from keras.layers import Embedding, Input, Dense, merge, Reshape, Merge, Flatten
@@ -51,18 +51,19 @@ def parse_args():
                         help='Whether to save the trained model.')
     return parser.parse_args()
 
-def init_normal(shape, name=None):
-    return initializations.normal(shape, scale=0.01, name=name)
+#def init_normal(shape, name=None):
+#    return initializations.normal(shape, scale=0.01, name=name)
+#    return initializers.normal()
 
 def get_model(num_users, num_items, latent_dim, regs=[0,0]):
     # Input variables
     user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
     item_input = Input(shape=(1,), dtype='int32', name = 'item_input')
 
-    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding',
-                                  init = init_normal, W_regularizer = l2(regs[0]), input_length=1)
-    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding',
-                                  init = init_normal, W_regularizer = l2(regs[1]), input_length=1)
+#    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding', init = init_normal, W_regularizer = l2(regs[0]), input_length=1)
+    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = latent_dim, name = 'user_embedding', embeddings_initializer = 'random_normal', W_regularizer = l2(regs[0]), input_length=1)
+#    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding', init = init_normal, W_regularizer = l2(regs[1]), input_length=1)
+    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = latent_dim, name = 'item_embedding', embeddings_initializer = 'random_normal', W_regularizer = l2(regs[1]), input_length=1)
 
     # Crucial to flatten an embedding vector!
     user_latent = Flatten()(MF_Embedding_User(user_input))
@@ -168,4 +169,4 @@ def get_train_instances(train, num_negatives):
     print("End. Best Iteration %d: HR = %.4f, NDCG = %.4f. " %(best_iter, best_hr, best_ndcg))
     if args.out > 0:
-        print("The best GMF model is saved to %s" %(model_out_file))
\ No newline at end of file
+        print("The best GMF model is saved to %s" %(model_out_file))
diff --git a/MLP.py b/MLP.py
index 70566c7..6974d8d 100644
--- a/MLP.py
+++ b/MLP.py
@@ -12,9 +12,9 @@ import theano.tensor as T
 import keras
 from keras import backend as K
-from keras import initializations
-from keras.regularizers import l2, activity_l2
-from keras.models import Sequential, Graph, Model
+from keras import initializers
+from keras.regularizers import l2
+from keras.models import Sequential, Model
 from keras.layers.core import Dense, Lambda, Activation
 from keras.layers import Embedding, Input, Dense, merge, Reshape, Merge, Flatten, Dropout
 from keras.constraints import maxnorm
@@ -53,9 +53,6 @@ def parse_args():
                         help='Whether to save the trained model.')
     return parser.parse_args()
 
-def init_normal(shape, name=None):
-    return initializations.normal(shape, scale=0.01, name=name)
-
 def get_model(num_users, num_items, layers = [20,10], reg_layers=[0,0]):
     assert len(layers) == len(reg_layers)
     num_layer = len(layers) #Number of layers in the MLP
@@ -63,10 +60,8 @@ def get_model(num_users, num_items, layers = [20,10], reg_layers=[0,0]):
     user_input = Input(shape=(1,), dtype='int32', name = 'user_input')
     item_input = Input(shape=(1,), dtype='int32', name = 'item_input')
 
-    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = layers[0]/2, name = 'user_embedding',
-                                   init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
-    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = layers[0]/2, name = 'item_embedding',
-                                   init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
+    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = layers[0]/2, name = 'user_embedding', embeddings_initializer = 'random_normal', W_regularizer = l2(reg_layers[0]), input_length=1)
+    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = layers[0]/2, name = 'item_embedding', embeddings_initializer = 'random_normal', W_regularizer = l2(reg_layers[0]), input_length=1)
 
     # Crucial to flatten an embedding vector!
     user_latent = Flatten()(MLP_Embedding_User(user_input))
diff --git a/NeuMF.py b/NeuMF.py
index be04d53..f8e3d99 100644
--- a/NeuMF.py
+++ b/NeuMF.py
@@ -11,8 +11,8 @@ import theano.tensor as T
 import keras
 from keras import backend as K
-from keras import initializations
-from keras.regularizers import l1, l2, l1l2
+from keras import initializers
+from keras.regularizers import l1, l2
 from keras.models import Sequential, Model
 from keras.layers.core import Dense, Lambda, Activation
 from keras.layers import Embedding, Input, Dense, merge, Reshape, Merge, Flatten, Dropout
@@ -59,9 +59,6 @@ def parse_args():
                         help='Specify the pretrain model file for MLP part. If empty, no pretrain will be used')
     return parser.parse_args()
 
-def init_normal(shape, name=None):
-    return initializations.normal(shape, scale=0.01, name=name)
-
 def get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_mf=0):
     assert len(layers) == len(reg_layers)
     num_layer = len(layers) #Number of layers in the MLP
@@ -70,15 +67,11 @@ def get_model(num_users, num_items, mf_dim=10, layers=[10], reg_layers=[0], reg_
     item_input = Input(shape=(1,), dtype='int32', name = 'item_input')
 
     # Embedding layer
-    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = mf_dim, name = 'mf_embedding_user',
-                                  init = init_normal, W_regularizer = l2(reg_mf), input_length=1)
-    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = mf_dim, name = 'mf_embedding_item',
-                                  init = init_normal, W_regularizer = l2(reg_mf), input_length=1)
+    MF_Embedding_User = Embedding(input_dim = num_users, output_dim = mf_dim, name = 'mf_embedding_user', embeddings_initializer = 'random_normal', W_regularizer = l2(reg_mf), input_length=1)
+    MF_Embedding_Item = Embedding(input_dim = num_items, output_dim = mf_dim, name = 'mf_embedding_item', embeddings_initializer = 'random_normal', W_regularizer = l2(reg_mf), input_length=1)
 
-    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = layers[0]/2, name = "mlp_embedding_user",
-                                   init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
-    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = layers[0]/2, name = 'mlp_embedding_item',
-                                   init = init_normal, W_regularizer = l2(reg_layers[0]), input_length=1)
+    MLP_Embedding_User = Embedding(input_dim = num_users, output_dim = layers[0]/2, name = "mlp_embedding_user", embeddings_initializer = 'random_normal', W_regularizer = l2(reg_layers[0]), input_length=1)
+    MLP_Embedding_Item = Embedding(input_dim = num_items, output_dim = layers[0]/2, name = 'mlp_embedding_item', embeddings_initializer = 'random_normal', W_regularizer = l2(reg_layers[0]), input_length=1)
 
     # MF part
     mf_user_latent = Flatten()(MF_Embedding_User(user_input))