As the title says: saving the model produces a warning saying that custom mask layers require something or other, but I've gone over the model code again and again and I can't find any custom layers in it.
CustomMaskWarning: Custom mask layers require a config and must override get_config. When loading, the custom mask layer must be passed to the custom_objects argument.
  warnings.warn('Custom mask layers require a config and must override '
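As I understand it, the warning is aimed at custom layers that carry a mask and therefore must override get_config, something like the purely hypothetical sketch below (MyMaskLayer is an illustrative name; nothing like this exists anywhere in my project):

import tensorflow as tf
from tensorflow.keras import layers

# Hypothetical example of what the warning seems to be describing:
# a custom layer that computes a mask, so it must override get_config()
# and be passed via custom_objects at load time. My code has no such layer.
class MyMaskLayer(layers.Layer):
    def __init__(self, mask_value=0, **kwargs):
        super().__init__(**kwargs)
        self.mask_value = mask_value
        self.supports_masking = True

    def call(self, inputs):
        return inputs

    def compute_mask(self, inputs, mask=None):
        # mark positions that differ from mask_value as valid
        return tf.not_equal(inputs, self.mask_value)

    def get_config(self):
        config = super().get_config()
        config.update({'mask_value': self.mask_value})
        return config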
The actual model code is as follows:
from tensorflow.keras import layers, models, initializers, regularizers, optimizers


def get_compiled_neumf_model(num_users, num_items, lr=0.001, mf_dim=10,
                             layers_num=[10], reg_layers=[0], reg_mf=0):
    assert len(layers_num) == len(reg_layers)
    num_layer = len(layers_num)  # number of layers in the MLP

    # Input variables
    user_input = layers.Input(shape=(1,), dtype='int32', name='user_input')
    item_input = layers.Input(shape=(1,), dtype='int32', name='item_input')

    # Embedding layers
    mf_embedding_user = layers.Embedding(
        input_dim=num_users, output_dim=mf_dim, name='mf_embedding_user',
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_mf), input_length=1)
    mf_embedding_item = layers.Embedding(
        input_dim=num_items, output_dim=mf_dim, name='mf_embedding_item',
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_mf), input_length=1)
    mlp_embedding_user = layers.Embedding(
        input_dim=num_users, output_dim=int(layers_num[0] / 2), name='mlp_embedding_user',
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_layers[0]), input_length=1)
    mlp_embedding_item = layers.Embedding(
        input_dim=num_items, output_dim=int(layers_num[0] / 2), name='mlp_embedding_item',
        embeddings_initializer=initializers.RandomNormal(),
        embeddings_regularizer=regularizers.l2(reg_layers[0]), input_length=1)

    # MF part: element-wise product of the user and item latent vectors
    mf_user_latent = layers.Flatten()(mf_embedding_user(user_input))
    mf_item_latent = layers.Flatten()(mf_embedding_item(item_input))
    mf_vector = layers.multiply([mf_user_latent, mf_item_latent])

    # MLP part: concatenate embeddings, then a stack of Dense layers
    mlp_user_latent = layers.Flatten()(mlp_embedding_user(user_input))
    mlp_item_latent = layers.Flatten()(mlp_embedding_item(item_input))
    mlp_vector = layers.concatenate([mlp_user_latent, mlp_item_latent])
    for idx in range(1, num_layer):
        layer = layers.Dense(layers_num[idx],
                             kernel_regularizer=regularizers.l2(reg_layers[idx]),
                             activation='relu', name='layer%d' % idx)
        mlp_vector = layer(mlp_vector)

    # Concatenate MF and MLP parts
    predict_vector = layers.concatenate([mf_vector, mlp_vector])

    # Final prediction layer
    prediction = layers.Dense(1, activation='sigmoid',
                              kernel_initializer=initializers.lecun_normal(),
                              name='prediction')(predict_vector)

    model_nuemf = models.Model(inputs=[user_input, item_input], outputs=prediction)
    model_nuemf.compile(optimizer=optimizers.Adam(learning_rate=lr, clipnorm=0.5),
                        loss='binary_crossentropy')
    return model_nuemf
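For reference, the warning appears on a plain save call; a minimal sketch of how I save and reload (the file name neumf.h5, the HDF5 format, and the toy user/item counts here are assumptions for illustration, not my real script):

from tensorflow.keras import models

model = get_compiled_neumf_model(num_users=1000, num_items=2000)  # toy sizes for illustration
model.save('neumf.h5')                   # saving here is what emits the CustomMaskWarning
loaded = models.load_model('neumf.h5')   # every layer is a stock Keras layer,
                                         # so there is nothing to pass via custom_objects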