I am using the LightFM recommender library on my dataset, which gives me the results in the image below.
NUM_THREADS = 4
NUM_COMPONENTS = 30
NUM_EPOCHS = 5
ITEM_ALPHA = 1e-6
LEARNING_RATE = 0.005
LEARNING_SCHEDULE = 'adagrad'
RANDOM_SEED = 29031994
warp_model = LightFM(loss='warp',
                     learning_rate=LEARNING_RATE,
                     learning_schedule=LEARNING_SCHEDULE,
                     item_alpha=ITEM_ALPHA,
                     no_components=NUM_COMPONENTS,
                     random_state=RANDOM_SEED)

bpr_model = LightFM(loss='bpr',
                    learning_rate=LEARNING_RATE,
                    learning_schedule=LEARNING_SCHEDULE,
                    item_alpha=ITEM_ALPHA,
                    no_components=NUM_COMPONENTS,
                    random_state=RANDOM_SEED)
The shapes of my features are as follows:
How can I optimize my hyperparameters in order to improve Area Under Curve (AUC) scores?
You can find a good general guide to hyperparameter optimization in the sklearn docs.
One simple but effective technique which you can apply to optimizing a LightFM model is random search. Roughly, it consists of the following steps:

1. Split your data into a training set, a validation set, and a test set.
2. Define a distribution for every hyperparameter you want to tune: for example, the loss could be drawn uniformly from ['warp', 'bpr', 'warp-kos'], and the learning rate from an exponential distribution.
3. Sample a set of hyperparameters, fit a model with them, and measure the resulting AUC on the validation set.
4. Repeat this a number of times and keep the hyperparameters that give the best validation AUC.

To gauge the performance of the final model you should use the test set: simply evaluate the best validation model on the test set.
The following script illustrates this:
import itertools
import numpy as np
from lightfm import LightFM
from lightfm.evaluation import auc_score
def sample_hyperparameters():
    """
    Yield possible hyperparameter choices.
    """

    while True:
        yield {
            "no_components": np.random.randint(16, 64),
            "learning_schedule": np.random.choice(["adagrad", "adadelta"]),
            "loss": np.random.choice(["bpr", "warp", "warp-kos"]),
            "learning_rate": np.random.exponential(0.05),
            "item_alpha": np.random.exponential(1e-8),
            "user_alpha": np.random.exponential(1e-8),
            "max_sampled": np.random.randint(5, 15),
            "num_epochs": np.random.randint(5, 50),
        }
def random_search(train, test, num_samples=10, num_threads=1):
    """
    Sample random hyperparameters, fit a LightFM model, and evaluate it
    on the test set.

    Parameters
    ----------

    train: np.float32 coo_matrix of shape [n_users, n_items]
        Training data.
    test: np.float32 coo_matrix of shape [n_users, n_items]
        Test data.
    num_samples: int, optional
        Number of hyperparameter choices to evaluate.
    num_threads: int, optional
        Number of parallel threads to use for fitting and evaluation.

    Returns
    -------

    generator of (auc_score, hyperparameter dict, fitted model)
    """

    for hyperparams in itertools.islice(sample_hyperparameters(), num_samples):
        # num_epochs is a fit-time argument, not a constructor argument,
        # so remove it from the dict before building the model.
        num_epochs = hyperparams.pop("num_epochs")

        model = LightFM(**hyperparams)
        model.fit(train, epochs=num_epochs, num_threads=num_threads)

        score = auc_score(model, test, train_interactions=train, num_threads=num_threads).mean()

        hyperparams["num_epochs"] = num_epochs

        yield (score, hyperparams, model)
if __name__ == "__main__":
    from lightfm.datasets import fetch_movielens

    data = fetch_movielens()
    train = data["train"]
    test = data["test"]

    (score, hyperparams, model) = max(random_search(train, test, num_threads=2),
                                      key=lambda x: x[0])

    print("Best score {} at {}".format(score, hyperparams))