tensorflow / recommenders

TensorFlow Recommenders is a library for building recommender system models using TensorFlow.

[Issue] K parameter in tfrs.metrics.FactorizedTopK doesn't work #449

Closed Cafelatte1 closed 2 years ago

Cafelatte1 commented 2 years ago
!pip install -q tensorflow-recommenders
!pip install -q --upgrade tensorflow_addons
!pip install -q --upgrade tensorflow-datasets
!pip install -q scann

import os
import pprint
import tempfile
from typing import Dict, Text

import numpy as np
import tensorflow as tf
import tensorflow_datasets as tfds
import tensorflow_recommenders as tfrs

import tensorflow_addons as tfa
from tqdm.keras import TqdmCallback
from tensorflow.keras import callbacks as tf_callbacks

# === Load the raw data for this exercise ===
# In this example, every record in the ratings table is treated as "the user watched this movie".
# Ratings data.
ratings = tfds.load("movielens/100k-ratings", split="train")
# Features of all the available movies.
movies = tfds.load("movielens/100k-movies", split="train")

ratings = ratings.map(lambda x: {
    "movie_title": x["movie_title"],
    "user_id": x["user_id"],
})
movies = movies.map(lambda x: x["movie_title"])

# Count the number of users and movies to set the vocabulary sizes for the embeddings.
movie_titles = movies
user_ids = ratings.map(lambda x: x["user_id"])

unique_movie_titles = np.unique(np.concatenate(list(movie_titles.batch(1))))
unique_user_ids = np.unique(np.concatenate(list(user_ids.batch(1))))

embedding_dimension = 64

user_model = tf.keras.Sequential([
  # Label-encode the string user IDs, since this feeds the embedding layer.
  tf.keras.layers.StringLookup(vocabulary=unique_user_ids, output_mode="int", mask_token=None),
  # We add an additional embedding to account for unknown tokens.
  tf.keras.layers.Embedding(len(unique_user_ids) + 1, embedding_dimension),
  tf.keras.layers.Dense(32, activation="relu")
])

movie_model = tf.keras.Sequential([
  # Label-encode the string movie titles, since this feeds the embedding layer.
  tf.keras.layers.StringLookup(vocabulary=unique_movie_titles, mask_token=None),
  tf.keras.layers.Embedding(len(unique_movie_titles) + 1, embedding_dimension),
  tf.keras.layers.Dense(32, activation="relu")
])

# learning parameter setting
batch_size = 1024
eta = 1e-3
weight_decay = 1e-5
# model_save_flag = False
# checkpoint_filepath = folder_path + 'models/tmp_checkpoint/'

task = tfrs.tasks.Retrieval(
  loss=tf.keras.losses.CategoricalCrossentropy(),
  metrics=tfrs.metrics.FactorizedTopK(candidates=movies.batch(batch_size).map(movie_model), k=200, name="top_k_acc")
)

cb_reduceLR = tf_callbacks.ReduceLROnPlateau(patience=1, factor=0.6, min_lr=1e-7)
cb_earlyStopping = tf_callbacks.EarlyStopping(patience=10, monitor='val_top_k_acc/top_100_categorical_accuracy', mode='max')

tf.random.set_seed(42)
shuffled = ratings.shuffle(batch_size, seed=42, reshuffle_each_iteration=True)

train = shuffled.take(80000)
test = shuffled.skip(80000).take(2000)

cached_train = train.shuffle(batch_size, seed=42, reshuffle_each_iteration=True).batch(batch_size).prefetch(tf.data.AUTOTUNE)
cached_test = test.batch(batch_size).prefetch(tf.data.AUTOTUNE)

# Build the model by subclassing tfrs.Model.
class MovielensModel(tfrs.Model):

  def __init__(self, user_model, movie_model):
    super().__init__()
    self.movie_model: tf.keras.Model = movie_model
    self.user_model: tf.keras.Model = user_model
    self.task: tf.keras.layers.Layer = task

  def compute_loss(self, features: Dict[Text, tf.Tensor], training=False) -> tf.Tensor:
    # We pick out the user features and pass them into the user model.
    user_embeddings = self.user_model(features["user_id"])
    # And pick out the movie features and pass them into the movie model,
    # getting embeddings back.
    positive_movie_embeddings = self.movie_model(features["movie_title"])

    # The task computes the loss and the metrics.
    return self.task(user_embeddings, positive_movie_embeddings)

model = MovielensModel(user_model, movie_model)

model.compile(
    optimizer=tfa.optimizers.AdamW(learning_rate=eta, weight_decay=weight_decay),
)
history = model.fit(
  cached_train, validation_data=cached_test, epochs=30, verbose=0,
  callbacks=[cb_reduceLR, cb_earlyStopping, TqdmCallback(verbose=0)]
)

[screenshot: training logs reporting only top_1/top_5/top_10/top_100 categorical accuracy]

I wanted to use tfrs.metrics.FactorizedTopK with k=200, but as shown above, the output only reports results for k = 1, 5, 10, and 100 (always the same four cases). I want to see the result for k=200. (The contents of the screenshot and the code differ, but the issue with the output is the same.)
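
For context, this behavior comes from how FactorizedTopK builds its sub-metrics when no metrics argument is passed. The sketch below paraphrases the construction (the helper function name is hypothetical, and defaults may differ between TFRS versions); note that the k argument only controls how many candidates are retrieved and scored, not which cutoffs are reported:

# Hedged sketch of the default sub-metric construction inside
# tfrs.metrics.FactorizedTopK (paraphrased; check your installed version).
# The function name below is illustrative, not part of the TFRS API.
def default_factorized_top_k_metrics(name="factorized_top_k"):
  # When `metrics` is None, FactorizedTopK falls back to a fixed list of
  # cutoffs -- independent of the `k` argument, which only sets how many
  # candidates are retrieved per query.
  return [
      tf.keras.metrics.TopKCategoricalAccuracy(
          k=x, name=f"{name}/top_{x}_categorical_accuracy")
      for x in [1, 5, 10, 50, 100]
  ]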

Cafelatte1 commented 2 years ago

I found that:

The k parameter of tfrs.metrics.FactorizedTopK controls how many top-scoring candidates are retrieved, where the scores come from multiplying the user embedding (1) with all candidate embeddings (N).

The k parameter of tf.keras.metrics.TopKCategoricalAccuracy is the cutoff used to decide whether the true candidate ranks within the top k.

So you can use it like the code below:

# k values (cutoffs) you want to monitor.
k_seq = [5, 10, 20, 50, 100]
tmp_metrics = [tf.keras.metrics.TopKCategoricalAccuracy(k=i, name="top_" + str(i) + "_acc") for i in k_seq]

# Pass this list as the 'metrics' parameter when initializing tfrs.metrics.FactorizedTopK.

# If you give TopKCategoricalAccuracy a k larger than the k of FactorizedTopK,
# the metric always reports accuracy=1, because the k of
# tfrs.metrics.FactorizedTopK caps the length of the score vector that
# TopKCategoricalAccuracy evaluates.
tfrs.tasks.Retrieval(
  metrics=tfrs.metrics.FactorizedTopK(
      candidates=movies.batch(batch_size).map(movie_model),  # was an undefined `tmp` in the original snippet
      metrics=tmp_metrics,
      k=100,
  )
)
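
Applying this to the original question, a minimal sketch (reusing the movie_model and batch_size defined above) that actually reports a top-200 metric would be:

# Sketch: to monitor top-200 accuracy, the k of FactorizedTopK must be >= 200
# so that enough candidates are retrieved for the cutoff to be meaningful.
task = tfrs.tasks.Retrieval(
    metrics=tfrs.metrics.FactorizedTopK(
        candidates=movies.batch(batch_size).map(movie_model),
        metrics=[
            tf.keras.metrics.TopKCategoricalAccuracy(k=i, name=f"top_{i}_acc")
            for i in [10, 100, 200]
        ],
        k=200,
    )
)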