import torch

from recbole.utils import EvaluatorType


class AbstractMetric(object):
    """:class:`AbstractMetric` is the base object of all metrics. If you want to
    implement a metric, you should inherit this class.

    Args:
        config (Config): the config of evaluator.
    """

    smaller = False

    def __init__(self, config):
        self.decimal_place = config['metric_decimal_place']
    def calculate_metric(self, dataobject):
        """Get the dictionary of a metric.

        Args:
            dataobject (DataStruct): it contains all the information needed to calculate metrics.

        Returns:
            dict: such as ``{'metric@10': 0.3153, 'metric@20': 0.3824}``
        """
        raise NotImplementedError('Method [calculate_metric] should be implemented.')
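
# A minimal sketch (illustrative, not part of this module) of a metric that inherits
# :class:`AbstractMetric` directly. The class name and the result key 'labelmean' are
# assumptions made for demonstration; only the 'data.label' field, which is also used
# by :class:`LossMetric` below, is taken from this module.
class LabelMeanSketch(AbstractMetric):
    metric_type = EvaluatorType.VALUE
    metric_need = ['data.label']

    def calculate_metric(self, dataobject):
        # Average ground-truth label, rounded like every other metric.
        trues = dataobject.get('data.label').squeeze(-1).numpy()
        return {'labelmean': round(float(trues.mean()), self.decimal_place)}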

class TopkMetric(AbstractMetric):
    """:class:`TopkMetric` is a base object of top-k metrics. If you want to
    implement a top-k metric, you can inherit this class.

    Args:
        config (Config): The config of evaluator.
    """

    metric_type = EvaluatorType.RANKING
    metric_need = ['rec.topk']

    def __init__(self, config):
        super().__init__(config)
        self.topk = config['topk']
    def used_info(self, dataobject):
        """Get the bool matrix indicating whether the corresponding item is positive
        and the number of positive items for each user.
        """
        rec_mat = dataobject.get('rec.topk')
        topk_idx, pos_len_list = torch.split(rec_mat, [max(self.topk), 1], dim=1)
        return topk_idx.to(torch.bool).numpy(), pos_len_list.squeeze(-1).numpy()
    def topk_result(self, metric, value):
        """Match the metric value to the `k` and put them in `dictionary` form.

        Args:
            metric (str): the name of calculated metric.
            value (numpy.ndarray): metrics for each user, including values from `metric@1` to `metric@max(self.topk)`.

        Returns:
            dict: metric values required in the configuration.
        """
        metric_dict = {}
        avg_result = value.mean(axis=0)
        for k in self.topk:
            key = '{}@{}'.format(metric, k)
            metric_dict[key] = round(avg_result[k - 1], self.decimal_place)
        return metric_dict
    def metric_info(self, pos_index, pos_len=None):
        """Calculate the value of the metric.

        Args:
            pos_index (numpy.ndarray): a bool matrix, shape of ``n_users * max(topk)``. The item with the (j+1)-th
                highest score of the i-th user is positive if ``pos_index[i][j] == True`` and negative otherwise.
            pos_len (numpy.ndarray): a vector representing the number of positive items per user, shape of ``(n_users,)``.

        Returns:
            numpy.ndarray: metrics for each user, including values from `metric@1` to `metric@max(self.topk)`.
        """
        raise NotImplementedError('Method [metric_info] of top-k metric should be implemented.')
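
# A minimal sketch (illustrative, not part of this module) of a concrete top-k metric
# built on :class:`TopkMetric`. The class name and the result key 'hitsketch' are
# assumptions; RecBole ships its own hit-ratio implementation. This only shows how
# ``used_info``, ``metric_info`` and ``topk_result`` fit together.
import numpy as np


class HitSketch(TopkMetric):
    def calculate_metric(self, dataobject):
        pos_index, _ = self.used_info(dataobject)
        result = self.metric_info(pos_index)
        return self.topk_result('hitsketch', result)

    def metric_info(self, pos_index, pos_len=None):
        # A user counts as a hit at cutoff k if any of the top-k items is positive,
        # so each row holds the per-user values from metric@1 to metric@max(topk).
        return (np.cumsum(pos_index, axis=1) > 0).astype(int)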

class LossMetric(AbstractMetric):
    """:class:`LossMetric` is a base object of loss-based metrics and AUC. If you want to
    implement a loss-based metric, you can inherit this class.

    Args:
        config (Config): The config of evaluator.
    """

    metric_type = EvaluatorType.VALUE
    metric_need = ['rec.score', 'data.label']

    def __init__(self, config):
        super().__init__(config)
    def used_info(self, dataobject):
        """Get the scores predicted by the model and the ground-truth labels."""
        preds = dataobject.get('rec.score')
        trues = dataobject.get('data.label')
        return preds.squeeze(-1).numpy(), trues.squeeze(-1).numpy()
    def metric_info(self, preds, trues):
        """Calculate the value of the metric.

        Args:
            preds (numpy.ndarray): the scores predicted by model, a one-dimensional vector.
            trues (numpy.ndarray): the label of items, which has the same shape as ``preds``.

        Returns:
            float: The value of the metric.
        """
        raise NotImplementedError('Method [metric_info] of loss-based metric should be implemented.')
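
# A minimal sketch (illustrative, not part of this module) of a concrete loss-based
# metric built on :class:`LossMetric`. The class name and the result key 'maesketch'
# are assumptions; RecBole provides its own MAE/RMSE/LogLoss metrics. It only shows
# how ``used_info`` and ``metric_info`` combine into ``calculate_metric``.
class MAESketch(LossMetric):
    smaller = True  # a lower error means a better model

    def calculate_metric(self, dataobject):
        preds, trues = self.used_info(dataobject)
        value = self.metric_info(preds, trues)
        return {'maesketch': round(float(value), self.decimal_place)}

    def metric_info(self, preds, trues):
        # Mean absolute error between predicted scores and ground-truth labels.
        return abs(trues - preds).mean()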