Coverage for credoai/modules/metric_utils.py: 25%
24 statements
« prev ^ index » next coverage.py v7.1.0, created at 2023-02-13 21:56 +0000
1import textwrap
2from collections import defaultdict
4from credoai.modules.metrics import ALL_METRICS, MODEL_METRIC_CATEGORIES
5from pandas import DataFrame
def list_metrics(verbose=True):
    """Group the names of all model metrics by their metric category.

    Parameters
    ----------
    verbose : bool, optional
        If True (default), print each category followed by a wrapped,
        tab-indented, comma-separated listing of its metric names.

    Returns
    -------
    collections.defaultdict
        Maps each metric category to the set of equivalent names of
        every metric in that category.
    """
    grouped = defaultdict(set)
    for metric in ALL_METRICS:
        # Only categories that apply to models are included.
        if metric.metric_category in MODEL_METRIC_CATEGORIES:
            grouped[metric.metric_category] |= metric.equivalent_names
    if verbose:
        for category, names in grouped.items():
            wrapped = textwrap.fill(
                ", ".join(sorted(names)),
                width=50,
                initial_indent="\t",
                subsequent_indent="\t",
            )
            print(category)
            print(wrapped)
            print("")
    return grouped
def table_metrics():
    """Build a tabular summary of every registered metric.

    Returns
    -------
    pandas.DataFrame
        One row per metric with columns:
        ``metric_name``, ``metric_category``, ``synonyms`` (a
        comma-separated string of equivalent names, excluding the
        metric's own name), and ``doc``.
    """
    rows = []
    for metric in ALL_METRICS:
        rows.append(
            [
                metric.name,
                metric.metric_category,
                list(metric.equivalent_names),
                metric.get_fun_doc,
            ]
        )
    output = DataFrame(
        rows,
        columns=["metric_name", "metric_category", "synonyms", "doc"],
    )

    # The canonical name appears among its own equivalents; strip it out
    # and collapse the remaining synonyms into one display string.
    output["synonyms"] = [
        ", ".join(syn for syn in synonyms if syn != name)
        for name, synonyms in zip(output["metric_name"], output["synonyms"])
    ]
    return output