Coverage for credoai/evaluators/utils/fairlearn.py: 89% (18 statements)
coverage.py v6.5.0, created at 2022-12-08 07:32 +0000
from fairlearn.metrics import MetricFrame

from credoai.utils import ValidationError, global_logger

########### General functions shared across evaluators ###########


def create_metric_frame(metrics, y_pred, y_true, sensitive_features):
    """Create a fairlearn MetricFrame from a dictionary of name: Metric pairs."""
    metrics = {name: metric.fun for name, metric in metrics.items()}
    return MetricFrame(
        metrics=metrics,
        y_true=y_true,
        y_pred=y_pred,
        sensitive_features=sensitive_features,
    )
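
# Usage sketch (illustrative; assumes a Metric-like object exposing `.fun`,
# e.g. wrapping sklearn.metrics.accuracy_score):
#   frame = create_metric_frame(
#       {"accuracy": metric}, y_pred, y_true, sensitive_features=groups
#   )
#   frame.by_group  # metric value disaggregated by sensitive feature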

def setup_metric_frames(
    performance_metrics,
    prob_metrics,
    thresh_metrics,
    y_pred,
    y_prob,
    y_true,
    sensitive_features,
):
    """Create MetricFrames for performance, probability, and threshold metrics.

    Frames are keyed "pred", "prob", and "thresh". Probability and threshold
    metrics both require y_prob; if it is unavailable, the requested metrics
    are skipped with a logged warning.
    """
    metric_frames = {}
    if y_pred is not None and performance_metrics:
        metric_frames["pred"] = create_metric_frame(
            performance_metrics,
            y_pred,
            y_true,
            sensitive_features=sensitive_features,
        )

    if prob_metrics:
        if y_prob is not None:
            metric_frames["prob"] = create_metric_frame(
                prob_metrics,
                y_prob,
                y_true,
                sensitive_features=sensitive_features,
            )
        else:
            # Logger.warn is a deprecated alias; use warning
            global_logger.warning(
                f"Metrics ({list(prob_metrics.keys())}) requested, but no y_prob available"
            )

    if thresh_metrics:
        if y_prob is not None:
            metric_frames["thresh"] = create_metric_frame(
                thresh_metrics,
                y_prob,
                y_true,
                sensitive_features=sensitive_features,
            )
        else:
            global_logger.warning(
                f"Metrics ({list(thresh_metrics.keys())}) requested, but no y_prob available"
            )
    return metric_frames
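
# Minimal end-to-end sketch (illustration only, not part of the original
# module). Assumption: credoai Metric objects expose their callable as `.fun`
# (as used above); the namedtuple below is a hypothetical stand-in for that
# class.
if __name__ == "__main__":
    from collections import namedtuple

    from sklearn.metrics import accuracy_score, roc_auc_score

    Metric = namedtuple("Metric", "fun")  # hypothetical stand-in for credoai's Metric

    frames = setup_metric_frames(
        performance_metrics={"accuracy": Metric(accuracy_score)},
        prob_metrics={"roc_auc": Metric(roc_auc_score)},
        thresh_metrics={},
        y_pred=[1, 0, 1, 1],
        y_prob=[0.9, 0.2, 0.7, 0.6],
        y_true=[1, 0, 0, 1],
        sensitive_features=["a", "a", "b", "b"],
    )
    print(frames["pred"].by_group)  # accuracy disaggregated by sensitive feature
    print(frames["prob"].by_group)  # ROC AUC disaggregated by sensitive feature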