Coverage for credoai/lens/pipeline_creator.py: 80% (60 statements)
coverage.py v7.1.0, created at 2023-02-13 21:56 +0000
1"""
2Classes responsible for programatically creating pipelines of evaluators
3"""
import inspect
from collections import defaultdict
from typing import List

import credoai.evaluators
from connect.evidence import EvidenceRequirement
from connect.governance import Governance
from credoai.evaluators import *
from credoai.evaluators.utils import name2evaluator
from credoai.utils.common import remove_suffix


class PipelineCreator:
    @staticmethod
    def generate_all_evaluators():
        all_evaluators = build_list_of_evaluators()
        return all_evaluators

    @staticmethod
    def generate_from_governance(governance: Governance):
        evidence_requirements = governance.get_evidence_requirements()
        governance_pipeline = process_evidence_requirements(evidence_requirements)
        return governance_pipeline
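
# Usage sketch (not part of the module): the assumption is that `gov` is an
# already-configured connect.governance.Governance instance whose assessment
# plan / evidence requirements have been retrieved; obtaining it is outside
# this module's scope.
#
#     pipeline = PipelineCreator.generate_from_governance(gov)
#     # -> list of instantiated evaluators, e.g. [ModelFairness(...), Performance(...)]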


def process_evidence_requirements(evidence_requirements: List[EvidenceRequirement]):
    evaluators = set()
    kwargs: dict = defaultdict(dict)
    for e in evidence_requirements:
        labels = e.label
        evaluator_name = labels.get("evaluator")
        if evaluator_name is None:
            continue
        evaluators.add(evaluator_name)
        if evaluator_name in ["ModelFairness", "Performance"]:
            if "metrics" not in kwargs[evaluator_name]:
                kwargs[evaluator_name]["metrics"] = extract_metrics(labels)
            else:
                kwargs[evaluator_name]["metrics"] |= extract_metrics(labels)
        if evaluator_name == "FeatureDrift":
            if "table_name" in labels:
                if labels["table_name"] == "Characteristic Stability Index":
                    kwargs[evaluator_name]["csi_calculation"] = True

    pipeline = []
    for evaluator_name in evaluators:
        evaltr_class = name2evaluator(evaluator_name)
        evaltr_kwargs = kwargs.get(evaluator_name, {})
        initialized_evaltr = evaltr_class(**evaltr_kwargs)
        pipeline.append(initialized_evaltr)
    return pipeline
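
# Illustrative sketch of the label -> kwargs mapping performed above. These
# label dicts are made up for the example; real labels come from the evidence
# requirements delivered by the governance platform.
#
#     {"evaluator": "ModelFairness", "metric_type": "precision_score_parity"}
#     {"evaluator": "ModelFairness", "metric_types": ["false_negative_rate"]}
#     {"evaluator": "FeatureDrift", "table_name": "Characteristic Stability Index"}
#
#     # After the loop, kwargs would be roughly:
#     #   {"ModelFairness": {"metrics": {"precision_score", "false_negative_rate"}},
#     #    "FeatureDrift": {"csi_calculation": True}}
#     # and the returned pipeline would contain one instantiated evaluator per name,
#     # e.g. [ModelFairness(metrics={...}), FeatureDrift(csi_calculation=True)]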


def extract_metrics(labels):
    """Extract metrics from a single evidence requirement"""
    metrics = set()
    if "metric_type" in labels:
        metrics.add(remove_suffix(labels["metric_type"], "_parity"))
    elif "metric_types" in labels:
        metrics = metrics.union(labels["metric_types"])
    return metrics
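
# Example of extract_metrics on hypothetical label dicts; the metric names are
# illustrative, not drawn from a real assessment plan.
#
#     extract_metrics({"metric_type": "precision_score_parity"})
#     # -> {"precision_score"}   (the "_parity" suffix is stripped)
#     extract_metrics({"metric_types": ["accuracy_score", "recall_score"]})
#     # -> {"accuracy_score", "recall_score"}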


def build_list_of_evaluators():
    """
    Takes all evaluator classes available in the Lens package and converts
    them into a list of instantiated objects, using only default values.

    Returns
    -------
    List[Evaluator]
        List of instantiated evaluators
    """
    all_evaluators = []
    for x in dir(credoai.evaluators):
        try:
            # Resolve the attribute name against this module's namespace, which is
            # populated by the wildcard import of credoai.evaluators above; names
            # that were not imported there raise NameError and are skipped.
            evaluated = eval(x)
            if (
                inspect.isclass(evaluated)
                and issubclass(evaluated, Evaluator)
                and not inspect.isabstract(evaluated)
            ):
                all_evaluators.append(evaluated)
        except NameError:
            pass
    # Instantiate every concrete Evaluator subclass with its default arguments
    return [x() for x in all_evaluators]
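
# Usage sketch for the default pipeline path: every concrete Evaluator subclass
# exported by credoai.evaluators is instantiated with default arguments. The
# exact evaluators in the result depend on the installed Lens version.
#
#     all_default = PipelineCreator.generate_all_evaluators()
#     # -> e.g. [DataProfiler(), ModelFairness(), Performance(), ...]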