diff --git a/validator/main.py b/validator/main.py
index 759d0b4..0777e3c 100644
--- a/validator/main.py
+++ b/validator/main.py
@@ -37,7 +37,7 @@ def __init__(
         self._arg_1 = arg_1
         self._arg_2 = arg_2
 
-    def validate(self, value: Any, metadata: Dict = {}) -> ValidationResult:
+    def _validate(self, value: Any, metadata: Dict = {}) -> ValidationResult:
         """Validates that {fill in how you validator interacts with the passed value}."""
         # Add your custom validator logic here and return a PassResult or FailResult accordingly.
         if value != "pass":  # FIXME
@@ -46,3 +46,42 @@ def validate(self, value: Any, metadata: Dict = {}) -> ValidationResult:
             fix_value="{The programmtic fix if applicable, otherwise remove this kwarg.}",
         )
         return PassResult()
+
+    def _inference_local(self, model_input: Any) -> Any:
+        """
+        Runs a machine learning pipeline on some input on the local
+        machine. This function should receive the expected input to the
+        ML model, and output the results from the ml model.
+        """
+        raise NotImplementedError
+
+    def _inference_remote(self, model_input: Any) -> Any:
+        """
+        Runs a machine learning pipeline on some input on a remote
+        machine. This function should receive the expected input to the
+        ML model, and output the results from the ml model.
+
+        """
+        raise NotImplementedError
+
+    def _inference(self, model_input: Any) -> Any:
+        """Calls either a local or remote inference engine for use in the
+        validation call.
+
+        Args:
+            model_input (Any): Receives the input to be passed to your ML model.
+
+        Returns:
+            Any: Returns the output from the ML model inference.
+        """
+        # Prefer local inference; use the remote endpoint only when it is configured.
+        if self.use_local:
+            return self._inference_local(model_input)
+        if not self.use_local and self.validation_endpoint:
+            return self._inference_remote(model_input)
+
+        raise RuntimeError(
+            "No inference endpoint set, but use_local was false. "
+            "Please set either use_local=True or "
+            "set a validation_endpoint to perform inference in the validator."
+        )