Skip to content

Commit

Permalink
Merge branch 'main' into fix/new_filter_of_parents_class
Browse files Browse the repository at this point in the history
  • Loading branch information
PawelPeczek-Roboflow authored Jan 31, 2025
2 parents faff6b6 + b933d2e commit 4287309
Show file tree
Hide file tree
Showing 7 changed files with 242 additions and 3 deletions.
2 changes: 1 addition & 1 deletion inference/core/version.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
__version__ = "0.36.0"
__version__ = "0.36.1"


if __name__ == "__main__":
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -104,6 +104,10 @@ class BlockManifest(WorkflowBlockManifest):
def get_parameters_accepting_batches(cls) -> List[str]:
return ["predictions"]

@classmethod
def get_parameters_accepting_batches_and_scalars(cls) -> List[str]:
    """Return names of manifest parameters that may be fed either batch or scalar data."""
    accepting_both: List[str] = ["operations_parameters"]
    return accepting_both

@classmethod
def describe_outputs(cls) -> List[OutputDefinition]:
return [
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -24,7 +24,6 @@
OBJECT_DETECTION_PREDICTION_KIND,
STRING_KIND,
Selector,
StepOutputSelector,
)
from inference.core.workflows.prototypes.block import (
BlockResult,
Expand Down Expand Up @@ -83,7 +82,7 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest):
examples=["$inputs.image", "$steps.cropping.crops"],
validation_alias=AliasChoices("images", "image"),
)
perspective_polygons: Union[list, Selector(kind=[LIST_OF_VALUES_KIND]), StepOutputSelector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore
perspective_polygons: Union[list, Selector(kind=[LIST_OF_VALUES_KIND])] = Field( # type: ignore
description="Perspective polygons (for each batch at least one must be consisting of 4 vertices)",
examples=["$steps.perspective_wrap.zones"],
)
Expand All @@ -108,6 +107,10 @@ class PerspectiveCorrectionManifest(WorkflowBlockManifest):
def get_parameters_accepting_batches(cls) -> List[str]:
return ["images", "predictions"]

@classmethod
def get_parameters_accepting_batches_and_scalars(cls) -> List[str]:
    """Return names of manifest parameters that may be fed either batch or scalar data."""
    accepting_both: List[str] = ["perspective_polygons"]
    return accepting_both

@classmethod
def describe_outputs(cls) -> List[OutputDefinition]:
return [
Expand Down
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
Original file line number Diff line number Diff line change
Expand Up @@ -4,3 +4,4 @@ dogs.jpg: https://www.pexels.com/photo/brown-and-white-dogs-sitting-on-field-356
multi-fruit.jpg: https://www.freepik.com/free-photo/front-close-view-organic-nutrition-source-fresh-bananas-bundle-red-apples-orange-with-stem-dark-background_17119128.htm
multi_line_text.jpg: https://www.pexels.com/photo/illuminated-qoute-board-2255441/
asl_image.jpg: https://universe.roboflow.com/david-lee-d0rhs/american-sign-language-letters
car.jpg: https://pixabay.com/photos/car-vehicle-sports-car-auto-63930/
5 changes: 5 additions & 0 deletions tests/workflows/integration_tests/execution/conftest.py
Original file line number Diff line number Diff line change
Expand Up @@ -30,6 +30,11 @@ def dogs_image() -> np.ndarray:
return cv2.imread(os.path.join(ASSETS_DIR, "dogs.jpg"))


@pytest.fixture(scope="function")
def car_image() -> np.ndarray:
    """Load the car test asset as a BGR numpy array (fresh copy per test)."""
    asset_path = os.path.join(ASSETS_DIR, "car.jpg")
    return cv2.imread(asset_path)


@pytest.fixture(scope="function")
def red_image() -> np.ndarray:
    """Load the red test asset as a BGR numpy array (fresh copy per test)."""
    asset_path = os.path.join(ASSETS_DIR, "red_image.png")
    return cv2.imread(asset_path)
Expand Down
Original file line number Diff line number Diff line change
@@ -0,0 +1,226 @@
import numpy as np
from matplotlib import pyplot as plt

from inference.core.env import WORKFLOWS_MAX_CONCURRENT_STEPS
from inference.core.managers.base import ModelManager
from inference.core.workflows.core_steps.common.entities import StepExecutionMode
from inference.core.workflows.execution_engine.core import ExecutionEngine

# Workflow: run object detection, then keep only detections whose "size"
# property is at least 2.5% of the input image's "size". The filter's
# right-hand operand is dynamic: it resolves "image" via the step's
# `operations_parameters` mapping, which points at `$inputs.image` — this is
# exactly the scalar-parameter pathway the fix under test enables.
WORKFLOW_DEFINITION_DETECTIONS_FILTER = {
    "version": "1.0",
    "inputs": [{"type": "InferenceImage", "name": "image"}],
    "steps": [
        {
            # Upstream detector producing predictions to filter.
            "type": "roboflow_core/roboflow_object_detection_model@v2",
            "name": "model",
            "images": "$inputs.image",
            "model_id": "yolov8n-640",
        },
        {
            "type": "roboflow_core/detections_filter@v1",
            "name": "detections_filter",
            "predictions": "$steps.model.predictions",
            "operations": [
                {
                    "type": "DetectionsFilter",
                    "filter_operation": {
                        "type": "StatementGroup",
                        "operator": "and",
                        "statements": [
                            {
                                "type": "BinaryStatement",
                                # Left side: each detection's size ("_" binds
                                # the detection under evaluation).
                                "left_operand": {
                                    "type": "DynamicOperand",
                                    "operand_name": "_",
                                    "operations": [
                                        {
                                            "type": "ExtractDetectionProperty",
                                            "property_name": "size",
                                        }
                                    ],
                                },
                                "comparator": {"type": "(Number) >="},
                                # Right side: 2.5% of the image size, resolved
                                # through `operations_parameters` below.
                                "right_operand": {
                                    "type": "DynamicOperand",
                                    "operand_name": "image",
                                    "operations": [
                                        {
                                            "type": "ExtractImageProperty",
                                            "property_name": "size",
                                        },
                                        {"type": "Multiply", "other": 0.025},
                                    ],
                                },
                            }
                        ],
                    },
                }
            ],
            # Binds the dynamic operand name "image" to the workflow input.
            "operations_parameters": {"image": "$inputs.image"},
        },
    ],
    "outputs": [
        {
            "type": "JsonField",
            "name": "model_predictions",
            "coordinates_system": "own",
            "selector": "$steps.model.predictions",
        },
        {
            "type": "JsonField",
            "name": "detections_filter",
            "coordinates_system": "own",
            "selector": "$steps.detections_filter.predictions",
        },
    ],
}


def test_workflow_with_detections_filter_referencing_image(
    model_manager: ModelManager,
    crowd_image: np.ndarray,
    roboflow_api_key: str,
) -> None:
    """Detections filter whose dynamic operand references `$inputs.image`
    through `operations_parameters` runs end-to-end and prunes detections."""
    # given
    init_params = {
        "workflows_core.model_manager": model_manager,
        "workflows_core.api_key": roboflow_api_key,
        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
    }
    engine = ExecutionEngine.init(
        workflow_definition=WORKFLOW_DEFINITION_DETECTIONS_FILTER,
        init_parameters=init_params,
        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
    )

    # when
    result = engine.run(runtime_parameters={"image": crowd_image})

    # then
    assert isinstance(result, list), "Expected list to be delivered"
    assert len(result) == 1, "Expected 1 element in the output for one input image"
    outputs = result[0]
    assert set(outputs.keys()) == {
        "model_predictions",
        "detections_filter",
    }, "Expected all declared outputs to be delivered"
    # yolov8n-640 finds 12 people in the crowd asset; only 1 passes the
    # size-relative-to-image filter.
    assert len(outputs["model_predictions"]) == 12
    assert len(outputs["detections_filter"]) == 1


# Workflow: segment objects, keep only "car" detections, derive a 4-vertex
# zone from the kept instances, then perspective-correct the image and the
# original predictions against that zone and visualize labels on the warped
# image. Exercises `perspective_polygons` fed from a step output
# (`$steps.dynamic_zone.zones`) — the batch-and-scalar parameter under test.
WORKFLOW_DEFINITION_PERSPECTIVE_CORRECTION = {
    "version": "1.0",
    "inputs": [{"type": "InferenceImage", "name": "image"}],
    "steps": [
        {
            # Instance segmentation supplies masks needed by dynamic_zone.
            "type": "roboflow_core/roboflow_instance_segmentation_model@v2",
            "name": "model",
            "images": "$inputs.image",
            "model_id": "yolov8n-seg-640",
        },
        {
            "type": "roboflow_core/detections_filter@v1",
            "name": "detections_filter",
            "predictions": "$steps.model.predictions",
            "operations": [
                {
                    "type": "DetectionsFilter",
                    "filter_operation": {
                        "type": "StatementGroup",
                        "operator": "and",
                        "statements": [
                            {
                                # Keep detections whose class_name is in ["car"].
                                "type": "BinaryStatement",
                                "negate": False,
                                "left_operand": {
                                    "type": "DynamicOperand",
                                    "operand_name": "_",
                                    "operations": [
                                        {
                                            "type": "ExtractDetectionProperty",
                                            "property_name": "class_name",
                                        }
                                    ],
                                },
                                "comparator": {"type": "in (Sequence)"},
                                "right_operand": {
                                    "type": "StaticOperand",
                                    "value": ["car"],
                                },
                            }
                        ],
                    },
                }
            ],
            # No dynamic operands here — static right-hand side only.
            "operations_parameters": {},
        },
        {
            # Reduce the filtered instances to a 4-vertex polygon per image.
            "type": "roboflow_core/dynamic_zone@v1",
            "name": "dynamic_zone",
            "predictions": "$steps.detections_filter.predictions",
            "required_number_of_vertices": 4,
        },
        {
            "type": "roboflow_core/perspective_correction@v1",
            "name": "perspective_correction",
            "images": "$inputs.image",
            # Step-output selector for perspective_polygons (the fixed path).
            "perspective_polygons": "$steps.dynamic_zone.zones",
            "predictions": "$steps.model.predictions",
            "warp_image": True,
        },
        {
            "type": "roboflow_core/label_visualization@v1",
            "name": "label_visualization",
            "image": "$steps.perspective_correction.warped_image",
            "predictions": "$steps.perspective_correction.corrected_coordinates",
            "copy_image": False,
        },
    ],
    "outputs": [
        {
            "type": "JsonField",
            "name": "label_visualization",
            "coordinates_system": "own",
            "selector": "$steps.label_visualization.image",
        }
    ],
}


def test_workflow_with_perspective_correction(
    model_manager: ModelManager,
    car_image: np.ndarray,
    roboflow_api_key: str,
) -> None:
    """Perspective-correction workflow runs end-to-end over a batch of two
    images, with `perspective_polygons` fed from `$steps.dynamic_zone.zones`,
    and yields a warped visualization per input image."""
    # given
    workflow_init_parameters = {
        "workflows_core.model_manager": model_manager,
        "workflows_core.api_key": roboflow_api_key,
        "workflows_core.step_execution_mode": StepExecutionMode.LOCAL,
    }
    execution_engine = ExecutionEngine.init(
        workflow_definition=WORKFLOW_DEFINITION_PERSPECTIVE_CORRECTION,
        init_parameters=workflow_init_parameters,
        max_concurrent_steps=WORKFLOWS_MAX_CONCURRENT_STEPS,
    )

    # when
    result = execution_engine.run(
        runtime_parameters={
            "image": [car_image, car_image],
        }
    )

    # then
    assert isinstance(result, list), "Expected list to be delivered"
    # Fixed assertion message: two images go in, so two elements come out
    # (the previous message claimed "1 element ... one input image").
    assert len(result) == 2, "Expected 2 elements in the output for two input images"
    assert set(result[0].keys()) == {
        "label_visualization",
    }, "Expected all declared outputs to be delivered"
    assert set(result[1].keys()) == {
        "label_visualization",
    }, "Expected all declared outputs to be delivered"
    assert isinstance(result[0]["label_visualization"].numpy_image, np.ndarray)
    assert isinstance(result[1]["label_visualization"].numpy_image, np.ndarray)

0 comments on commit 4287309

Please sign in to comment.