Merge pull request #642 from roboflow/byte-tracker-block
Add Byte Tracker block, Line Counter block and Time In Zone block
grzegorz-roboflow committed Sep 17, 2024
2 parents 7639b9b + f4f791f commit b505887
Showing 21 changed files with 1,367 additions and 24 deletions.
2 changes: 1 addition & 1 deletion .github/workflows/integration_tests_inference_models.yml
@@ -25,7 +25,7 @@ jobs:
python-version: ${{ matrix.python-version }}
check-latest: true
- name: 🚧 Install GDAL OS library
-      run: sudo apt-get install libgdal-dev
+      run: sudo apt-get update && sudo apt-get install libgdal-dev
- name: 📦 Install dependencies
run: |
python -m pip install --upgrade pip
2 changes: 1 addition & 1 deletion .github/workflows/integration_tests_workflows_x86.yml
@@ -25,7 +25,7 @@ jobs:
python-version: ${{ matrix.python-version }}
check-latest: true
- name: 🚧 Install GDAL OS library
-      run: sudo apt-get install libgdal-dev
+      run: sudo apt-get update && sudo apt-get install libgdal-dev
- name: 📦 Install dependencies
run: |
python -m pip install --upgrade pip
@@ -26,7 +26,7 @@ jobs:
with:
python-version: "3.9"
- name: 🚧 Install GDAL OS library
-      run: sudo apt-get install libgdal-dev
+      run: sudo apt-get update && sudo apt-get install libgdal-dev
- name: 🛞 Create Wheels
run: |
make create_wheels
@@ -26,7 +26,7 @@ jobs:
with:
python-version: "3.9"
- name: 🚧 Install GDAL OS library
-      run: sudo apt-get install libgdal-dev
+      run: sudo apt-get update && sudo apt-get install libgdal-dev
- name: 🛞 Create Wheels
run: |
make create_wheels
112 changes: 112 additions & 0 deletions development/stream_interface/time_in_zone_demo.py
@@ -0,0 +1,112 @@
import os
from threading import Thread
from typing import List, Optional, Union

import cv2
import supervision as sv

from inference import InferencePipeline
from inference.core.interfaces.camera.entities import VideoFrame
from inference.core.interfaces.stream.watchdog import PipelineWatchDog, BasePipelineWatchDog
from inference.core.utils.drawing import create_tiles

STOP = False

TIME_IN_ZONE_WORKFLOW = {
"version": "1.0",
"inputs": [
{"type": "WorkflowImage", "name": "image"},
{"type": "WorkflowVideoMetadata", "name": "video_metadata"},
{"type": "WorkflowParameter", "name": "zone"},
],
"steps": [
{
"type": "ObjectDetectionModel",
"name": "people_detector",
"image": "$inputs.image",
"model_id": "yolov8n-640",
"confidence": 0.6,
},
{
"type": "roboflow_core/byte_tracker@v1",
"name": "byte_tracker",
"detections": "$steps.people_detector.predictions",
"metadata": "$inputs.video_metadata"
},
{
"type": "roboflow_core/time_in_zone@v1",
"name": "time_in_zone",
"detections": f"$steps.byte_tracker.tracked_detections",
"metadata": "$inputs.video_metadata",
"zone": "$inputs.zone",
"image": "$inputs.image",
},
{
"type": "roboflow_core/label_visualization@v1",
"name": "label_visualization",
"image": "$inputs.image",
"predictions": "$steps.time_in_zone.timed_detections",
"text": "Time In Zone",
}
],
"outputs": [
{"type": "JsonField", "name": "label_visualization", "selector": "$steps.label_visualization.image"},
],
}


def main() -> None:
global STOP
watchdog = BasePipelineWatchDog()
pipeline = InferencePipeline.init_with_workflow(
video_reference=os.environ["VIDEO_REFERENCE"],
workflow_specification=TIME_IN_ZONE_WORKFLOW,
watchdog=watchdog,
on_prediction=workflows_sink,
workflows_parameters={
"zone": [(0, 0), (1000, 0), (1000, 1000), (0, 1000)],
}
)
control_thread = Thread(target=command_thread, args=(pipeline, watchdog))
control_thread.start()
pipeline.start()
STOP = True
pipeline.join()


def command_thread(pipeline: InferencePipeline, watchdog: PipelineWatchDog) -> None:
global STOP
while not STOP:
key = input()
if key == "i":
print(watchdog.get_report())
if key == "t":
pipeline.terminate()
STOP = True
elif key == "p":
pipeline.pause_stream()
elif key == "m":
pipeline.mute_stream()
elif key == "r":
pipeline.resume_stream()


def workflows_sink(
predictions: Union[Optional[dict], List[Optional[dict]]],
video_frames: Union[Optional[VideoFrame], List[Optional[VideoFrame]]],
) -> None:
images_to_show = []
if not isinstance(predictions, list):
predictions = [predictions]
video_frames = [video_frames]
for prediction, frame in zip(predictions, video_frames):
if prediction is None or frame is None:
continue
images_to_show.append(prediction["label_visualization"].numpy_image)
tiles = create_tiles(images=images_to_show)
cv2.imshow(f"Predictions", tiles)
cv2.waitKey(1)


if __name__ == '__main__':
main()
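For anyone trying the demo locally, a minimal launcher is sketched below. This is illustrative only: the video path is an assumption, and the script itself expects nothing beyond the VIDEO_REFERENCE environment variable read in main().

# Hypothetical launcher for the demo above; VIDEO_REFERENCE can be a file path,
# an RTSP URL, or a device index understood by the video source.
import os
import runpy

os.environ["VIDEO_REFERENCE"] = "videos/people_walking.mp4"  # illustrative path

# Runs the script as __main__, which calls main(); type "t" + Enter in the terminal to terminate.
runpy.run_path("development/stream_interface/time_in_zone_demo.py", run_name="__main__")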
140 changes: 140 additions & 0 deletions inference/core/workflows/core_steps/analytics/line_counter/v1.py
@@ -0,0 +1,140 @@
from typing import Dict, List, Optional, Tuple, Union

import supervision as sv
from pydantic import ConfigDict, Field
from typing_extensions import Literal, Type

from inference.core.workflows.execution_engine.entities.base import (
OutputDefinition,
VideoMetadata,
)
from inference.core.workflows.execution_engine.entities.types import (
INSTANCE_SEGMENTATION_PREDICTION_KIND,
INTEGER_KIND,
LIST_OF_VALUES_KIND,
OBJECT_DETECTION_PREDICTION_KIND,
STRING_KIND,
StepOutputSelector,
WorkflowParameterSelector,
WorkflowVideoMetadataSelector,
)
from inference.core.workflows.prototypes.block import (
BlockResult,
WorkflowBlock,
WorkflowBlockManifest,
)

OUTPUT_KEY_COUNT_IN: str = "count_in"
OUTPUT_KEY_COUNT_OUT: str = "count_out"
IN: str = "in"
OUT: str = "out"
DETECTIONS_IN_OUT_PARAM: str = "in_out"
SHORT_DESCRIPTION = "Count detections crossing a line"
LONG_DESCRIPTION = """
The `LineCounter` is an analytics block designed to count objects crossing a line.
The block requires detections to be tracked (i.e. each object must have a unique
tracker_id assigned, which persists between frames).
"""


class LineCounterManifest(WorkflowBlockManifest):
model_config = ConfigDict(
json_schema_extra={
"name": "Time in zone",
"version": "v1",
"short_description": SHORT_DESCRIPTION,
"long_description": LONG_DESCRIPTION,
"license": "Apache-2.0",
"block_type": "analytics",
}
)
type: Literal["roboflow_core/line_counter@v1"]
metadata: WorkflowVideoMetadataSelector
detections: StepOutputSelector(
kind=[
OBJECT_DETECTION_PREDICTION_KIND,
INSTANCE_SEGMENTATION_PREDICTION_KIND,
]
) = Field( # type: ignore
description="Predictions",
default=None,
examples=["$steps.object_detection_model.predictions"],
)

    line_segment: Union[list, StepOutputSelector(kind=[LIST_OF_VALUES_KIND]), WorkflowParameterSelector(kind=[LIST_OF_VALUES_KIND])] = Field(  # type: ignore
        description="Line segments (one for each batch) in the format [(x1, y1), (x2, y2)];"
        " the in/out direction of the line zone follows the vector normal to [(x1, y1), (x2, y2)]",
        examples=["$inputs.zones"],
    )
triggering_anchor: Union[str, WorkflowParameterSelector(kind=[STRING_KIND])] = Field( # type: ignore
description=f"Triggering anchor. Allowed values: {', '.join(sv.Position.list())}",
default="CENTER",
examples=["CENTER"],
)

@classmethod
def describe_outputs(cls) -> List[OutputDefinition]:
return [
OutputDefinition(
name=OUTPUT_KEY_COUNT_IN,
kind=[INTEGER_KIND],
),
OutputDefinition(
name=OUTPUT_KEY_COUNT_OUT,
kind=[INTEGER_KIND],
),
]

@classmethod
def get_execution_engine_compatibility(cls) -> Optional[str]:
return ">=1.0.0,<2.0.0"


class LineCounterBlockV1(WorkflowBlock):
def __init__(self):
self._batch_of_line_zones: Dict[str, sv.LineZone] = {}

@classmethod
def get_manifest(cls) -> Type[WorkflowBlockManifest]:
return LineCounterManifest

def run(
self,
detections: sv.Detections,
metadata: VideoMetadata,
line_segment: List[Tuple[int, int]],
triggering_anchor: str = "CENTER",
) -> BlockResult:
if detections.tracker_id is None:
raise ValueError(
f"tracker_id not initialized, {self.__class__.__name__} requires detections to be tracked"
)
if metadata.video_identifier not in self._batch_of_line_zones:
if not isinstance(line_segment, list) or len(line_segment) != 2:
raise ValueError(
f"{self.__class__.__name__} requires line zone to be a list containing exactly 2 points"
)
            if any(not isinstance(e, (list, tuple)) or len(e) != 2 for e in line_segment):
                raise ValueError(
                    f"{self.__class__.__name__} requires each point of line zone to be a list or tuple containing exactly 2 coordinates"
                )
if any(
not isinstance(e[0], (int, float)) or not isinstance(e[1], (int, float))
for e in line_segment
):
raise ValueError(
f"{self.__class__.__name__} requires each coordinate of line zone to be a number"
)
self._batch_of_line_zones[metadata.video_identifier] = sv.LineZone(
start=sv.Point(*line_segment[0]),
end=sv.Point(*line_segment[1]),
triggering_anchors=[sv.Position(triggering_anchor)],
)
line_zone = self._batch_of_line_zones[metadata.video_identifier]

line_zone.trigger(detections=detections)

return {
OUTPUT_KEY_COUNT_IN: line_zone.in_count,
OUTPUT_KEY_COUNT_OUT: line_zone.out_count,
}
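For context, the sketch below shows one way the new line_counter block could be wired into a workflow, mirroring the style of the time-in-zone demo above. The block types, field names, and the count_in / count_out outputs come from the manifests in this PR; the detector step, the "line" input name, and the example coordinates are illustrative assumptions only.

# Illustrative only: a workflow definition chaining detection -> tracking -> line counting.
LINE_COUNTER_WORKFLOW = {
    "version": "1.0",
    "inputs": [
        {"type": "WorkflowImage", "name": "image"},
        {"type": "WorkflowVideoMetadata", "name": "video_metadata"},
        {"type": "WorkflowParameter", "name": "line"},  # e.g. [(100, 500), (1800, 500)]
    ],
    "steps": [
        {
            "type": "ObjectDetectionModel",
            "name": "people_detector",
            "image": "$inputs.image",
            "model_id": "yolov8n-640",
        },
        {
            "type": "roboflow_core/byte_tracker@v1",
            "name": "byte_tracker",
            "detections": "$steps.people_detector.predictions",
            "metadata": "$inputs.video_metadata",
        },
        {
            "type": "roboflow_core/line_counter@v1",
            "name": "line_counter",
            "detections": "$steps.byte_tracker.tracked_detections",
            "metadata": "$inputs.video_metadata",
            "line_segment": "$inputs.line",
            "triggering_anchor": "CENTER",
        },
    ],
    "outputs": [
        {"type": "JsonField", "name": "count_in", "selector": "$steps.line_counter.count_in"},
        {"type": "JsonField", "name": "count_out", "selector": "$steps.line_counter.count_out"},
    ],
}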
