Diego Fernandez committed
Commit fca2efd
Parent: bd2ff06

feat: initial version

Files changed (4)
  1. .gitignore +156 -0
  2. app.py +17 -3
  3. inference.py +102 -0
  4. inference_utils.py +163 -0
.gitignore ADDED
@@ -0,0 +1,156 @@
+ # Byte-compiled / optimized / DLL files
+ __pycache__/
+ *.py[cod]
+ *$py.class
+
+ # C extensions
+ *.so
+
+ # Distribution / packaging
+ .Python
+ build/
+ develop-eggs/
+ dist/
+ downloads/
+ eggs/
+ .eggs/
+ lib/
+ lib64/
+ parts/
+ sdist/
+ var/
+ wheels/
+ share/python-wheels/
+ *.egg-info/
+ .installed.cfg
+ *.egg
+ MANIFEST
+
+ # PyInstaller
+ # Usually these files are written by a python script from a template
+ # before PyInstaller builds the exe, so as to inject date/other infos into it.
+ *.manifest
+ *.spec
+
+ # Installer logs
+ pip-log.txt
+ pip-delete-this-directory.txt
+
+ # Unit test / coverage reports
+ htmlcov/
+ .tox/
+ .nox/
+ .coverage
+ .coverage.*
+ .cache
+ nosetests.xml
+ coverage.xml
+ *.cover
+ *.py,cover
+ .hypothesis/
+ .pytest_cache/
+ cover/
+
+ # Translations
+ *.mo
+ *.pot
+
+ # Django stuff:
+ *.log
+ local_settings.py
+ db.sqlite3
+ db.sqlite3-journal
+
+ # Flask stuff:
+ instance/
+ .webassets-cache
+
+ # Scrapy stuff:
+ .scrapy
+
+ # Sphinx documentation
+ docs/_build/
+
+ # PyBuilder
+ .pybuilder/
+ target/
+
+ # Jupyter Notebook
+ .ipynb_checkpoints
+
+ # IPython
+ profile_default/
+ ipython_config.py
+
+ # pyenv
+ # For a library or package, you might want to ignore these files since the code is
+ # intended to run in multiple environments; otherwise, check them in:
+ # .python-version
+
+ # pipenv
+ # According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
+ # However, in case of collaboration, if having platform-specific dependencies or dependencies
+ # having no cross-platform support, pipenv may install dependencies that don't work, or not
+ # install all needed dependencies.
+ #Pipfile.lock
+
+ # poetry
+ # Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
+ # This is especially recommended for binary packages to ensure reproducibility, and is more
+ # commonly ignored for libraries.
+ # https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
+ #poetry.lock
+
+ # pdm
+ # Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
+ #pdm.lock
+ # pdm stores project-wide configurations in .pdm.toml, but it is recommended to not include it
+ # in version control.
+ # https://pdm.fming.dev/#use-with-ide
+ .pdm.toml
+
+ # PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
+ __pypackages__/
+
+ # Celery stuff
+ celerybeat-schedule
+ celerybeat.pid
+
+ # SageMath parsed files
+ *.sage.py
+
+ # Environments
+ .env
+ .venv
+ env/
+ venv/
+ ENV/
+ env.bak/
+ venv.bak/
+
+ # Spyder project settings
+ .spyderproject
+ .spyproject
+
+ # Rope project settings
+ .ropeproject
+
+ # mkdocs documentation
+ /site
+
+ # mypy
+ .mypy_cache/
+ .dmypy.json
+ dmypy.json
+
+ # Pyre type checker
+ .pyre/
+
+ # pytype static type analyzer
+ .pytype/
+
+ # Cython debug symbols
+ cython_debug/
+
+ # Models
+ *.pt
app.py CHANGED
@@ -1,8 +1,22 @@
  import gradio as gr
 
-
- def greet(name):
-     return "Hello " + name + "!!"
-
- iface = gr.Interface(fn=greet, inputs="text", outputs="text")
+ from inference import inference
+
+ dd_model = gr.Dropdown(choices=["YoloV7"], value="YoloV7", label="Model")
+
+ cb_motion_estimation = gr.Checkbox(value=True, label="Motion estimation")
+
+ cb_path_draw = gr.Checkbox(value=True, label="Drawing paths")
+
+ dd_track_points = gr.Dropdown(
+     choices=["Boxes", "Centroid"], value="Boxes", label="Detections style"
+ )
+
+ slide_threshold = gr.Slider(minimum=0, maximum=1, value=0.25, label="Model confidence threshold")
+
+ inputs = ["video", dd_model, cb_motion_estimation, cb_path_draw, dd_track_points, slide_threshold]
+ outputs = "playablevideo"
+
+ iface = gr.Interface(fn=inference, inputs=inputs, outputs=outputs)
+
  iface.launch()
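
Gradio passes each component's value to the handler positionally, so the order of the `inputs` list above must line up with the signature of `inference` in inference.py. A minimal sketch of the resulting call, where "sample.mp4" is a hypothetical local file standing in for the uploaded video:

# Illustrative only: mirrors how the Interface above invokes the handler.
from inference import inference

result_path = inference(
    "sample.mp4",  # input_video: file path of the uploaded "video" input
    "YoloV7",      # model: dd_model selection
    True,          # motion_estimation: cb_motion_estimation
    True,          # drawing_paths: cb_path_draw
    "Boxes",       # track_points: dd_track_points
    0.25,          # model_threshold: slide_threshold
)
print(result_path)  # path of the rendered output video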
inference.py ADDED
@@ -0,0 +1,102 @@
+ import argparse
+ import glob
+ import os
+
+ import numpy as np
+
+ from inference_utils import (
+     YOLO,
+     ModelsPath,
+     Style,
+     center,
+     clean_videos,
+     draw,
+     euclidean_distance,
+     iou,
+     yolo_detections_to_norfair_detections,
+ )
+ from norfair.norfair import Paths, Tracker, Video
+ from norfair.norfair.camera_motion import (
+     HomographyTransformationGetter,
+     MotionEstimator,
+ )
+
+ DISTANCE_THRESHOLD_BBOX: float = 3.33
+ DISTANCE_THRESHOLD_CENTROID: int = 30
+ MAX_DISTANCE: int = 10000
+
+ parser = argparse.ArgumentParser(description="Track objects in a video.")
+ parser.add_argument("--img-size", type=int, default=720, help="YOLOv7 inference size (pixels)")
+ parser.add_argument(
+     "--iou-threshold", type=float, default=0.45, help="YOLOv7 IOU threshold for NMS"
+ )
+ parser.add_argument(
+     "--classes", nargs="+", type=int, help="Filter by class: --classes 0, or --classes 0 2 3"
+ )
+ args = parser.parse_args()
+
+
+ def inference(
+     input_video: str,
+     model: str,
+     motion_estimation: bool,
+     drawing_paths: bool,
+     track_points: str,
+     model_threshold: float,
+ ):
+     clean_videos("tmp")
+
+     coord_transformations = None
+     paths_drawer = None
+     track_points = Style[track_points].value
+     model = YOLO(ModelsPath[model].value, device="cuda")
+     video = Video(input_path=input_video, output_path="tmp")
+
+     if motion_estimation:
+         transformations_getter = HomographyTransformationGetter()
+
+         motion_estimator = MotionEstimator(
+             max_points=500,
+             min_distance=7,
+             transformations_getter=transformations_getter,
+             draw_flow=True,
+         )
+
+     distance_function = iou if track_points == "bbox" else euclidean_distance
+     distance_threshold = (
+         DISTANCE_THRESHOLD_BBOX if track_points == "bbox" else DISTANCE_THRESHOLD_CENTROID
+     )
+     tracker = Tracker(
+         distance_function=distance_function,
+         distance_threshold=distance_threshold,
+     )
+
+     if drawing_paths:
+         paths_drawer = Paths(center, attenuation=0.01)
+
+     for frame in video:
+         yolo_detections = model(
+             frame,
+             conf_threshold=model_threshold,
+             iou_threshold=args.iou_threshold,
+             image_size=args.img_size,
+             classes=args.classes,
+         )
+
+         mask = np.ones(frame.shape[:2], frame.dtype)
+
+         if motion_estimation:
+             coord_transformations = motion_estimator.update(frame, mask)
+
+         detections = yolo_detections_to_norfair_detections(
+             yolo_detections, track_points=track_points
+         )
+
+         tracked_objects = tracker.update(
+             detections=detections, coord_transformations=coord_transformations
+         )
+
+         frame = draw(paths_drawer, track_points, frame, detections, tracked_objects)
+         video.write(frame)
+
+     return f"{input_video[1:-4]}_out.mp4"
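
The UI labels are translated into Norfair's vocabulary through the `Style` enum (defined in inference_utils.py below), which in turn selects the distance function and threshold for the tracker. A quick, illustrative check of that mapping, assuming the Space's dependencies are installed:

from inference_utils import Style, euclidean_distance, iou

for choice in ("Boxes", "Centroid"):
    style = Style[choice].value
    distance = iou if style == "bbox" else euclidean_distance
    threshold = 3.33 if style == "bbox" else 30  # mirrors the constants above
    print(f"{choice} -> {style}: {distance.__name__}, threshold {threshold}")
# Boxes -> bbox: iou, threshold 3.33
# Centroid -> centroid: euclidean_distance, threshold 30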
inference_utils.py ADDED
@@ -0,0 +1,163 @@
+ import argparse
+ import glob
+ import os
+ from enum import Enum
+ from typing import List, Optional, Union
+
+ import numpy as np
+ import torch
+ import torchvision.ops.boxes as bops
+
+ from norfair import norfair
+ from norfair.norfair import Detection
+
+ DISTANCE_THRESHOLD_BBOX: float = 3.33
+ DISTANCE_THRESHOLD_CENTROID: int = 30
+ MAX_DISTANCE: int = 10000
+
+
+ class ModelsPath(Enum):
+     YoloV7 = "models/yolov7.pt"
+
+
+ class Style(Enum):
+     Boxes = "bbox"
+     Centroid = "centroid"
+
+
+ class YOLO:
+     def __init__(self, model_path: str, device: Optional[str] = None):
+         if device is not None and "cuda" in device and not torch.cuda.is_available():
+             raise Exception("Selected device='cuda', but cuda is not available to Pytorch.")
+         # automatically set device if it's None
+         elif device is None:
+             device = "cuda:0" if torch.cuda.is_available() else "cpu"
+
+         if not os.path.exists(model_path):
+             os.system(
+                 f"wget https://github.com/WongKinYiu/yolov7/releases/download/v0.1/{os.path.basename(model_path)} -O {model_path}"
+             )
+
+         # load model
+         try:
+             self.model = torch.hub.load("WongKinYiu/yolov7", "custom", model_path)
+         except Exception as e:
+             raise Exception(f"Failed to load model from {model_path}") from e
+
+     def __call__(
+         self,
+         img: Union[str, np.ndarray],
+         conf_threshold: float = 0.25,
+         iou_threshold: float = 0.45,
+         image_size: int = 720,
+         classes: Optional[List[int]] = None,
+     ) -> torch.Tensor:
+
+         self.model.conf = conf_threshold
+         self.model.iou = iou_threshold
+         if classes is not None:
+             self.model.classes = classes
+         detections = self.model(img, size=image_size)
+         return detections
+
+
+ def euclidean_distance(detection, tracked_object):
+     return np.linalg.norm(detection.points - tracked_object.estimate)
+
+
+ def center(points):
+     return [np.mean(np.array(points), axis=0)]
+
+
+ def iou_pytorch(detection, tracked_object):
+     # Slower but simpler version of iou
+
+     detection_points = np.concatenate([detection.points[0], detection.points[1]])
+     tracked_object_points = np.concatenate([tracked_object.estimate[0], tracked_object.estimate[1]])
+
+     box_a = torch.tensor([detection_points], dtype=torch.float)
+     box_b = torch.tensor([tracked_object_points], dtype=torch.float)
+     iou = bops.box_iou(box_a, box_b)
+
+     # Since 0 <= IoU <= 1, we define 1/IoU as a distance.
+     # Distance values will be in [1, inf)
+     return float(1 / iou if iou else MAX_DISTANCE)
+
+
+ def iou(detection, tracked_object):
+     # Detection points will be box A
+     # Tracked object's points will be box B.
+
+     box_a = np.concatenate([detection.points[0], detection.points[1]])
+     box_b = np.concatenate([tracked_object.estimate[0], tracked_object.estimate[1]])
+
+     x_a = max(box_a[0], box_b[0])
+     y_a = max(box_a[1], box_b[1])
+     x_b = min(box_a[2], box_b[2])
+     y_b = min(box_a[3], box_b[3])
+
+     # Compute the area of the intersection rectangle
+     inter_area = max(0, x_b - x_a + 1) * max(0, y_b - y_a + 1)
+
+     # Compute the area of both the prediction and tracker
+     # rectangles
+     box_a_area = (box_a[2] - box_a[0] + 1) * (box_a[3] - box_a[1] + 1)
+     box_b_area = (box_b[2] - box_b[0] + 1) * (box_b[3] - box_b[1] + 1)
+
+     # Compute the intersection over union by taking the intersection
+     # area and dividing it by the sum of prediction + tracker
+     # areas - the intersection area
+     iou = inter_area / float(box_a_area + box_b_area - inter_area)
+
+     # Since 0 <= IoU <= 1, we define 1/IoU as a distance.
+     # Distance values will be in [1, inf)
+     return 1 / iou if iou else MAX_DISTANCE
+
+
+ def yolo_detections_to_norfair_detections(
+     yolo_detections: torch.Tensor, track_points: str = "centroid"  # bbox or centroid
+ ) -> List[Detection]:
+     """Convert YOLO detections to Norfair detections."""
+     norfair_detections: List[Detection] = []
+
+     if track_points == "centroid":
+         detections_as_xywh = yolo_detections.xywh[0]
+         for detection_as_xywh in detections_as_xywh:
+             centroid = np.array([detection_as_xywh[0].item(), detection_as_xywh[1].item()])
+             scores = np.array([detection_as_xywh[4].item()])
+             norfair_detections.append(Detection(points=centroid, scores=scores))
+     elif track_points == "bbox":
+         detections_as_xyxy = yolo_detections.xyxy[0]
+         for detection_as_xyxy in detections_as_xyxy:
+             bbox = np.array(
+                 [
+                     [detection_as_xyxy[0].item(), detection_as_xyxy[1].item()],
+                     [detection_as_xyxy[2].item(), detection_as_xyxy[3].item()],
+                 ]
+             )
+             scores = np.array([detection_as_xyxy[4].item(), detection_as_xyxy[4].item()])
+             norfair_detections.append(Detection(points=bbox, scores=scores))
+
+     return norfair_detections
+
+
+ def clean_videos(path: str):
+     # Remove past videos
+     files = glob.glob(f"{path}/*")
+     for file in files:
+         if file.endswith(".mp4"):
+             os.remove(file)
+
+
+ def draw(paths_drawer, track_points, frame, detections, tracked_objects):
+     if track_points == "centroid":
+         norfair.draw_points(frame, detections)
+         norfair.draw_tracked_objects(frame, tracked_objects)
+     elif track_points == "bbox":
+         norfair.draw_boxes(frame, detections)
+         norfair.draw_tracked_boxes(frame, tracked_objects)
+
+     if paths_drawer is not None:
+         frame = paths_drawer.draw(frame, tracked_objects)
+
+     return frame
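
As a smoke test for the two distance functions, stand-in objects with the attributes Norfair supplies (`detection.points`, `tracked_object.estimate`) are enough; the coordinates below are made up for illustration:

import numpy as np
from types import SimpleNamespace

from inference_utils import euclidean_distance, iou

det = SimpleNamespace(points=np.array([[0.0, 0.0], [10.0, 10.0]]))    # box A as (x1, y1), (x2, y2)
obj = SimpleNamespace(estimate=np.array([[5.0, 5.0], [15.0, 15.0]]))  # box B

print(iou(det, obj))                 # ~5.72: 1/IoU, larger means less overlap
print(euclidean_distance(det, obj))  # 10.0: L2 norm between the point arrays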