import os

import gradio as gr

from inference import inference
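# `inference` is defined elsewhere in this repo; judging from the Interface
# wiring below, it is assumed to take the five input values positionally,
# roughly:
#   inference(video_path, model_name, feature_indices, track_points, threshold)
# and to return a path to the rendered output video. This is an inferred
# sketch, not the actual signature.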

# Video input; webcam recordings are kept unmirrored.
input_video = gr.Video(mirror_webcam=False)

# Object detection model to run.
dd_model = gr.Dropdown(choices=["YOLOv7", "YOLOv7 Tiny"], value="YOLOv7", label="Model")

# Optional pipeline features, both enabled by default.
features = gr.CheckboxGroup(
    choices=["Track camera movement", "Draw objects paths"],
    value=["Track camera movement", "Draw objects paths"],
    label="Features",
    type="index",
)
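# NOTE: with type="index", gradio passes the selection to `inference` as a
# list of choice indices (e.g. [0, 1] when both boxes are ticked) rather than
# the label strings.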

# Standalone checkbox duplicating the "Draw objects paths" feature; it is not
# included in the interface inputs below.
cb_path_draw = gr.Checkbox(value=True, label="Draw objects paths")

# How detections are represented: the full bounding box or its centroid.
dd_track_points = gr.Dropdown(
    choices=["Bounding box", "Centroid"], value="Bounding box", label="Detections style"
)

# Minimum confidence score for detections (0 to 1).
slide_threshold = gr.Slider(minimum=0, maximum=1, value=0.25, label="Model confidence threshold")

# Inputs are passed to `inference` positionally, in this order.
input_components = [
    input_video,
    dd_model,
    features,
    dd_track_points,
    slide_threshold,
]

output_components = "playablevideo"
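# "playablevideo" is a gradio string shortcut; it should resolve to a Video
# output re-encoded to a browser-playable format (mp4).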

# One example row per file in the examples/ directory.
example_list = [["examples/" + example] for example in os.listdir("examples")]

iface = gr.Interface(
    fn=inference,
    inputs=input_components,
    outputs=output_components,
    examples=example_list,
    cache_examples=True,
)
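# With cache_examples=True, gradio runs `inference` on every example video at
# startup and serves the cached results.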

iface.launch()
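# launch() serves the app locally; iface.launch(share=True) would additionally
# create a temporary public link.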