joselobenitezg committed
Commit afe246e
Parent(s): 94f04b7

update blocks

Files changed (3):
  1. app.py +45 -58
  2. config.py +26 -1
  3. sapiens +0 -1
app.py CHANGED
@@ -1,4 +1,4 @@
-# Part of the source code is in: fashn-ai/sapiens-body-part-segmentation
+# Part of the source code is from: fashn-ai/sapiens-body-part-segmentation
 import os
 
 import gradio as gr
@@ -9,23 +9,13 @@ from gradio.themes.utils import sizes
 from PIL import Image
 from torchvision import transforms
 from utils.vis_utils import get_palette, visualize_mask_with_overlay
+from config import SAPIENS_LITE_MODELS_PATH
 
 if torch.cuda.is_available() and torch.cuda.get_device_properties(0).major >= 8:
     torch.backends.cuda.matmul.allow_tf32 = True
     torch.backends.cudnn.allow_tf32 = True
 
-ASSETS_DIR = os.path.join(os.path.dirname(__file__), "assets")
-
-
-CHECKPOINTS_DIR = os.path.join(ASSETS_DIR, "checkpoints")
-
-CHECKPOINTS = {
-    "0.3B": "sapiens_0.3b_goliath_best_goliath_mIoU_7673_epoch_194_torchscript.pt2",
-    "0.6B": "sapiens_0.6b_goliath_best_goliath_mIoU_7777_epoch_178_torchscript.pt2",
-    "1B": "sapiens_1b_goliath_best_goliath_mIoU_7994_epoch_151_torchscript.pt2",
-    "2B": "sapiens_2b_goliath_best_goliath_mIoU_8179_epoch_181_torchscript.pt2",
-}
-
+CHECKPOINTS_DIR = "checkpoints"
 
 def load_model(checkpoint_name: str):
     checkpoint_path = os.path.join(CHECKPOINTS_DIR, CHECKPOINTS[checkpoint_name])
@@ -35,8 +25,7 @@ def load_model(checkpoint_name: str):
     return model
 
 
-MODELS = {name: load_model(name) for name in CHECKPOINTS.keys()}
-
+#MODELS = {name: load_model(name) for name in CHECKPOINTS.keys()}
 
 @torch.inference_mode()
 def run_model(model, input_tensor, height, width):
@@ -53,8 +42,6 @@ transform_fn = transforms.Compose(
         transforms.Normalize(mean=[0.485, 0.456, 0.406], std=[0.229, 0.224, 0.225]),
     ]
 )
-# ----------------- CORE FUNCTION ----------------- #
-
 
 @spaces.GPU
 def segment(image: Image.Image, model_name: str) -> Image.Image:
@@ -68,47 +55,47 @@ def segment(image: Image.Image, model_name: str) -> Image.Image:
 
 
 # ----------------- GRADIO UI ----------------- #
-
-
-with open("banner.html", "r") as file:
-    banner = file.read()
-with open("tips.html", "r") as file:
-    tips = file.read()
-
-CUSTOM_CSS = """
-.image-container img {
-    max-width: 512px;
-    max-height: 512px;
-    margin: 0 auto;
-    border-radius: 0px;
-.gradio-container {background-color: #fafafa}
-"""
-
-with gr.Blocks(css=CUSTOM_CSS, theme=gr.themes.Monochrome(radius_size=sizes.radius_md)) as demo:
-    gr.HTML(banner)
-    gr.HTML(tips)
-    with gr.Row():
-        with gr.Column():
-            input_image = gr.Image(label="Input Image", type="pil", format="png")
-            model_name = gr.Dropdown(
-                label="Model Version",
-                choices=list(CHECKPOINTS.keys()),
-                value="0.3B",
-            )
-
-            example_model = gr.Examples(
-                inputs=input_image,
-                examples_per_page=10,
-                examples=[
-                    os.path.join(ASSETS_DIR, "examples", img)
-                    for img in os.listdir(os.path.join(ASSETS_DIR, "examples"))
-                ],
-            )
-        with gr.Column():
-            result_image = gr.Image(label="Segmentation Result", format="png")
-            run_button = gr.Button("Run")
-
-    gr.Image(os.path.join(ASSETS_DIR, "legend.png"), label="Legend", type="filepath")
+def update_model_choices(task):
+    model_choices = list(SAPIENS_LITE_MODELS_PATH[task.lower()].keys())
+    return gr.Dropdown(choices=model_choices, value=model_choices[0] if model_choices else None)
+
+with gr.Blocks(theme=gr.themes.Monochrome(radius_size=sizes.radius_md)) as demo:
+    gr.Markdown("# Sapiens Arena 🤸🏽‍♂️ - WIP devmode- Not yet available")
+    with gr.Tabs():
+        with gr.TabItem('Image'):
+            with gr.Row():
+                with gr.Column():
+                    input_image = gr.Image(label="Input Image", type="pil", format="png")
+                    select_task = gr.Radio(
+                        ["Seg", "Pose", "Depth", "Normal"],
+                        label="Task",
+                        info="Choose the task to perfom",
+                        choices=list(SAPIENS_LITE_MODELS_PATH.keys())
+                    )
+                    model_name = gr.Dropdown(
+                        label="Model Version",
+                        choices=list(SAPIENS_LITE_MODELS_PATH["seg"].keys()),
+                        value="0.3B",
+                    )
+
+                    # example_model = gr.Examples(
+                    #     inputs=input_image,
+                    #     examples_per_page=10,
+                    #     examples=[
+                    #         os.path.join(ASSETS_DIR, "examples", img)
+                    #         for img in os.listdir(os.path.join(ASSETS_DIR, "examples"))
+                    #     ],
+                    # )
+                with gr.Column():
+                    result_image = gr.Image(label="Segmentation Result", format="png")
+                    run_button = gr.Button("Run")
+
+            #gr.Image(os.path.join(ASSETS_DIR, "legend.png"), label="Legend", type="filepath")
+
+        with gr.TabItem('Video'):
+            gr.Markdown("In construction")
+
+    select_task.change(fn=update_model_choices, inputs=select_task, outputs=model_name)
 
     run_button.click(
         fn=segment,
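
Note on the resulting app.py: with the eager MODELS cache commented out, load_model still indexes the old CHECKPOINTS dict that this commit removes, while the UI now reads its choices from SAPIENS_LITE_MODELS_PATH. A minimal loader keyed on the new config could look like the sketch below; the task/model signature, the lazy cache, and the torch.jit.load call are illustrative assumptions, not part of this commit.

# Sketch only, not from the commit: lazily load a TorchScript checkpoint
# for a given task/model pair using the new SAPIENS_LITE_MODELS_PATH table.
import torch

from config import SAPIENS_LITE_MODELS_PATH

_loaded_models = {}  # hypothetical lazy cache, replacing the eager MODELS dict

def load_model(task: str, model_version: str):
    key = (task, model_version)
    if key not in _loaded_models:
        # e.g. "checkpoints/sapiens_0.3b/sapiens_0.3b_goliath_..._torchscript.pt2"
        checkpoint_path = SAPIENS_LITE_MODELS_PATH[task][model_version]
        model = torch.jit.load(checkpoint_path)  # the .pt2 files are TorchScript exports
        model.eval()
        if torch.cuda.is_available():
            model = model.to("cuda")
        _loaded_models[key] = model
    return _loaded_models[key]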
config.py CHANGED
@@ -1,4 +1,4 @@
-SAPIENS_LITE_MODELS = {
+SAPIENS_LITE_MODELS_URL = {
     "depth": {
         "sapiens_0.3b": "https://huggingface.co/facebook/sapiens/resolve/main/sapiens_lite_host/torchscript/depth/checkpoints/sapiens_0.3b/sapiens_0.3b_render_people_epoch_100_torchscript.pt2?download=true",
         "sapiens_0.6b": "https://huggingface.co/facebook/sapiens/resolve/main/sapiens_lite_host/torchscript/depth/checkpoints/sapiens_0.6b/sapiens_0.6b_render_people_epoch_70_torchscript.pt2?download=true",
@@ -23,6 +23,31 @@ SAPIENS_LITE_MODELS = {
     }
 }
 
+SAPIENS_LITE_MODELS_PATH = {
+    "depth": {
+        "sapiens_0.3b": "checkpoints/sapiens_0.3b/sapiens_0.3b_render_people_epoch_100_torchscript.pt2",
+        "sapiens_0.6b": "checkpoints/sapiens_0.6b/sapiens_0.6b_render_people_epoch_70_torchscript.pt2",
+        "sapiens_1b": "checkpoints/sapiens_1b/sapiens_1b_render_people_epoch_88_torchscript.pt2",
+        "sapiens_2b": "checkpoints/sapiens_2b/sapiens_2b_render_people_epoch_25_torchscript.pt2"
+    },
+    "detector": {},
+    "normal": {
+        "sapiens_0.3b": "checkpoints/sapiens_0.3b/sapiens_0.3b_normal_render_people_epoch_66_torchscript.pt2",
+        "sapiens_0.6b": "checkpoints/sapiens_0.6b/sapiens_0.6b_normal_render_people_epoch_200_torchscript.pt2",
+        "sapiens_1b": "checkpoints/sapiens_1b/sapiens_1b_normal_render_people_epoch_115_torchscript.pt2",
+        "sapiens_2b": "checkpoints/sapiens_2b/sapiens_2b_normal_render_people_epoch_70_torchscript.pt2"
+    },
+    "pose": {
+        "sapiens_1b": "checkpoints/sapiens_1b/sapiens_1b_goliath_best_goliath_AP_640_torchscript.pt2"
+    },
+    "seg": {
+        "sapiens_0.3b": "checkpoints/sapiens_0.3b/sapiens_0.3b_goliath_best_goliath_mIoU_7673_epoch_194_torchscript.pt2",
+        "sapiens_0.6b": "checkpoints/sapiens_0.6b/sapiens_0.6b_goliath_best_goliath_mIoU_7777_epoch_178_torchscript.pt2",
+        "sapiens_1b": "checkpoints/sapiens_1b/sapiens_1b_goliath_best_goliath_mIoU_7994_epoch_151_torchscript.pt2",
+        "sapiens_2b": "checkpoints/sapiens_2b/sapiens_2b_goliath_best_goliath_mIoU_8179_epoch_181_torchscript.pt2"
+    }
+}
+
 LABELS_TO_IDS = {
     "Background": 0,
     "Apparel": 1,
sapiens DELETED
@@ -1 +0,0 @@
-Subproject commit 04bdc575d33ae93735f4c64887383e132951d8a4
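
One practical note on the new config.py layout: SAPIENS_LITE_MODELS_URL and SAPIENS_LITE_MODELS_PATH appear to share the same task/model keys, so a missing local checkpoint can be fetched from its URL counterpart. A minimal sketch follows; the ensure_checkpoint helper, the urllib download, and the chosen example keys are illustrative assumptions, not code from this commit.

# Sketch only, not from the commit: download a checkpoint listed in
# SAPIENS_LITE_MODELS_PATH from the matching SAPIENS_LITE_MODELS_URL entry
# when it is not already present locally.
import os
import urllib.request

from config import SAPIENS_LITE_MODELS_PATH, SAPIENS_LITE_MODELS_URL

def ensure_checkpoint(task: str, model_version: str) -> str:
    local_path = SAPIENS_LITE_MODELS_PATH[task][model_version]
    if not os.path.exists(local_path):
        os.makedirs(os.path.dirname(local_path), exist_ok=True)
        url = SAPIENS_LITE_MODELS_URL[task][model_version]
        urllib.request.urlretrieve(url, local_path)  # plain HTTP download of the .pt2 file
    return local_path

# Example (assumed keys): resolve the 0.3B segmentation checkpoint.
seg_checkpoint = ensure_checkpoint("seg", "sapiens_0.3b")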