# Hugging Face Space source file (header below is scrape residue from the Hub web UI):
# uploaded by keremberke — "upload space files", commit 5acef02, raw/history/blame, 2.05 kB
from pathlib import Path
import os
import json
import gradio as gr
import yolov5
from PIL import Image
from huggingface_hub import hf_hub_download
# Title displayed at the top of the Gradio page.
app_title = "Valorant Object Detection"
# Available YOLOv5 checkpoints (nano / small / medium); the dropdown below lets
# the user switch between them, defaulting to the last (medium) one.
models_ids = ['keremberke/yolov5n-valorant', 'keremberke/yolov5s-valorant', 'keremberke/yolov5m-valorant']
# Footer HTML with links to the model card, the dataset, and the model zoo repo.
article = f"<p style='text-align: center'> <a href='https://huggingface.co/{models_ids[-1]}'>huggingface.co/{models_ids[-1]}</a> | <a href='https://huggingface.co/keremberke/valorant-object-detection'>huggingface.co/keremberke/valorant-object-detection</a> | <a href='https://github.com/keremberke/awesome-yolov5-models'>awesome-yolov5-models</a> </p>"
# Module-level model cache: predict() reassigns both of these (via `global`)
# whenever the user selects a different checkpoint, so we only reload on change.
current_model_id = models_ids[-1]
model = yolov5.load(current_model_id)
# Build the example gallery from the bundled "test_images" directory, if any.
# NOTE(fix): the original referenced undefined names (`space_content`,
# `test_images`), so `space_content/test_images` raised NameError at import.
test_images_dir = Path("test_images")
if test_images_dir.is_dir():
    # Each example row matches predict()'s signature: (image, threshold, model_id).
    # sorted() makes the gallery order deterministic across filesystems.
    examples = [
        [test_images_dir / image_filename, 0.25, models_ids[-1]]
        for image_filename in sorted(os.listdir(test_images_dir))
    ]
else:
    # Gradio accepts examples=None to mean "no examples".
    examples = None
def predict(image, threshold=0.25, model_id=None):
    """Run YOLOv5 detection on *image* and return a rendered PIL image.

    Args:
        image: Input image (PIL) from the Gradio image widget.
        threshold: Confidence threshold, applied via ``model.conf``.
        model_id: Hub repo id of the checkpoint to use. Falls back to the
            currently loaded model when ``None`` — the original code would
            have attempted ``yolov5.load(None)`` and crashed if predict was
            called without an explicit model id.

    Returns:
        A PIL image with detection boxes drawn onto it.
    """
    # Module-level model cache shared across calls (see top of file).
    global current_model_id
    global model
    # Fall back to whatever checkpoint is already loaded.
    if model_id is None:
        model_id = current_model_id
    # Swap checkpoints only when the selection actually changed.
    if model_id != current_model_id:
        model = yolov5.load(model_id)
        current_model_id = model_id
    # The expected input size lives in the checkpoint's hub config.json;
    # hf_hub_download serves it from the local cache after the first call.
    config_path = hf_hub_download(repo_id=model_id, filename="config.json")
    with open(config_path, "r") as f:
        config = json.load(f)
    input_size = config["input_size"]
    # Perform inference at the requested confidence threshold.
    model.conf = threshold
    results = model(image, size=input_size)
    # render() draws boxes in place and returns one numpy array per image.
    numpy_image = results.render()[0]
    output_image = Image.fromarray(numpy_image)
    return output_image
# Wire everything into a Gradio UI and start serving with request queueing.
gr.Interface(
    title=app_title,
    description="Created by 'keremberke'",
    article=article,
    fn=predict,
    inputs=[
        gr.Image(type="pil"),
        # Confidence threshold in [0, 1], matching predict()'s default of 0.25.
        gr.Slider(maximum=1, step=0.01, value=0.25),
        # Checkpoint picker, defaulting to the medium model.
        gr.Dropdown(models_ids, value=models_ids[-1]),
    ],
    outputs=gr.Image(type="pil"),
    examples=examples,
    # Idiom fix: `True if examples else False` is just bool(examples).
    # Example outputs are only pre-computed when examples actually exist.
    cache_examples=bool(examples),
).launch(enable_queue=True)