|
|
|
"""Deploy Barcelo demo.ipynb |
|
Automatically generated by Colaboratory. |
|
Original file is located at |
|
https://colab.research.google.com/drive/1FxaL8DcYgvjPrWfWruSA5hvk3J81zLY9 |
|
![ ](https://www.vicentelopez.gov.ar/assets/images/logo-mvl.png) |
|
# Model

YOLO is a family of compound-scaled object detection models trained on the COCO dataset. It includes simple functionality for Test Time Augmentation (TTA), model ensembling, hyperparameter evolution, and export to ONNX, CoreML and TFLite.
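A minimal sketch of that API in code, assuming the custom `best.pt` weights and the `ejemplo1.jpg` example image that this script prepares below (the `export` call illustrates the ONNX export mentioned above):

```python
from ultralytics import YOLO

model = YOLO('best.pt')                    # trained weights (set up later in this script)
results = model.predict('ejemplo1.jpg',    # example image downloaded later in this script
                        imgsz=640, conf=0.5, iou=0.4)
model.export(format='onnx')                # export the model to ONNX
```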
|
## Gradio Inference
|
![](https://i.ibb.co/982NS6m/header.png) |
|
This notebook can optionally be accelerated with a GPU runtime.
|
---------------------------------------------------------------------- |
|
YOLOv5 Gradio demo

*Authors: Ultralytics LLC and Gradio*
|
# Code
|
""" |
|
|
|
|
|
|
|
import os
import re
import json
import numpy as np
import pandas as pd
import gradio as gr
import torch
from PIL import Image
from ultralytics import YOLO
|
|
|
|
|
# Download example images for the Gradio demo
torch.hub.download_url_to_file('https://i.pinimg.com/originals/7f/5e/96/7f5e9657c08aae4bcd8bc8b0dcff720e.jpg', 'ejemplo1.jpg')
torch.hub.download_url_to_file('https://i.pinimg.com/originals/c2/ce/e0/c2cee05624d5477ffcf2d34ca77b47d1.jpg', 'ejemplo2.jpg')
|
|
|
|
|
class YOLODetect:

    def __init__(self, modelo):
        self.modelo = modelo

    def predecir(self, url, imgsz=640, conf=0.5, iou=0.40):
        # Run inference and save the annotated image under runs/detect/
        self.source = url
        self.results = self.modelo.predict(source=self.source, save=True, imgsz=imgsz, conf=conf, iou=iou)
        return self.results
|
|
|
    def show(self):
        results = self.results[0]
        # predict(save=True) writes to a new runs/detect/predict* folder on each call,
        # so read the annotated image back from the folder recorded on the results object.
        render = Image.open(os.path.join(str(results.save_dir), os.path.basename(str(results.path))))
        return render
|
|
|
    def to_json(self):
        results = self.results[0]
        img_size = results.orig_shape
        img_name = results.path
        # Detected class indices as integers
        array_numpy = results.boxes.cls.cpu().numpy().astype(np.int32)

        clases = {
            0: "Aedes",
            1: "Mosquitos",
            2: "Moscas"
        }

        # Count detections per class index
        conteo_clases = np.bincount(array_numpy)

        self.json_result = [
            {'Especie': clases[i],
             'Cantidad': str(conteo_clases[i]) if i < len(conteo_clases) else str(0)}
            for i in range(len(clases))
        ]

        result_dict = {
            "image": str(img_name),
            "size": str(img_size),
            "detail": self.json_result
        }

        # Round-trip through JSON so the result is plain, serializable data
        result_dict = json.loads(json.dumps(result_dict))

        return result_dict
|
|
|
def to_dataframe(self): |
|
return pd.DataFrame(self.json_result) |
|
|
|
# Load the trained detector weights
modelo_yolo = YOLO('best.pt')
|
|
|
def yolo(size, iou, conf, im):
    '''Wrapper fn for gradio'''
    # Scale the image so that its longest side matches the requested size
    g = int(size) / max(im.size)
    im = im.resize(tuple(int(x * g) for x in im.size), Image.LANCZOS)

    # Run detection with the thresholds chosen in the UI
    model = YOLODetect(modelo_yolo)
    results = model.predecir(im, imgsz=int(size), conf=conf, iou=iou)

    result_json = model.to_json()
    result_df = model.to_dataframe()
    result_img = model.show()

    return result_img, result_df, result_json
|
|
|
|
|
|
|
in1 = gr.Radio(['640', '1280'], value='640', label="Tamaño de la imagen", type='value')
in2 = gr.Slider(minimum=0, maximum=1, value=0.45, step=0.05, label='NMS IoU threshold')
in3 = gr.Slider(minimum=0, maximum=1, value=0.50, step=0.05, label='Umbral de confianza (conf)')
in4 = gr.Image(type='pil', label="Original Image")

out2 = gr.Image(type="pil", label="YOLOv5")
out3 = gr.Dataframe(label="Cantidad por especie", headers=['Especie', 'Cantidad'], type="pandas")
out4 = gr.JSON(label="JSON")
|
|
|
title = 'Trampas Barceló' |
|
description = """ |
|
<p> |
|
<center> |
|
Sistemas de Desarrollado por Subsecretaría de Modernización del Municipio de Vicente López. Advertencia solo usar fotos provenientes de las trampas Barceló, no de celular o foto de internet. |
|
<img src="https://www.vicentelopez.gov.ar/assets/images/logo-mvl.png" alt="logo" width="250"/> |
|
</center> |
|
</p> |
|
""" |
|
article ="<p style='text-align: center'><a href='https://docs.google.com/presentation/d/1T5CdcLSzgRe8cQpoi_sPB4U170551NGOrZNykcJD0xU/edit?usp=sharing' target='_blank'>Para mas info, clik para ir al white paper</a></p><p style='text-align: center'><a href='https://drive.google.com/drive/folders/1owACN3HGIMo4zm2GQ_jf-OhGNeBVRS7l?usp=sharing ' target='_blank'>Google Colab Demo</a></p><p style='text-align: center'><a href='https://github.com/Municipalidad-de-Vicente-Lopez/Trampa_Barcelo' target='_blank'>Repo Github</a></p></center></p>" |
|
|
|
examples = [['640',0.45, 0.75,'ejemplo1.jpg'], ['640',0.45, 0.75,'ejemplo2.jpg']] |
|
|
|
iface = gr.Interface(yolo, |
|
inputs=[in1, in2, in3, in4], |
|
outputs=[out2,out3,out4], title=title, |
|
description=description, |
|
article=article, |
|
examples=examples, |
|
analytics_enabled=False, |
|
allow_flagging="manual", |
|
flagging_options=["Correcto", "Incorrecto", "Casi correcto", "Error", "Otro"] |
|
) |
|
|
|
iface.queue() |
|
iface.launch(debug=True) |
|
|
|
"""For YOLOv5 PyTorch Hub inference with **PIL**, **OpenCV**, **Numpy** or **PyTorch** inputs please see the full [YOLOv5 PyTorch Hub Tutorial](https://github.com/ultralytics/yolov5/issues/36). |
|
## Citation |
|
[![DOI](https://zenodo.org/badge/264818686.svg)](https://zenodo.org/badge/latestdoi/264818686) |
|
""" |