"""Gradio Space: build a per-user image dataset from frames of uploaded videos,
then fine-tune a ViT classifier on those frames."""
import os
from pathlib import Path

import cv2
import gradio as gr
import pytorch_lightning as pl
import torch
from torch.utils.data import DataLoader
from torchmetrics import Accuracy
from torchvision.datasets import ImageFolder
from transformers import ViTFeatureExtractor, ViTForImageClassification

# These must go through os.environ; a bare assignment such as
# HF_DATASETS_CACHE = "./" only creates a Python variable and never
# reaches the process environment.
os.environ['SHM_SIZE'] = '2G'
os.environ['HF_DATASETS_CACHE'] = './'
class ImageClassificationCollator:
    """Collate (PIL.Image, label) pairs from an ImageFolder into model inputs."""

    def __init__(self, feature_extractor):
        self.feature_extractor = feature_extractor

    def __call__(self, batch):
        # The feature extractor resizes and normalizes the images and returns
        # a batched 'pixel_values' tensor.
        encodings = self.feature_extractor([x[0] for x in batch], return_tensors='pt')
        encodings['labels'] = torch.tensor([x[1] for x in batch], dtype=torch.long)
        return encodings
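# Usage sketch (illustrative only; 'alice' is a hypothetical user directory
# created by earlier uploads):
#   fe = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
#   ds = ImageFolder('alice/train')  # yields (PIL.Image, label) pairs
#   loader = DataLoader(ds, batch_size=8, collate_fn=ImageClassificationCollator(fe))
#   batch = next(iter(loader))       # dict with 'pixel_values' and 'labels'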
class Classifier(pl.LightningModule):
    """Thin LightningModule wrapper around a Hugging Face image classifier."""

    def __init__(self, model, lr: float = 2e-5, **kwargs):
        super().__init__()
        self.save_hyperparameters('lr', *list(kwargs))
        self.model = model
        self.val_acc = Accuracy(
            task='multiclass' if model.config.num_labels > 2 else 'binary',
            num_classes=model.config.num_labels
        )

    def forward(self, **inputs):
        return self.model(**inputs)

    def training_step(self, batch, batch_idx):
        outputs = self(**batch)
        self.log("train_loss", outputs.loss)
        return outputs.loss

    def validation_step(self, batch, batch_idx):
        outputs = self(**batch)
        self.log("val_loss", outputs.loss)
        acc = self.val_acc(outputs.logits.argmax(1), batch['labels'])
        self.log("val_acc", acc, prog_bar=True)
        return outputs.loss

    def configure_optimizers(self):
        return torch.optim.Adam(self.parameters(), lr=self.hparams.lr)
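# Minimal sketch of using Classifier outside the Gradio flow (assumes `model`,
# `train_loader`, and `val_loader` already exist; shortened to one epoch):
#   clf = Classifier(model, lr=2e-5)
#   pl.Trainer(accelerator='auto', devices=1, max_epochs=1).fit(clf, train_loader, val_loader)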
def video_identity(video, user_name, class_name, trainortest, ready):
    if ready == 'yes':
        # Train on the frames saved during earlier calls. ImageFolder infers
        # the class labels from the subdirectory names.
        data_dir = Path(str(user_name) + '/train')
        train_ds = ImageFolder(data_dir)
        test_dir = Path(str(user_name) + '/test')
        test_ds = ImageFolder(test_dir)
        label2id = {}
        id2label = {}
        for i, name in enumerate(train_ds.classes):
            label2id[name] = str(i)
            id2label[str(i)] = name
        feature_extractor = ViTFeatureExtractor.from_pretrained('google/vit-base-patch16-224-in21k')
        model = ViTForImageClassification.from_pretrained(
            'google/vit-base-patch16-224-in21k',
            num_labels=len(label2id),
            label2id=label2id,
            id2label=id2label
        )
        collator = ImageClassificationCollator(feature_extractor)
        train_loader = DataLoader(train_ds, batch_size=8, collate_fn=collator, num_workers=8, shuffle=True)
        test_loader = DataLoader(test_ds, batch_size=8, collate_fn=collator, num_workers=8)
        # Sanity check: one forward pass on a validation batch before training.
        val_batch = next(iter(test_loader))
        outputs = model(**val_batch)
        preds = outputs.logits.softmax(1).argmax(1)
        # Freeze the ViT backbone and fine-tune only the classification head.
        for name, param in model.named_parameters():
            param.requires_grad = name.startswith("classifier")
        pl.seed_everything(42)
        classifier = Classifier(model, lr=2e-5)
        # Mixed precision needs a GPU; fall back to full precision on CPU so
        # the Space also runs on CPU-only hardware.
        use_gpu = torch.cuda.is_available()
        trainer = pl.Trainer(
            accelerator='gpu' if use_gpu else 'cpu',
            devices=1,
            precision=16 if use_gpu else 32,
            max_epochs=30,
        )
        trainer.fit(classifier, train_loader, test_loader)
        # Predict on the test set, rejecting low-confidence predictions.
        threshold = 0.7  # minimum softmax score to accept a prediction
        model.eval()
        with torch.no_grad():
            for data in test_loader:
                scores = model(**data).logits.softmax(1)
                print(scores)
                preds = []
                for score in scores:
                    if score.max() > threshold:
                        preds.append(str(score.argmax().item()))
                    else:
                        preds.append('None')
                print(preds)
                labels = str(data['labels'])
        # Report the last batch's scores, thresholded predictions, and
        # ground-truth labels as text for the three output boxes.
        return str(scores.tolist()), str(preds), labels
    else:
        # Save frames from the uploaded video under <user>/<split>/<class>/.
        capture = cv2.VideoCapture(video)
        user_d = str(user_name) + '/' + str(trainortest)
        class_d = user_d + '/' + str(class_name)
        os.makedirs(class_d, exist_ok=True)
        frame_nr = 0
        while True:
            success, frame = capture.read()
            if not success:
                break
            cv2.imwrite(f'{class_d}/frame_{frame_nr}.jpg', frame)
            # Every decoded frame is saved; only the file names advance in
            # steps of 10.
            frame_nr += 10
        capture.release()
        a = pl.__version__
        # All three interface outputs are text boxes, so return the path of
        # the first saved frame rather than a raw image array.
        first_frame = f'{class_d}/frame_0.jpg'
        status = first_frame if os.path.exists(first_frame) else 'no frames extracted'
        return status, a, class_d
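# Direct-call sketch (hypothetical file names and user): the first calls only
# save frames for the train and test splits; the last one, with ready='yes',
# fine-tunes on everything saved so far (the video argument is unused then).
#   video_identity('cat.mp4', 'alice', 'cat', 'train', 'no')
#   video_identity('dog.mp4', 'alice', 'dog', 'train', 'no')
#   video_identity('cat2.mp4', 'alice', 'cat', 'test', 'no')
#   video_identity(None, 'alice', '', 'train', 'yes')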
demo = gr.Interface(
    video_identity,
    inputs=[
        gr.Video(source='upload'),
        gr.Text(label='User name'),
        gr.Text(label='Class name'),
        gr.Text(label='Which set is this? (type train or test)'),
        gr.Text(label='Are you ready to train? (type yes or no)'),
    ],
    outputs=[gr.Text(), gr.Text(), gr.Text()],
)

demo.launch(debug=True)
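# Expected on-disk layout after a few uploads (illustrative names):
#   <user_name>/
#     train/<class_a>/frame_0.jpg ...
#     train/<class_b>/frame_0.jpg ...
#     test/<class_a>/frame_0.jpg ...
# ImageFolder infers the label set from these subdirectory names when the
# app is called with ready='yes'.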