Ubuntu committed on
Commit
bc96ace
1 Parent(s): 8ddd85c
Files changed (2) hide show
  1. .ipynb_checkpoints/app-checkpoint.py +5 -10
  2. app.py +5 -10
.ipynb_checkpoints/app-checkpoint.py CHANGED
@@ -38,16 +38,11 @@ class Classifier(pl.LightningModule):
38
  num_classes=model.config.num_labels
39
  )
40
 
41
- # def training_step(self, batch, batch_idx):
42
- # outputs = self(**batch)
43
- # self.log(f"train_loss", outputs.loss)
44
- # return outputs.loss
45
  def training_step(self, batch, batch_idx):
46
  outputs = self(**batch)
47
- loss = outputs.loss.float() # Convert to float
48
- self.log(f"train_loss", loss)
49
- return loss
50
-
51
  def validation_step(self, batch, batch_idx):
52
  outputs = self(**batch)
53
  self.log(f"val_loss", outputs.loss)
@@ -141,9 +136,9 @@ def video_identity(video,user_name,class_name,trainortest,ready):
141
  break
142
 
143
  frameNr = frameNr+10
144
-
145
  img=cv2.imread(class_d+'/frame_0.jpg')
146
- return img, user_d, class_d
147
  demo = gr.Interface(video_identity,
148
  inputs=[gr.Video(source='upload'),
149
  gr.Text(),
 
38
  num_classes=model.config.num_labels
39
  )
40
 
 
 
 
 
41
  def training_step(self, batch, batch_idx):
42
  outputs = self(**batch)
43
+ self.log(f"train_loss", outputs.loss)
44
+ return outputs.loss
45
+
 
46
  def validation_step(self, batch, batch_idx):
47
  outputs = self(**batch)
48
  self.log(f"val_loss", outputs.loss)
 
136
  break
137
 
138
  frameNr = frameNr+10
139
+ a=pl.__version__
140
  img=cv2.imread(class_d+'/frame_0.jpg')
141
+ return img, a, class_d
142
  demo = gr.Interface(video_identity,
143
  inputs=[gr.Video(source='upload'),
144
  gr.Text(),
app.py CHANGED
@@ -38,16 +38,11 @@ class Classifier(pl.LightningModule):
38
  num_classes=model.config.num_labels
39
  )
40
 
41
- # def training_step(self, batch, batch_idx):
42
- # outputs = self(**batch)
43
- # self.log(f"train_loss", outputs.loss)
44
- # return outputs.loss
45
  def training_step(self, batch, batch_idx):
46
  outputs = self(**batch)
47
- loss = outputs.loss.float() # Convert to float
48
- self.log(f"train_loss", loss)
49
- return loss
50
-
51
  def validation_step(self, batch, batch_idx):
52
  outputs = self(**batch)
53
  self.log(f"val_loss", outputs.loss)
@@ -141,9 +136,9 @@ def video_identity(video,user_name,class_name,trainortest,ready):
141
  break
142
 
143
  frameNr = frameNr+10
144
-
145
  img=cv2.imread(class_d+'/frame_0.jpg')
146
- return img, user_d, class_d
147
  demo = gr.Interface(video_identity,
148
  inputs=[gr.Video(source='upload'),
149
  gr.Text(),
 
38
  num_classes=model.config.num_labels
39
  )
40
 
 
 
 
 
41
  def training_step(self, batch, batch_idx):
42
  outputs = self(**batch)
43
+ self.log(f"train_loss", outputs.loss)
44
+ return outputs.loss
45
+
 
46
  def validation_step(self, batch, batch_idx):
47
  outputs = self(**batch)
48
  self.log(f"val_loss", outputs.loss)
 
136
  break
137
 
138
  frameNr = frameNr+10
139
+ a=pl.__version__
140
  img=cv2.imread(class_d+'/frame_0.jpg')
141
+ return img, a, class_d
142
  demo = gr.Interface(video_identity,
143
  inputs=[gr.Video(source='upload'),
144
  gr.Text(),