huzey committed on
Commit 1075d8a
1 Parent(s): 8575416

update gpu

Files changed (1)
  1. app.py +7 -17
app.py CHANGED
@@ -13,7 +13,6 @@ import gradio as gr
 import spaces
 
 USE_CUDA = torch.cuda.is_available()
-
 print("CUDA is available:", USE_CUDA)
 
 def transform_images(images, resolution=(1024, 1024)):
@@ -142,7 +141,6 @@ class MobileSAM(nn.Module):
 
 mobilesam = MobileSAM()
 
-@spaces.GPU(duration=60)
 def image_mobilesam_feature(
     images,
     node_type="block",
@@ -172,10 +170,9 @@ def image_mobilesam_feature(
         }
         out = out_dict[node_type]
         out = out[layer]
-        outputs.append(out.cpu())
+        outputs.append(out)
     outputs = torch.cat(outputs, dim=0)
 
-    mobilesam = mobilesam.cpu()
     return outputs
 
 
@@ -246,7 +243,6 @@ class SAM(torch.nn.Module):
 
 sam = SAM()
 
-@spaces.GPU(duration=120)
 def image_sam_feature(
     images,
     node_type="block",
@@ -274,10 +270,9 @@ def image_sam_feature(
         }
         out = out_dict[node_type]
         out = out[layer]
-        outputs.append(out.cpu())
+        outputs.append(out)
     outputs = torch.cat(outputs, dim=0)
 
-    sam = sam.cpu()
 
     return outputs
 
@@ -326,7 +321,6 @@ class DiNOv2(torch.nn.Module):
 
 dinov2 = DiNOv2()
 
-@spaces.GPU(duration=60)
 def image_dino_feature(images, node_type="block", layer=-1):
     global USE_CUDA
     if USE_CUDA:
@@ -350,11 +344,10 @@ def image_dino_feature(images, node_type="block", layer=-1):
         }
         out = out_dict[node_type]
         out = out[layer]
-        outputs.append(out.cpu())
+        outputs.append(out)
     outputs = torch.cat(outputs, dim=0)
     outputs = rearrange(outputs[:, 5:, :], "b (h w) c -> b h w c", h=32, w=32)
 
-    dinov2 = dinov2.cpu()
     return outputs
 
 
@@ -423,7 +416,6 @@ class CLIP(torch.nn.Module):
 
 clip = CLIP()
 
-@spaces.GPU(duration=60)
 def image_clip_feature(
     images, node_type="block", layer=-1
 ):
@@ -449,10 +441,9 @@ def image_clip_feature(
         }
         out = out_dict[node_type]
         out = out[layer]
-        outputs.append(out.cpu())
+        outputs.append(out)
     outputs = torch.cat(outputs, dim=0)
 
-    clip = clip.cpu()
     return outputs
 
 
@@ -514,7 +505,6 @@ def run_model_on_image(images, model_name="sam", node_type="block", layer=-1):
     else:
         raise ValueError(f"Model {model_name} not supported.")
 
-    USE_CUDA = False
     return result
 
 def extract_features(images, model_name="MobileSAM", node_type="block", layer=-1):
@@ -550,14 +540,14 @@ def compute_ncut(
     knn_tsne=10,
     num_sample_tsne=1000,
     perplexity=500,
-    ):
+):
     from ncut_pytorch import NCUT, rgb_from_tsne_3d
 
     start = time.time()
     eigvecs, eigvals = NCUT(
         num_eig=num_eig,
         num_sample=num_sample_ncut,
-        device="cpu",
+        device="cuda" if USE_CUDA else "cpu",
         affinity_focal_gamma=affinity_focal_gamma,
         knn=knn_ncut,
     ).fit_transform(features.reshape(-1, features.shape[-1]))
@@ -595,7 +585,7 @@ def to_pil_images(images):
         for image in images
     ]
 
-
+@spaces.GPU(duration=60)
 def main_fn(
     images,
     model_name="SAM(sam_vit_b)",