moranyanuka committed on
Commit
e6d0d56
1 Parent(s): 8c920e3

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +3 -3
README.md CHANGED
@@ -23,7 +23,7 @@ import requests
23
  from PIL import Image
24
  from transformers import BlipProcessor, BlipForConditionalGeneration
25
 
26
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
27
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-large-mocha")
28
 
29
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
@@ -56,7 +56,7 @@ import requests
56
  from PIL import Image
57
  from transformers import BlipProcessor, BlipForConditionalGeneration
58
 
59
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
60
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-large-mocha").to("cuda")
61
 
62
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
@@ -88,7 +88,7 @@ import requests
88
  from PIL import Image
89
  from transformers import BlipProcessor, BlipForConditionalGeneration
90
 
91
- processor = BlipProcessor.from_pretrained("Salesforce/blip-image-captioning-large")
92
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-large-mocha", torch_dtype=torch.float16).to("cuda")
93
 
94
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
 
23
  from PIL import Image
24
  from transformers import BlipProcessor, BlipForConditionalGeneration
25
 
26
+ processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-large-mocha")
27
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-large-mocha")
28
 
29
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
 
56
  from PIL import Image
57
  from transformers import BlipProcessor, BlipForConditionalGeneration
58
 
59
+ processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-large-mocha")
60
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-large-mocha").to("cuda")
61
 
62
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'
 
88
  from PIL import Image
89
  from transformers import BlipProcessor, BlipForConditionalGeneration
90
 
91
+ processor = BlipProcessor.from_pretrained("moranyanuka/blip-image-captioning-large-mocha")
92
  model = BlipForConditionalGeneration.from_pretrained("moranyanuka/blip-image-captioning-large-mocha", torch_dtype=torch.float16).to("cuda")
93
 
94
  img_url = 'https://storage.googleapis.com/sfr-vision-language-research/BLIP/demo.jpg'