Update README.md
Browse files
README.md
CHANGED
@@ -47,13 +47,12 @@ from io import BytesIO
 from transformers import AutoProcessor, AutoModelForVision2Seq
 from transformers.image_utils import load_image
 
-
+
+device = "cuda" if torch.cuda.is_available() else "cpu"
 
 # Note that passing the image urls (instead of the actual pil images) to the processor is also possible
 image = load_image("https://cdn.britannica.com/61/93061-050-99147DCE/Statue-of-Liberty-Island-New-York-Bay.jpg")
 
-device = "cuda" if torch.cuda.is_available() else "cpu"
-
 model_id = "cmarkea/idefics2-8b-ft-docvqa-lora"
 processor = AutoProcessor.from_pretrained("HuggingFaceM4/idefics2-8b", do_image_splitting=False)
 model = AutoModelForVision2Seq.from_pretrained(