peterpeter8585 committed
Commit 9e6bd63
Parent(s): 6916605

Update app.py

Files changed (1):
  app.py +2 -6
app.py CHANGED
@@ -77,7 +77,7 @@ def respond1(
         yield response
 def respond0(multimodal_input,password):
     if password==password1:
-        if multimodal_input["files"] ==not None:
+        if multimodal_input["files"] == not None:
 
             images = multimodal_input["files"]
             content = [{"type": "image"} for _ in images]
@@ -89,11 +89,7 @@ def respond0(multimodal_input,password):
             model_id = "HuggingFaceM4/idefics2-8b"
 
             processor = AutoProcessor.from_pretrained(model_id)
-            model = AutoModelForVision2Seq.from_pretrained(
-                "HuggingFaceM4/idefics2-8b",
-                torch_dtype=torch.float16,
-                quantization_config=quantization_config
-            ).to("cpu")
+            model = AutoModelForVision2Seq.from_pretrained("HuggingFaceM4/idefics2-8b",torch_dtype=torch.float16,quantization_config=quantization_config).to("cpu")
             prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
             inputs = processor(text=prompt, images=[images], return_tensors="pt")
             inputs = {k: v.to(model.device) for k, v in inputs.items()}
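
Note that the condition this commit touches still does not parse: `x == not None` is a SyntaxError in Python regardless of spacing, so the intended "files were uploaded" check would normally be written with `is not None` or a plain truthiness test. The following is a minimal sketch, not the committed app.py, of how this branch of respond0 could be written so it runs on CPU. The `password1` value, the message construction, and the assumption that `multimodal_input` is a Gradio-style dict whose "files" entry is a list of image paths all stand in for code outside this diff, and the diff's quantization_config is omitted because bitsandbytes quantization generally requires a CUDA device rather than .to("cpu").

import torch
from transformers import AutoProcessor, AutoModelForVision2Seq
from PIL import Image

password1 = "example-password"  # hypothetical stand-in for the secret defined elsewhere in app.py

def respond0(multimodal_input, password):
    if password == password1:
        files = multimodal_input["files"]
        # `files == not None` is a SyntaxError; a truthiness check
        # (or `files is not None`) expresses the intended test.
        if files:
            # Assumes each entry is an image file path, as Gradio multimodal inputs provide.
            images = [Image.open(f) for f in files]
            content = [{"type": "image"} for _ in images]
            # Hypothetical message structure; the real one is built in lines
            # of app.py not shown in this diff.
            content.append({"type": "text", "text": multimodal_input.get("text", "")})
            messages = [{"role": "user", "content": content}]

            model_id = "HuggingFaceM4/idefics2-8b"
            processor = AutoProcessor.from_pretrained(model_id)
            # Loaded in float32 for CPU; the diff's float16 + bitsandbytes
            # quantization_config combination generally assumes a CUDA device.
            model = AutoModelForVision2Seq.from_pretrained(
                model_id,
                torch_dtype=torch.float32,
            ).to("cpu")

            prompt = processor.apply_chat_template(messages, add_generation_prompt=True)
            inputs = processor(text=prompt, images=images, return_tensors="pt")
            inputs = {k: v.to(model.device) for k, v in inputs.items()}
            # Hypothetical completion of the branch: generate and yield the reply text.
            generated_ids = model.generate(**inputs, max_new_tokens=256)
            yield processor.batch_decode(generated_ids, skip_special_tokens=True)[0]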