import torch
from transformers import AutoTokenizer, AutoModelForQuestionAnswering


def loadSqueeze():
    # Load the SqueezeBERT tokenizer and model fine-tuned on SQuAD v2
    tokenizer = AutoTokenizer.from_pretrained("ALOQAS/squeezebert-uncased-finetuned-squad-v2")
    model = AutoModelForQuestionAnswering.from_pretrained("ALOQAS/squeezebert-uncased-finetuned-squad-v2")
    return tokenizer, model


def squeezebert(context, question, model, tokenizer):
    # Tokenize the question-context pair into a single model input
    inputs = tokenizer.encode_plus(
        question,
        context,
        max_length=512,
        truncation=True,
        padding=True,
        return_tensors="pt",
    )
    # Send inputs to the same device as the model
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    with torch.no_grad():
        # Forward pass: get start/end logits over the token sequence
        outputs = model(**inputs)

    # Extract the most likely start and end positions of the answer span
    answer_start_scores, answer_end_scores = outputs.start_logits, outputs.end_logits
    answer_start_index = torch.argmax(answer_start_scores)  # Most likely start of answer
    answer_end_index = torch.argmax(answer_end_scores) + 1  # +1 for inclusive slicing

    # Convert the token span back to the answer text
    answer_tokens = inputs["input_ids"][0, answer_start_index:answer_end_index]
    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)

    return {"answer": answer, "start": answer_start_index.item(), "end": answer_end_index.item()}


def bert(context, question, pip):
    # Delegate to a ready-made question-answering pipeline
    return pip(context=context, question=question)


def deberta(context, question, pip):
    # Delegate to a ready-made question-answering pipeline
    return pip(context=context, question=question)
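
# Minimal usage sketch (assumptions: bert()/deberta() receive a transformers
# question-answering pipeline; the checkpoint name and example strings below
# are illustrative stand-ins, not part of this module):
if __name__ == "__main__":
    from transformers import pipeline

    context = "The Eiffel Tower is located in Paris, France."
    question = "Where is the Eiffel Tower?"

    # SqueezeBERT path: explicit forward pass through the fine-tuned model
    tokenizer, model = loadSqueeze()
    print(squeezebert(context, question, model, tokenizer))

    # bert()/deberta() path: wrap any QA checkpoint in a pipeline and pass it in
    qa_pipe = pipeline("question-answering", model="deepset/roberta-base-squad2")
    print(bert(context, question, qa_pipe))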