from transformers import AutoTokenizer, AutoModelForQuestionAnswering
import torch

def loadSqueeze():
    """Load the SqueezeBERT tokenizer and model fine-tuned on SQuAD v2."""
    tokenizer = AutoTokenizer.from_pretrained("ALOQAS/squeezebert-uncased-finetuned-squad-v2")
    model = AutoModelForQuestionAnswering.from_pretrained("ALOQAS/squeezebert-uncased-finetuned-squad-v2")
    return tokenizer, model

def squeezebert(context, question, model, tokenizer):
    """Run extractive question answering with a SqueezeBERT model and tokenizer."""
    # Tokenize the question-context pair (truncated to the model's 512-token limit)
    inputs = tokenizer(question, context, max_length=512, truncation=True, padding=True, return_tensors='pt')

    # Send inputs to the same device as your model
    inputs = {k: v.to(model.device) for k, v in inputs.items()}

    with torch.no_grad():
        # Forward pass, get model outputs
        outputs = model(**inputs)

    # Extract the start and end positions of the answer in the tokens
    answer_start_scores, answer_end_scores = outputs.start_logits, outputs.end_logits
    answer_start_index = torch.argmax(answer_start_scores)  # Most likely start of answer
    answer_end_index = torch.argmax(answer_end_scores) + 1  # Most likely end of answer; +1 for inclusive slicing

    # Convert token indices to the actual answer text
    answer_tokens = inputs['input_ids'][0, answer_start_index:answer_end_index]
    answer = tokenizer.decode(answer_tokens, skip_special_tokens=True)
    return {"answer": answer, "start": answer_start_index.item(), "end": answer_end_index.item()}



def bert(context, question, pip):
    return pip(context=context, question=question)

def deberta(context, question, pip):
    return pip(context=context, question=question)