"""Load a locally saved mT5 tokenizer and model for extractive question answering."""
from transformers import MT5ForConditionalGeneration, MT5Tokenizer
import torch

model_directory = './t5m_pocet/model_directory'

# BUG FIX: the original call was
#   MT5Tokenizer(sp_model_kwargs={"model_file": f"{model_directory}/spiece.model"})
# `sp_model_kwargs` is only forwarded to SentencePieceProcessor.__init__; the
# tokenizer still requires the SentencePiece model path as `vocab_file`, so the
# original call raised TypeError and never loaded the vocabulary.
tokenizer = MT5Tokenizer(vocab_file=f"{model_directory}/spiece.model")

# Load the fine-tuned model weights from the same local directory.
# (The model was imported but never instantiated in the original script.)
model = MT5ForConditionalGeneration.from_pretrained(model_directory)

# Example usage (kept from the original, translated from Russian):
# context = "Context on the basis of which the question should be answered."
# question = "What question should be asked?"
#
# # Format the input for the model
# input_text = f"question: {question} context: {context}"
# input_ids = tokenizer(input_text, return_tensors="pt").input_ids
#
# # Generate the answer
# outputs = model.generate(input_ids)
# answer = tokenizer.decode(outputs[0], skip_special_tokens=True)
# print(answer)