"""Generate a chat completion with CohereForAI/aya-23-35B.

Authenticates with the Hugging Face Hub (token taken from the
HUGGINGFACE_HUB_TOKEN environment variable, if set), loads the model and
tokenizer, formats one user message with the model's chat template,
samples up to 100 new tokens, and prints the decoded text.
"""
import os

import huggingface_hub
from transformers import AutoModelForCausalLM, AutoTokenizer

# Log in only when a token is actually available: calling login(token=None)
# falls back to an interactive prompt, which hangs non-interactive runs.
# The Aya models are gated, so a token is required for the first download.
token = os.environ.get("HUGGINGFACE_HUB_TOKEN")
if token:
    huggingface_hub.login(token=token)

# Model ID on the Hub
model_id = "CohereForAI/aya-23-35B"

# Load tokenizer and model
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

# Format the message with the chat template. (The prompt is Turkish:
# "Write a letter to my mother telling her how much I love her".)
messages = [
    {"role": "user", "content": "Anneme onu ne kadar sevdiğimi anlatan bir mektup yaz"}
]
input_ids = tokenizer.apply_chat_template(
    messages,
    tokenize=True,
    add_generation_prompt=True,
    return_tensors="pt",
)

# Generate text.
# FIX: removed force_download=True — it is a from_pretrained() download
# kwarg, not a generation flag; passing it to generate() makes recent
# transformers releases reject the call as an invalid generation argument.
gen_tokens = model.generate(
    input_ids,
    max_new_tokens=100,
    do_sample=True,
    temperature=0.3,
)

# Decode the generated tokens (prompt included) and print the result.
gen_text = tokenizer.decode(gen_tokens[0], skip_special_tokens=True)
print(gen_text)