Telugu-LLM-Labs committed on
Commit
ee8d9eb
1 Parent(s): 3a94748

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +4 -4
README.md CHANGED
@@ -50,7 +50,7 @@ max_seq_length = 2048
50
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
51
  load_in_4bit = False
52
  model, tokenizer = FastLanguageModel.from_pretrained(
53
- model_name = "Telugu-LLM-Labs/gemma_7b_10_lang_finetuned",
54
  max_seq_length = max_seq_length,
55
  dtype = dtype,
56
  load_in_4bit = load_in_4bit,
@@ -87,11 +87,11 @@ from peft import AutoPeftModelForCausalLM
87
  from transformers import AutoTokenizer
88
 
89
  model = AutoPeftModelForCausalLM.from_pretrained(
90
- "Telugu-LLM-Labs/Telugu-gemma-7b-finetuned-sft",
91
  load_in_4bit = False,
92
  token = hf_token
93
  )
94
- tokenizer = AutoTokenizer.from_pretrained("Telugu-LLM-Labs/Telugu-gemma-7b-finetuned-sft")
95
 
96
  input_prompt = """
97
  ### Instruction:
@@ -115,7 +115,7 @@ outputs = model.generate(**inputs, max_new_tokens = 300, use_cache = True)
115
  response = tokenizer.batch_decode(outputs)[0]
116
  ```
117
 
118
- Refer to the [blog post]() for sample examples.
119
 
120
  ------------------------------------------------------------------------------------------------------------------------------------
121
 
 
50
  dtype = None # None for auto detection. Float16 for Tesla T4, V100, Bfloat16 for Ampere+
51
  load_in_4bit = False
52
  model, tokenizer = FastLanguageModel.from_pretrained(
53
+ model_name = "Telugu-LLM-Labs/Indic-gemma-2b-finetuned-sft-Navarasa",
54
  max_seq_length = max_seq_length,
55
  dtype = dtype,
56
  load_in_4bit = load_in_4bit,
 
87
  from transformers import AutoTokenizer
88
 
89
  model = AutoPeftModelForCausalLM.from_pretrained(
90
+ "Telugu-LLM-Labs/Indic-gemma-2b-finetuned-sft-Navarasa",
91
  load_in_4bit = False,
92
  token = hf_token
93
  )
94
+ tokenizer = AutoTokenizer.from_pretrained("Telugu-LLM-Labs/Indic-gemma-2b-finetuned-sft-Navarasa")
95
 
96
  input_prompt = """
97
  ### Instruction:
 
115
  response = tokenizer.batch_decode(outputs)[0]
116
  ```
117
 
118
+ Refer to the [blog post](https://ravidesetty.medium.com/introducing-indic-gemma-7b-2b-instruction-tuned-model-on-9-indian-languages-navarasa-86bc81b4a282) for sample examples.
119
 
120
  ------------------------------------------------------------------------------------------------------------------------------------
121