Update README.md
Browse files
README.md
CHANGED
@@ -46,15 +46,15 @@ The LLM was trained on a subset of 5000 samples of the bjoernp/tagesschau-2018-
|
|
46 |
|
47 |
|
48 |
|
49 |
-
## How to Get Started with the Model
|
50 |
-
|
51 |
# Load model directly
|
|
|
52 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
53 |
|
54 |
tokenizer = AutoTokenizer.from_pretrained("Kamilatr/Ueberschriftengenerator_LEOLM", trust_remote_code=True)
|
55 |
model = AutoModelForCausalLM.from_pretrained("Kamilatr/Ueberschriftengenerator_LEOLM", trust_remote_code=True)
|
56 |
-
|
57 |
# Use a pipeline as a high-level helper
|
|
|
58 |
from transformers import pipeline
|
59 |
|
60 |
pipe = pipeline("text-generation", model="Kamilatr/Ueberschriftengenerator_LEOLM", trust_remote_code=True)
|
@@ -64,10 +64,10 @@ pipe = pipeline("text-generation", model="Kamilatr/Ueberschriftengenerator_LEOLM
|
|
64 |
|
65 |
The LeoLM model was fine-tuned with LoRA.
|
66 |
|
67 |
-
|
68 |
|
69 |
#### Speeds, Sizes, Times
|
70 |
-
|
71 |
training_arguments = TrainingArguments(
|
72 |
output_dir="./results",
|
73 |
evaluation_strategy="epoch",
|
@@ -83,7 +83,7 @@ training_arguments = TrainingArguments(
|
|
83 |
warmup_steps=100,
|
84 |
lr_scheduler_type="constant",
|
85 |
)
|
86 |
-
|
87 |
|
88 |
|
89 |
## Evaluation and Testing
|
|
|
46 |
|
47 |
|
48 |
|
|
|
|
|
49 |
# Load model directly
|
50 |
+
```
|
51 |
from transformers import AutoTokenizer, AutoModelForCausalLM
|
52 |
|
53 |
tokenizer = AutoTokenizer.from_pretrained("Kamilatr/Ueberschriftengenerator_LEOLM", trust_remote_code=True)
|
54 |
model = AutoModelForCausalLM.from_pretrained("Kamilatr/Ueberschriftengenerator_LEOLM", trust_remote_code=True)
|
55 |
+
```
|
56 |
# Use a pipeline as a high-level helper
|
57 |
+
```
|
58 |
from transformers import pipeline
|
59 |
|
60 |
pipe = pipeline("text-generation", model="Kamilatr/Ueberschriftengenerator_LEOLM", trust_remote_code=True)
|
|
|
64 |
|
65 |
The LeoLM model was fine-tuned with LoRA.
|
66 |
|
67 |
+
```
|
68 |
|
69 |
#### Speeds, Sizes, Times
|
70 |
+
```
|
71 |
training_arguments = TrainingArguments(
|
72 |
output_dir="./results",
|
73 |
evaluation_strategy="epoch",
|
|
|
83 |
warmup_steps=100,
|
84 |
lr_scheduler_type="constant",
|
85 |
)
|
86 |
+
```
|
87 |
|
88 |
|
89 |
## Evaluation and Testing
|