wissamantoun committed
Commit 3e0a6d8
1 Parent(s): 5ead831

Update README.md

Files changed (1)
  1. README.md +4 -20
README.md CHANGED
@@ -13,6 +13,8 @@ widget:
 
 # Arabic GPT2
 
+ <img src="https://raw.githubusercontent.com/aub-mind/arabert/master/AraGPT2.png" width="100" align="left"/>
+
 You can find more information in our paper [AraGPT2](https://arxiv.org/abs/2012.15520)
 
 The code in this repository was used to train all GPT2 variants. The code supports training and fine-tuning GPT2 on GPUs and TPUs via the TPUEstimator API.
@@ -38,7 +40,7 @@ from arabert.aragpt2.grover.modeling_gpt2 import GPT2LMHeadModel
 
 from arabert.preprocess import ArabertPreprocessor
 
- MODEL_NAME='aragpt2-base'
+ MODEL_NAME='aubmindlab/aragpt2-base'
 arabert_prep = ArabertPreprocessor(model_name=MODEL_NAME)
 
 text=""
@@ -73,25 +75,7 @@ python create_pretraining_data.py
 
 Finetuning:
 ```bash
- python3 run_pretraining.py \
- --input_file="gs://<GS_BUCKET>/pretraining_data/*" \
- --output_dir="gs://<GS_BUCKET>/pretraining_model/" \
- --config_file="config/small_hparams.json" \
- --batch_size=128 \
- --eval_batch_size=8 \
- --num_train_steps= \
- --num_warmup_steps= \
- --learning_rate= \
- --save_checkpoints_steps= \
- --max_seq_length=1024 \
- --max_eval_steps= \
- --optimizer="lamb" \
- --iterations_per_loop=5000 \
- --keep_checkpoint_max=10 \
- --use_tpu=True \
- --tpu_name=<TPU NAME> \
- --do_train=True \
- --do_eval=False
+ python3 run_pretraining.py \\r\n --input_file="gs://<GS_BUCKET>/pretraining_data/*" \\r\n --output_dir="gs://<GS_BUCKET>/pretraining_model/" \\r\n --config_file="config/small_hparams.json" \\r\n --batch_size=128 \\r\n --eval_batch_size=8 \\r\n --num_train_steps= \\r\n --num_warmup_steps= \\r\n --learning_rate= \\r\n --save_checkpoints_steps= \\r\n --max_seq_length=1024 \\r\n --max_eval_steps= \\r\n --optimizer="lamb" \\r\n --iterations_per_loop=5000 \\r\n --keep_checkpoint_max=10 \\r\n --use_tpu=True \\r\n --tpu_name=<TPU NAME> \\r\n --do_train=True \\r\n --do_eval=False
 ```
 # Model Sizes
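The substantive fix in the second hunk is the model identifier: the bare `aragpt2-base` does not resolve on the Hugging Face Hub, while the namespaced `aubmindlab/aragpt2-base` does. Below is a minimal end-to-end sketch of the corrected snippet, assuming the base variant loads with the stock `transformers` auto classes (the README's own snippet imports `GPT2LMHeadModel` from `arabert.aragpt2.grover.modeling_gpt2` instead); the prompt and sampling settings are illustrative, not from the source.

```python
# Minimal sketch of the corrected usage. Assumes `arabert` and `transformers`
# are installed and that the base variant works with the stock auto classes;
# the prompt and generation settings below are illustrative placeholders.
from transformers import AutoTokenizer, AutoModelForCausalLM
from arabert.preprocess import ArabertPreprocessor

MODEL_NAME = 'aubmindlab/aragpt2-base'  # namespaced ID, as fixed in this commit
arabert_prep = ArabertPreprocessor(model_name=MODEL_NAME)

text = "يحكى أن مزارعا"  # illustrative prompt ("it is said that a farmer...")
clean_text = arabert_prep.preprocess(text)  # normalize as during pretraining

tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)

input_ids = tokenizer(clean_text, return_tensors="pt").input_ids
output = model.generate(input_ids, max_length=64, do_sample=True, top_p=0.95)
print(tokenizer.decode(output[0], skip_special_tokens=True))
```

Running `ArabertPreprocessor` at inference time keeps inputs consistent with the text normalization the model saw during pretraining.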
 
81