Fouzi Takelait committed
Commit 0dfd702
1 Parent(s): b1c0f8d

Update app.py

Files changed (1)
  1. app.py +3 -11
app.py CHANGED
@@ -23,11 +23,7 @@ from transformer_mt_roberta.modeling_transformer_final import TransfomerEncoderD
 # source_tokenizer = PreTrainedTokenizerFast.from_pretrained("da_en_output_dir/da_tokenizer")
 # target_tokenizer = PreTrainedTokenizerFast.from_pretrained("da_en_output_dir/en_tokenizer")
 # model = TransfomerEncoderDecoderModel.from_pretrained("da_en_output_dir")
-<<<<<<< HEAD
-#
-=======
-#
->>>>>>> adb80531e202c58b4ab91375bc391ab50bbc882f
+
 # input_ids = source_tokenizer.encode(text_in, return_tensors="pt")
 # output_ids = model.generate(
 #     input_ids,
@@ -36,11 +32,7 @@ from transformer_mt_roberta.modeling_transformer_final import TransfomerEncoderD
 #     eos_token_id=target_tokenizer.eos_token_id,
 #     pad_token_id=target_tokenizer.pad_token_id,
 # )
-<<<<<<< HEAD
-#
-=======
-#
->>>>>>> adb80531e202c58b4ab91375bc391ab50bbc882f
+
 # return target_tokenizer.decode(output_ids[0])
 
 def translator_fn_roberta(text_in):
@@ -49,7 +41,7 @@ def translator_fn_roberta(text_in):
     model_pretrained_roberta = mt_roberta.from_pretrained("da_en_RoBERTa_pretrained")
 
     input_ids_pretrained_roberta = source_tokenizer_pretrained_roberta.encode(text_in, return_tensors="pt")
-    output_ids_pretrained_roberta = input_ids_pretrained_roberta.generate(
+    output_ids_pretrained_roberta = model_pretrained_roberta.generate(
         input_ids_pretrained_roberta,
         max_length=10,
         bos_token_id=target_tokenizer_pretrained_roberta.bos_token_id,
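
The first two hunks only strip leftover Git merge-conflict markers from a commented-out reference block; the substantive fix is the last hunk, which calls .generate() on the model rather than on the input tensor. For context, a minimal sketch of the corrected function. The mt_roberta import path and the two tokenizer checkpoint paths are assumptions (they fall outside the visible hunks), and the truncated tail of the hunk is assumed to mirror the commented-out block above.

from transformers import PreTrainedTokenizerFast
# Hypothetical import path -- the class's real location is not shown in the diff.
from transformer_mt_roberta.modeling_transformer_final import mt_roberta

def translator_fn_roberta(text_in):
    # Tokenizer paths are assumptions modeled on the commented-out block above;
    # only the model checkpoint path ("da_en_RoBERTa_pretrained") appears in the diff.
    source_tokenizer_pretrained_roberta = PreTrainedTokenizerFast.from_pretrained("da_en_RoBERTa_pretrained/da_tokenizer")
    target_tokenizer_pretrained_roberta = PreTrainedTokenizerFast.from_pretrained("da_en_RoBERTa_pretrained/en_tokenizer")
    model_pretrained_roberta = mt_roberta.from_pretrained("da_en_RoBERTa_pretrained")

    # Encode the source sentence as a PyTorch tensor of token ids.
    input_ids_pretrained_roberta = source_tokenizer_pretrained_roberta.encode(text_in, return_tensors="pt")
    # The fix: generate() belongs to the model, not to the input tensor.
    output_ids_pretrained_roberta = model_pretrained_roberta.generate(
        input_ids_pretrained_roberta,
        max_length=10,
        bos_token_id=target_tokenizer_pretrained_roberta.bos_token_id,
        # The hunk is truncated here; the remaining arguments and the decode
        # call are assumed to mirror the commented-out reference code above.
        eos_token_id=target_tokenizer_pretrained_roberta.eos_token_id,
        pad_token_id=target_tokenizer_pretrained_roberta.pad_token_id,
    )
    return target_tokenizer_pretrained_roberta.decode(output_ids_pretrained_roberta[0])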