Commit a8979ba by shahrukhx01
Parent: 42f0d65

Update README.md

Files changed (1): README.md (+2 −2)
@@ -13,10 +13,10 @@ tokenizer = BartTokenizer.from_pretrained('shahrukhx01/schema-aware-distilbart-c
 ## add NL query with table schema
 question = "What is terrence ross' nationality? </s> <col0> Player : text <col1> No. : text <col2> Nationality : text <col3> Position : text <col4> Years in Toronto : text <col5> School/Club Team : text"
 
-inputs = tokenizer([question], max_length=1024, min_length=0, return_tensors='pt')
+inputs = tokenizer([question], max_length=1024, return_tensors='pt')
 
 # Generate SQL
-text_query_ids = model.generate(inputs['input_ids'], num_beams=4, max_length=125, early_stopping=True)
+text_query_ids = model.generate(inputs['input_ids'], num_beams=4, min_length=0, max_length=125, early_stopping=True)
 prediction = [tokenizer.decode(g, skip_special_tokens=True, clean_up_tokenization_spaces=False) for g in text_query_ids][0]
 print(prediction)
 ```
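
In effect, the commit moves `min_length=0` out of the tokenizer call, where it is not an encoding argument, and into `model.generate()`, which is where minimum output length is actually enforced. For context, here is a minimal end-to-end sketch of the snippet as it reads after the change; the checkpoint id is truncated in the hunk header above, so `MODEL_ID` is a placeholder to be replaced with the full id, and the `BartForConditionalGeneration` pairing is assumed from the `BartTokenizer` import visible in the diff:

```python
# Sketch of the post-commit README snippet, assuming a standard BART checkpoint.
from transformers import BartTokenizer, BartForConditionalGeneration

# Placeholder: the full checkpoint id is truncated in the diff above.
MODEL_ID = "shahrukhx01/schema-aware-distilbart-c..."

tokenizer = BartTokenizer.from_pretrained(MODEL_ID)
model = BartForConditionalGeneration.from_pretrained(MODEL_ID)

# Natural-language question followed by the serialized table schema.
question = (
    "What is terrence ross' nationality? </s> "
    "<col0> Player : text <col1> No. : text <col2> Nationality : text "
    "<col3> Position : text <col4> Years in Toronto : text "
    "<col5> School/Club Team : text"
)

inputs = tokenizer([question], max_length=1024, return_tensors="pt")

# Generate SQL: min_length now lives here, alongside the other decoding knobs.
text_query_ids = model.generate(
    inputs["input_ids"],
    num_beams=4,
    min_length=0,
    max_length=125,
    early_stopping=True,
)
prediction = tokenizer.decode(
    text_query_ids[0],
    skip_special_tokens=True,
    clean_up_tokenization_spaces=False,
)
print(prediction)
```

With beam search (`num_beams=4`) and `early_stopping=True`, decoding ends as soon as all beams emit the end-of-sequence token, so `max_length=125` is an upper bound on the generated SQL rather than a target length.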