victorialslocum committed on
Commit eb14dc8
1 Parent(s): ce01949

update to gradio v3.4

Files changed (2)
  1. app.py +112 -144
  2. requirements.txt +1 -1
app.py CHANGED
@@ -1,27 +1,28 @@
 import spacy
 from spacy import displacy
- import random
 from spacy.tokens import Span
- import gradio as gr
 import pandas as pd
 import base64


 DEFAULT_MODEL = "en_core_web"
 DEFAULT_TEXT = "Apple is looking at buying U.K. startup for $1 billion."
- DEFAULT_TOK_ATTR = ['idx', 'text', 'pos_', 'lemma_', 'shape_', 'dep_']
- DEFAULT_ENTS = ['CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY',
- 'NORP', 'ORDINAL', 'ORG', 'PERCENT', 'PERSON', 'PRODUCT', 'QUANTITY', 'TIME', 'WORK_OF_ART']
- DEFAULT_COLOR = "linear-gradient(90deg, #FFCA74, #7AECEC)"
 texts = {"en": DEFAULT_TEXT, "ca": "Apple està buscant comprar una startup del Regne Unit per mil milions de dòlars", "da": "Apple overvejer at købe et britisk startup for 1 milliard dollar.", "de": "Die ganze Stadt ist ein Startup: Shenzhen ist das Silicon Valley für Hardware-Firmen",
 "el": "Η άνιση κατανομή του πλούτου και του εισοδήματος, η οποία έχει λάβει τρομερές διαστάσεις, δεν δείχνει τάσεις βελτίωσης.", "es": "Apple está buscando comprar una startup del Reino Unido por mil millones de dólares.", "fi": "Itseajavat autot siirtävät vakuutusvastuun autojen valmistajille", "fr": "Apple cherche à acheter une start-up anglaise pour 1 milliard de dollars", "it": "Apple vuole comprare una startup del Regno Unito per un miliardo di dollari",
 "ja": "アップルがイギリスの新興企業を10億ドルで購入を検討", "ko": "애플이 영국의 스타트업을 10억 달러에 인수하는 것을 알아보고 있다.", "lt": "Jaunikis pirmąją vestuvinę naktį iškeitė į areštinės gultą", "nb": "Apple vurderer å kjøpe britisk oppstartfirma for en milliard dollar.", "nl": "Apple overweegt om voor 1 miljard een U.K. startup te kopen",
 "pl": "Poczuł przyjemną woń mocnej kawy.", "pt": "Apple está querendo comprar uma startup do Reino Unido por 100 milhões de dólares", "ro": "Apple plănuiește să cumpere o companie britanică pentru un miliard de dolari", "ru": "Apple рассматривает возможность покупки стартапа из Соединённого Королевства за $1 млрд", "sv": "Apple överväger att köpa brittisk startup för 1 miljard dollar.", "zh": "作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。"}
-
-
 button_css = "float: right; --tw-border-opacity: 1; border-color: rgb(229 231 235 / var(--tw-border-opacity)); --tw-gradient-from: rgb(243 244 246 / 0.7); --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to, rgb(243 244 246 / 0)); --tw-gradient-to: rgb(229 231 235 / 0.8); --tw-text-opacity: 1; color: rgb(55 65 81 / var(--tw-text-opacity)); border-width: 1px; --tw-bg-opacity: 1; background-color: rgb(255 255 255 / var(--tw-bg-opacity)); background-image: linear-gradient(to bottom right, var(--tw-gradient-stops)); display: inline-flex; flex: 1 1 0%; align-items: center; justify-content: center; --tw-shadow: 0 1px 2px 0 rgb(0 0 0 / 0.05); --tw-shadow-colored: 0 1px 2px 0 var(--tw-shadow-color); box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -webkit-appearance: button; border-radius: 0.5rem; padding-top: 0.5rem; padding-bottom: 0.5rem; padding-left: 1rem; padding-right: 1rem; font-size: 1rem; line-height: 1.5rem; font-weight: 600;"
 NOUN_ATTR = ['text', 'root.text', 'root.dep_', 'root.head.text']

 # get the huggingface models specified in the requirements.txt file
 def get_all_models():
 with open("requirements.txt") as f:
@@ -93,30 +94,6 @@ def default_token(text, attributes, model):
 data.append(tok_data)
 return data, model_name

- # returns noun chunks in text
- def noun_chunks(text, model):
- model_name = model + "_sm"
- nlp = spacy.load(model_name)
- data = []
- doc = nlp(text)
- for chunk in doc.noun_chunks:
- data.append([chunk.text, chunk.root.text, chunk.root.dep_,
- chunk.root.head.text])
- data = pd.DataFrame(data, columns=NOUN_ATTR)
- return data, model_name
-
- # returns noun chuncks for the default value
- # the return value is not a pandas DataFrame
- def default_noun_chunks(text, model):
- model_name = model + "_sm"
- nlp = spacy.load(model_name)
- data = []
- doc = nlp(text)
- for chunk in doc.noun_chunks:
- data.append([chunk.text, chunk.root.text, chunk.root.dep_,
- chunk.root.head.text])
- return data, model_name
-
 # Get similarity of two random generated vectors
 def random_vectors(text, model):
 model_name = model + "_md"
@@ -178,6 +155,30 @@ def span(text, span1, span2, label1, label2, model):
 svg = displacy.render(doc, style="span")
 return svg, model_name

 # get default text based on language model
 def get_text(model):
 for i in range(len(models)):
@@ -200,37 +201,27 @@ with demo:
 with gr.Column():
 gr.Markdown(" ## Choose a language model and the inputted text")
 with gr.Row():
- with gr.Column():
 model_input = gr.Dropdown(
 choices=models, value=DEFAULT_MODEL, interactive=True, label="Pretrained Pipelines")
- with gr.Column():
- gr.Markdown("")
- with gr.Column():
- gr.Markdown("")
- with gr.Column():
- gr.Markdown("")
 with gr.Row():
- with gr.Column():
 text_input = gr.Textbox(
 value=DEFAULT_TEXT, interactive=True, label="Input Text")
- with gr.Column():
- gr.Markdown("")
- button = gr.Button("Update", variant="primary")
 with gr.Box():
 with gr.Column():
 with gr.Row():
- with gr.Column():
 gr.Markdown(
 "## [🔗 Dependency Parser](https://spacy.io/usage/visualizers#dep)")
 gr.Markdown(
 "The dependency visualizer shows part-of-speech tags and syntactic dependencies")
- with gr.Column():
- with gr.Row():
- with gr.Column():
- gr.Markdown(" ")
- with gr.Column():
- dep_model = gr.Textbox(
- label="Model", value="en_core_web_sm")
 with gr.Row():
 with gr.Column():
 col_punct = gr.Checkbox(
@@ -244,168 +235,145 @@ with demo:
 with gr.Column():
 text = gr.Textbox(
 label="Text Color", value="black")
-
- dep_output = gr.HTML(value=dependency(
- DEFAULT_TEXT, True, True, False, DEFAULT_COLOR, "black", DEFAULT_MODEL)[0])
 with gr.Row():
- with gr.Column():
 dep_button = gr.Button(
- "Update Dependency Parser", variant="primary")
 with gr.Column():
 dep_download_button = gr.HTML(
 value=download_svg(dep_output.value))
- gr.Markdown(" ")
 with gr.Box():
 with gr.Column():
 with gr.Row():
- with gr.Column():
 gr.Markdown(
 "## [🔗 Entity Recognizer](https://spacy.io/usage/visualizers#ent)")
 gr.Markdown(
 "The entity visualizer highlights named entities and their labels in a text")
- with gr.Column():
- with gr.Row():
- with gr.Column():
- gr.Markdown(" ")
- with gr.Column():
- ent_model = gr.Textbox(
- label="Model", value="en_core_web_sm")
 ent_input = gr.CheckboxGroup(
- DEFAULT_ENTS, value=DEFAULT_ENTS)
 ent_output = gr.HTML(value=entity(
 DEFAULT_TEXT, DEFAULT_ENTS, DEFAULT_MODEL)[0])
- ent_button = gr.Button(
- "Update Entity Recognizer", variant="primary")
 with gr.Box():
 with gr.Column():
 with gr.Row():
- with gr.Column():
 gr.Markdown(
 "## [🔗 Token Properties](https://spacy.io/usage/linguistic-features)")
 gr.Markdown(
 "When you put in raw text to spaCy, it returns a Doc object with different linguistic features")
- with gr.Column():
- with gr.Row():
- with gr.Column():
- gr.Markdown(" ")
- with gr.Column():
- tok_model = gr.Textbox(
- label="Model", value="en_core_web_sm")
 with gr.Row():
- with gr.Column():
 tok_input = gr.CheckboxGroup(
- DEFAULT_TOK_ATTR, value=DEFAULT_TOK_ATTR)
- with gr.Column():
- gr.Markdown("")
 tok_output = gr.Dataframe(headers=DEFAULT_TOK_ATTR, value=default_token(
 DEFAULT_TEXT, DEFAULT_TOK_ATTR, DEFAULT_MODEL)[0], overflow_row_behaviour="paginate")
- tok_button = gr.Button(
- "Update Token Properties", variant="primary")
- with gr.Box():
- with gr.Column():
 with gr.Row():
- with gr.Column():
- gr.Markdown(
- "## [🔗 Noun chunks](https://spacy.io/usage/linguistic-features#noun-chunks)")
- gr.Markdown(
- "You can use `doc.noun_chunks` to extract noun phrases from a doc object")
- with gr.Column():
- with gr.Row():
- with gr.Column():
- gr.Markdown(" ")
- with gr.Column():
- noun_model = gr.Textbox(
- label="Model", value="en_core_web_sm")
- noun_output = gr.Dataframe(headers=NOUN_ATTR, value=default_noun_chunks(
- DEFAULT_TEXT, DEFAULT_MODEL)[0], overflow_row_behaviour="paginate")
- noun_button = gr.Button(
- "Update Noun Chunks", variant="primary")
 with gr.Box():
 with gr.Column():
 with gr.Row():
- with gr.Column():
 gr.Markdown(
 "## [🔗 Word and Phrase Similarity](https://spacy.io/usage/linguistic-features#vectors-similarity)")
 gr.Markdown(
 "Words and spans have similarity ratings based on their word vectors")
- with gr.Column():
- with gr.Row():
- with gr.Column():
- gr.Markdown(" ")
- with gr.Column():
- sim_model = gr.Textbox(
- label="Model", value="en_core_web_md")
 with gr.Row():
- with gr.Column():
 sim_text1 = gr.Textbox(
 value="Apple", label="Word 1", interactive=True,)
- with gr.Column():
 sim_text2 = gr.Textbox(
 value="U.K. startup", label="Word 2", interactive=True,)
- with gr.Column():
 sim_output = gr.Textbox(
 label="Similarity Score", value="0.12")
- with gr.Column():
- gr.Markdown("")
- sim_random_button = gr.Button("Update random words")
- sim_button = gr.Button("Update similarity", variant="primary")
 with gr.Box():
 with gr.Column():
 with gr.Row():
- with gr.Column():
 gr.Markdown(
 "## [🔗 Spans](https://spacy.io/usage/visualizers#span)")
 gr.Markdown(
 "The span visualizer highlights overlapping spans in a text")
- with gr.Column():
- with gr.Row():
- with gr.Column():
- gr.Markdown(" ")
- with gr.Column():
- span_model = gr.Textbox(
 label="Model", value="en_core_web_sm")
 with gr.Row():
- with gr.Column():
 span1 = gr.Textbox(
 label="Span 1", value="U.K. startup", placeholder="Input a part of the sentence")
- with gr.Column():
 label1 = gr.Textbox(value="ORG",
 label="Label for Span 1")
- with gr.Column():
- gr.Markdown("")
- with gr.Column():
- gr.Markdown("")
 with gr.Row():
- with gr.Column():
 span2 = gr.Textbox(
 label="Span 2", value="U.K.", placeholder="Input another part of the sentence")
- with gr.Column():
 label2 = gr.Textbox(value="GPE",
 label="Label for Span 2")
- with gr.Column():
- gr.Markdown("")
- with gr.Column():
- gr.Markdown("")
 span_output = gr.HTML(value=span(
 DEFAULT_TEXT, "U.K. startup", "U.K.", "ORG", "GPE", DEFAULT_MODEL)[0])
- span_button = gr.Button("Update Spans", variant="primary")

 # change text based on model input
 model_input.change(get_text, inputs=[model_input], outputs=text_input)
-
 # main button - update all components
 button.click(dependency, inputs=[
 text_input, col_punct, col_phrase, compact, bg, text, model_input], outputs=[dep_output, dep_download_button, dep_model])
 button.click(
 entity, inputs=[text_input, ent_input, model_input], outputs=[ent_output, ent_model])
- button.click(
- noun_chunks, inputs=[text_input, model_input], outputs=[noun_output, noun_model])
 button.click(
 token, inputs=[text_input, tok_input, model_input], outputs=[tok_output, tok_model])
 button.click(vectors, inputs=[sim_text1,
 sim_text2, model_input], outputs=[sim_output, sim_model])
 button.click(
 span, inputs=[text_input, span1, span2, label1, label2, model_input], outputs=[span_output, span_model])

 # individual component buttons
 dep_button.click(dependency, inputs=[
@@ -414,13 +382,13 @@ with demo:
 entity, inputs=[text_input, ent_input, model_input], outputs=[ent_output, ent_model])
 tok_button.click(
 token, inputs=[text_input, tok_input, model_input], outputs=[tok_output, tok_model])
- noun_button.click(
- noun_chunks, inputs=[text_input, model_input], outputs=[noun_output, noun_model])
 sim_button.click(vectors, inputs=[
 sim_text1, sim_text2, model_input], outputs=[sim_output, sim_model])
- span_button.click(
- span, inputs=[text_input, span1, span2, label1, label2, model_input], outputs=[span_output, span_model])
 sim_random_button.click(random_vectors, inputs=[text_input, model_input], outputs=[
 sim_output, sim_text1, sim_text2, sim_model])
-
- demo.launch()
+ import gradio as gr
 import spacy
 from spacy import displacy
 from spacy.tokens import Span
 import pandas as pd
 import base64
+ import random


 DEFAULT_MODEL = "en_core_web"
 DEFAULT_TEXT = "Apple is looking at buying U.K. startup for $1 billion."
 texts = {"en": DEFAULT_TEXT, "ca": "Apple està buscant comprar una startup del Regne Unit per mil milions de dòlars", "da": "Apple overvejer at købe et britisk startup for 1 milliard dollar.", "de": "Die ganze Stadt ist ein Startup: Shenzhen ist das Silicon Valley für Hardware-Firmen",
 "el": "Η άνιση κατανομή του πλούτου και του εισοδήματος, η οποία έχει λάβει τρομερές διαστάσεις, δεν δείχνει τάσεις βελτίωσης.", "es": "Apple está buscando comprar una startup del Reino Unido por mil millones de dólares.", "fi": "Itseajavat autot siirtävät vakuutusvastuun autojen valmistajille", "fr": "Apple cherche à acheter une start-up anglaise pour 1 milliard de dollars", "it": "Apple vuole comprare una startup del Regno Unito per un miliardo di dollari",
 "ja": "アップルがイギリスの新興企業を10億ドルで購入を検討", "ko": "애플이 영국의 스타트업을 10억 달러에 인수하는 것을 알아보고 있다.", "lt": "Jaunikis pirmąją vestuvinę naktį iškeitė į areštinės gultą", "nb": "Apple vurderer å kjøpe britisk oppstartfirma for en milliard dollar.", "nl": "Apple overweegt om voor 1 miljard een U.K. startup te kopen",
 "pl": "Poczuł przyjemną woń mocnej kawy.", "pt": "Apple está querendo comprar uma startup do Reino Unido por 100 milhões de dólares", "ro": "Apple plănuiește să cumpere o companie britanică pentru un miliard de dolari", "ru": "Apple рассматривает возможность покупки стартапа из Соединённого Королевства за $1 млрд", "sv": "Apple överväger att köpa brittisk startup för 1 miljard dollar.", "zh": "作为语言而言,为世界使用人数最多的语言,目前世界有五分之一人口做为母语。"}
 button_css = "float: right; --tw-border-opacity: 1; border-color: rgb(229 231 235 / var(--tw-border-opacity)); --tw-gradient-from: rgb(243 244 246 / 0.7); --tw-gradient-stops: var(--tw-gradient-from), var(--tw-gradient-to, rgb(243 244 246 / 0)); --tw-gradient-to: rgb(229 231 235 / 0.8); --tw-text-opacity: 1; color: rgb(55 65 81 / var(--tw-text-opacity)); border-width: 1px; --tw-bg-opacity: 1; background-color: rgb(255 255 255 / var(--tw-bg-opacity)); background-image: linear-gradient(to bottom right, var(--tw-gradient-stops)); display: inline-flex; flex: 1 1 0%; align-items: center; justify-content: center; --tw-shadow: 0 1px 2px 0 rgb(0 0 0 / 0.05); --tw-shadow-colored: 0 1px 2px 0 var(--tw-shadow-color); box-shadow: var(--tw-ring-offset-shadow, 0 0 #0000), var(--tw-ring-shadow, 0 0 #0000), var(--tw-shadow); -webkit-appearance: button; border-radius: 0.5rem; padding-top: 0.5rem; padding-bottom: 0.5rem; padding-left: 1rem; padding-right: 1rem; font-size: 1rem; line-height: 1.5rem; font-weight: 600;"
+ DEFAULT_COLOR = "linear-gradient(90deg, #FFCA74, #7AECEC)"
+ DEFAULT_ENTS = ['CARDINAL', 'DATE', 'EVENT', 'FAC', 'GPE', 'LANGUAGE', 'LAW', 'LOC', 'MONEY',
+ 'NORP', 'ORDINAL', 'ORG', 'PERCENT', 'PERSON', 'PRODUCT', 'QUANTITY', 'TIME', 'WORK_OF_ART']
+ DEFAULT_TOK_ATTR = ['idx', 'text', 'pos_', 'lemma_', 'shape_', 'dep_']
 NOUN_ATTR = ['text', 'root.text', 'root.dep_', 'root.head.text']

+
+
+
 # get the huggingface models specified in the requirements.txt file
 def get_all_models():
 with open("requirements.txt") as f:
 data.append(tok_data)
 return data, model_name

 # Get similarity of two random generated vectors
 def random_vectors(text, model):
 model_name = model + "_md"
  svg = displacy.render(doc, style="span")
156
  return svg, model_name
157
 
158
+ # returns noun chunks in text
159
+ def noun_chunks(text, model):
160
+ model_name = model + "_sm"
161
+ nlp = spacy.load(model_name)
162
+ data = []
163
+ doc = nlp(text)
164
+ for chunk in doc.noun_chunks:
165
+ data.append([chunk.text, chunk.root.text, chunk.root.dep_,
166
+ chunk.root.head.text])
167
+ data = pd.DataFrame(data, columns=NOUN_ATTR)
168
+ return data, model_name
169
+
170
+ # returns noun chuncks for the default value
171
+ # the return value is not a pandas DataFrame
172
+ def default_noun_chunks(text, model):
173
+ model_name = model + "_sm"
174
+ nlp = spacy.load(model_name)
175
+ data = []
176
+ doc = nlp(text)
177
+ for chunk in doc.noun_chunks:
178
+ data.append([chunk.text, chunk.root.text, chunk.root.dep_,
179
+ chunk.root.head.text])
180
+ return data, model_name
181
+
182
  # get default text based on language model
183
  def get_text(model):
184
  for i in range(len(models)):
 
 with gr.Column():
 gr.Markdown(" ## Choose a language model and the inputted text")
 with gr.Row():
+ with gr.Column(scale=0.25):
 model_input = gr.Dropdown(
 choices=models, value=DEFAULT_MODEL, interactive=True, label="Pretrained Pipelines")
 with gr.Row():
+ with gr.Column(scale=0.5):
 text_input = gr.Textbox(
 value=DEFAULT_TEXT, interactive=True, label="Input Text")
+ with gr.Row():
+ with gr.Column(scale=0.25):
+ button = gr.Button("Update", variant="primary").style(full_width=False)
 with gr.Box():
 with gr.Column():
 with gr.Row():
+ with gr.Column(scale=0.75):
 gr.Markdown(
 "## [🔗 Dependency Parser](https://spacy.io/usage/visualizers#dep)")
 gr.Markdown(
 "The dependency visualizer shows part-of-speech tags and syntactic dependencies")
+ with gr.Column(scale=0.25):
+ dep_model = gr.Textbox(
+ label="Model", value="en_core_web_sm")
 with gr.Row():
 with gr.Column():
 col_punct = gr.Checkbox(
 with gr.Column():
 text = gr.Textbox(
 label="Text Color", value="black")
 with gr.Row():
+ dep_output = gr.HTML(value=dependency(
+ DEFAULT_TEXT, True, True, False, DEFAULT_COLOR, "black", DEFAULT_MODEL)[0])
+ with gr.Row():
+ with gr.Column(scale=0.25):
 dep_button = gr.Button(
+ "Update Dependency Parser", variant="primary").style(full_width=False)
 with gr.Column():
 dep_download_button = gr.HTML(
 value=download_svg(dep_output.value))
 with gr.Box():
 with gr.Column():
 with gr.Row():
+ with gr.Column(scale=0.75):
 gr.Markdown(
 "## [🔗 Entity Recognizer](https://spacy.io/usage/visualizers#ent)")
 gr.Markdown(
 "The entity visualizer highlights named entities and their labels in a text")
+ with gr.Column(scale=0.25):
+ ent_model = gr.Textbox(
+ label="Model", value="en_core_web_sm")
 ent_input = gr.CheckboxGroup(
+ DEFAULT_ENTS, value=DEFAULT_ENTS, label="Entity Types")
 ent_output = gr.HTML(value=entity(
 DEFAULT_TEXT, DEFAULT_ENTS, DEFAULT_MODEL)[0])
+ with gr.Row():
+ with gr.Column(scale=0.25):
+ ent_button = gr.Button(
+ "Update Entity Recognizer", variant="primary")
 with gr.Box():
 with gr.Column():
 with gr.Row():
+ with gr.Column(scale=0.75):
 gr.Markdown(
 "## [🔗 Token Properties](https://spacy.io/usage/linguistic-features)")
 gr.Markdown(
 "When you put in raw text to spaCy, it returns a Doc object with different linguistic features")
+ with gr.Column(scale=0.25):
+ tok_model = gr.Textbox(
+ label="Model", value="en_core_web_sm")
 with gr.Row():
+ with gr.Column(scale=0.5):
 tok_input = gr.CheckboxGroup(
+ DEFAULT_TOK_ATTR, value=DEFAULT_TOK_ATTR, label="Token Attributes", interactive=True)
 tok_output = gr.Dataframe(headers=DEFAULT_TOK_ATTR, value=default_token(
 DEFAULT_TEXT, DEFAULT_TOK_ATTR, DEFAULT_MODEL)[0], overflow_row_behaviour="paginate")
 with gr.Row():
+ with gr.Column(scale=0.25):
+ tok_button = gr.Button(
+ "Update Token Properties", variant="primary")
 with gr.Box():
 with gr.Column():
 with gr.Row():
+ with gr.Column(scale=0.75):
 gr.Markdown(
 "## [🔗 Word and Phrase Similarity](https://spacy.io/usage/linguistic-features#vectors-similarity)")
 gr.Markdown(
 "Words and spans have similarity ratings based on their word vectors")
+ with gr.Column(scale=0.25):
+ sim_model = gr.Textbox(
+ label="Model", value="en_core_web_md")
 with gr.Row():
+ with gr.Column(scale=0.25):
 sim_text1 = gr.Textbox(
 value="Apple", label="Word 1", interactive=True,)
+ with gr.Column(scale=0.25):
 sim_text2 = gr.Textbox(
 value="U.K. startup", label="Word 2", interactive=True,)
+ with gr.Column(scale=0.25):
 sim_output = gr.Textbox(
 label="Similarity Score", value="0.12")
+ with gr.Row():
+ with gr.Column(scale=0.25):
+ sim_random_button = gr.Button("Update random words")
+ with gr.Column(scale=0.25):
+ sim_button = gr.Button("Update similarity", variant="primary")
 with gr.Box():
 with gr.Column():
 with gr.Row():
+ with gr.Column(scale=0.75):
 gr.Markdown(
 "## [🔗 Spans](https://spacy.io/usage/visualizers#span)")
 gr.Markdown(
 "The span visualizer highlights overlapping spans in a text")
+ with gr.Column(scale=0.25):
+ span_model = gr.Textbox(
 label="Model", value="en_core_web_sm")
 with gr.Row():
+ with gr.Column(scale=0.3):
 span1 = gr.Textbox(
 label="Span 1", value="U.K. startup", placeholder="Input a part of the sentence")
+ with gr.Column(scale=0.3):
 label1 = gr.Textbox(value="ORG",
 label="Label for Span 1")
 with gr.Row():
+ with gr.Column(scale=0.3):
 span2 = gr.Textbox(
 label="Span 2", value="U.K.", placeholder="Input another part of the sentence")
+ with gr.Column(scale=0.3):
 label2 = gr.Textbox(value="GPE",
 label="Label for Span 2")
 span_output = gr.HTML(value=span(
 DEFAULT_TEXT, "U.K. startup", "U.K.", "ORG", "GPE", DEFAULT_MODEL)[0])
+ with gr.Row():
+ with gr.Column(scale=0.25):
+ span_button = gr.Button("Update Spans", variant="primary")
+ with gr.Box():
+ with gr.Column():
+ with gr.Row():
+ with gr.Column(scale=0.75):
+ gr.Markdown(
+ "## [🔗 Noun chunks](https://spacy.io/usage/linguistic-features#noun-chunks)")
+ gr.Markdown(
+ "You can use `doc.noun_chunks` to extract noun phrases from a doc object")
+ with gr.Column(scale=0.25):
+ noun_model = gr.Textbox(
+ label="Model", value="en_core_web_sm")
+ noun_output = gr.Dataframe(headers=NOUN_ATTR, value=default_noun_chunks(
+ DEFAULT_TEXT, DEFAULT_MODEL)[0], overflow_row_behaviour="paginate")
+ with gr.Row():
+ with gr.Column(scale=0.25):
+ noun_button = gr.Button(
+ "Update Noun Chunks", variant="primary")

 # change text based on model input
 model_input.change(get_text, inputs=[model_input], outputs=text_input)
 # main button - update all components
 button.click(dependency, inputs=[
 text_input, col_punct, col_phrase, compact, bg, text, model_input], outputs=[dep_output, dep_download_button, dep_model])
 button.click(
 entity, inputs=[text_input, ent_input, model_input], outputs=[ent_output, ent_model])
 button.click(
 token, inputs=[text_input, tok_input, model_input], outputs=[tok_output, tok_model])
 button.click(vectors, inputs=[sim_text1,
 sim_text2, model_input], outputs=[sim_output, sim_model])
 button.click(
 span, inputs=[text_input, span1, span2, label1, label2, model_input], outputs=[span_output, span_model])
+ button.click(
+ noun_chunks, inputs=[text_input, model_input], outputs=[noun_output, noun_model])

 # individual component buttons
 dep_button.click(dependency, inputs=[

 entity, inputs=[text_input, ent_input, model_input], outputs=[ent_output, ent_model])
 tok_button.click(
 token, inputs=[text_input, tok_input, model_input], outputs=[tok_output, tok_model])
 sim_button.click(vectors, inputs=[
 sim_text1, sim_text2, model_input], outputs=[sim_output, sim_model])
 sim_random_button.click(random_vectors, inputs=[text_input, model_input], outputs=[
 sim_output, sim_text1, sim_text2, sim_model])
+ span_button.click(
+ span, inputs=[text_input, span1, span2, label1, label2, model_input], outputs=[span_output, span_model])
+ noun_button.click(
+ noun_chunks, inputs=[text_input, model_input], outputs=[noun_output, noun_model])
+
+ demo.launch()
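For orientation, here is a minimal, self-contained sketch (not part of this commit) of the layout idiom the updated app.py settles on: rows split into fractionally scaled gr.Column blocks, a compact primary button via .style(full_width=False), and a callback wired to the doc.noun_chunks table that this commit re-adds at the bottom of the page. It assumes the gradio==3.4.0 and spacy==3.4.0 pins from requirements.txt plus an installed en_core_web_sm wheel; the names noun_chunk_table and sketch are illustrative, not taken from the Space.

import gradio as gr
import pandas as pd
import spacy

# Assumes the en_core_web_sm wheel listed in requirements.txt is installed.
nlp = spacy.load("en_core_web_sm")

NOUN_ATTR = ['text', 'root.text', 'root.dep_', 'root.head.text']

def noun_chunk_table(text):
    # doc.noun_chunks yields base noun phrases; each row mirrors NOUN_ATTR.
    doc = nlp(text)
    rows = [[c.text, c.root.text, c.root.dep_, c.root.head.text]
            for c in doc.noun_chunks]
    return pd.DataFrame(rows, columns=NOUN_ATTR)

with gr.Blocks() as sketch:
    with gr.Row():
        # Fractional scales split the row, as the updated app.py does.
        with gr.Column(scale=0.75):
            text_in = gr.Textbox(label="Input Text",
                                 value="Apple is looking at buying U.K. startup for $1 billion.")
        with gr.Column(scale=0.25):
            model_box = gr.Textbox(label="Model", value="en_core_web_sm")
    table = gr.Dataframe(headers=NOUN_ATTR)
    with gr.Row():
        with gr.Column(scale=0.25):
            # .style(full_width=False) keeps the primary button compact.
            btn = gr.Button("Update Noun Chunks", variant="primary").style(full_width=False)
    btn.click(noun_chunk_table, inputs=[text_in], outputs=[table])

if __name__ == "__main__":
    sketch.launch()

Giving each button its own quarter-width column is the pattern that the empty gr.Markdown("") filler columns were approximating before this change.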
requirements.txt CHANGED
@@ -1,5 +1,5 @@
 pandas==1.4.2
- gradio==3.0.18
+ gradio==3.4.0
 spacy==3.4.0

 https://huggingface.co/spacy/ca_core_news_md/resolve/main/ca_core_news_md-any-py3-none-any.whl
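Because the Space pins exact versions, a quick runtime check along these lines (illustrative only, not part of the repository) can confirm an environment matches requirements.txt before launching the app:

# Illustrative startup check: compare installed versions against the pins above.
import gradio
import pandas
import spacy

expected = {"gradio": "3.4.0", "pandas": "1.4.2", "spacy": "3.4.0"}
installed = {
    "gradio": gradio.__version__,
    "pandas": pandas.__version__,
    "spacy": spacy.__version__,
}

for name, want in expected.items():
    have = installed[name]
    note = "OK" if have == want else f"expected {want}"
    print(f"{name}: {have} ({note})")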