<!DOCTYPE html>
<html>
	<head>
      <link rel="stylesheet" href="https://fonts.googleapis.com/css?family=Roboto&display=swap" >
      <style>
          body {
              font-family: 'Roboto', sans-serif;
              font-size: 16px; 
          }
        .logo {
            height: 1em;
            vertical-align: middle;
            margin-bottom: 0.1em; 
          }
      </style>
      
		<script type="module" crossorigin src="https://cdn.jsdelivr.net/npm/@gradio/lite@0.4.1/dist/lite.js"></script>
		<link rel="stylesheet" href="https://cdn.jsdelivr.net/npm/@gradio/lite@0.4.1/dist/lite.css" />
	</head>
	<body>
      <h2>
        <img src="lite-logo.png" alt="logo" class="logo">
        Gradio-lite (Gradio running entirely in your browser!)
      </h2>
      <p>Try it out! Once the Gradio app loads (it can take 10-15 seconds), disconnect your Wi-Fi and the machine learning model will still work!</p>
<gradio-lite>
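<!-- Packages listed in <gradio-requirements> are installed into the in-browser Python runtime when the app loads. -->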

<gradio-requirements>
transformers_js_py
</gradio-requirements>

<gradio-file name="app.py" entrypoint>
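# app.py runs entirely client-side via Gradio-lite; transformers_js_py wraps
# Transformers.js, so the model is downloaded and executed in the browser.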
from transformers_js import import_transformers_js, as_url
import gradio as gr

# Load Transformers.js and build a zero-shot image classification pipeline.
transformers = await import_transformers_js()
pipeline = transformers.pipeline
pipe = await pipeline('zero-shot-image-classification')

# Classify the uploaded image against the comma-separated candidate labels
# and return a {label: score} mapping for the gr.Label output.
async def classify(image, classes):
	data = await pipe(as_url(image), classes.split(","))
	result = {item['label']: round(item['score'], 2) for item in data}
	return result

demo = gr.Interface(
	classify,
	[
		gr.Image(label="Input image", type="filepath"),
		gr.Textbox(label="Classes separated by commas"),
	],
	gr.Label(),
)
demo.launch()
</gradio-file>

</gradio-lite>		
    </body>
</html>