nitinsurya committed on
Commit 5c93d70
1 Parent(s): 2be815a

Dockerfile setup and text embeddings integrated.

Files changed (3)
  1. Dockerfile +14 -0
  2. README.md +8 -2
  3. app.py +12 -5
Dockerfile ADDED
@@ -0,0 +1,14 @@
+ FROM python:3.11-slim
+
+ ARG GRADIO_SERVER_PORT=7860
+ ENV GRADIO_SERVER_PORT=${GRADIO_SERVER_PORT}
+
+ WORKDIR /app
+
+ COPY requirements.txt /app/
+
+ RUN pip install -r /app/requirements.txt
+
+ COPY app.py /app/
+
+ CMD ["python", "/app/app.py"]
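
Note: the image copies requirements.txt, which isn't part of this commit. Based on the imports in app.py, a plausible sketch of its contents (the package set is an inference, not from the diff; versions left unpinned):

```
# hypothetical requirements.txt, inferred from app.py's imports
gradio
numpy
Pillow
sentence-transformers
```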
README.md CHANGED
@@ -3,11 +3,17 @@ title: Clip Embedding
  emoji: 💻
  colorFrom: pink
  colorTo: gray
- sdk: gradio
- sdk_version: 3.41.2
+ sdk: docker
  app_file: app.py
  pinned: false
+ app_port: 7860
  license: mit
  ---

  Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
+
+
+ ```
+ docker build . -t clip_embeddings
+ docker run -v ./cache_dir:/app/cache -p 7860:7860 clip_embeddings
+ ```
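
With the container running, the endpoint can be exercised over Gradio's HTTP API. A minimal sketch, assuming a Gradio 3.x-style /api/predict route (the actual Gradio version lives in requirements.txt, which isn't shown, and cat.png is a hypothetical test file):

```python
import base64
import requests

# Hypothetical test image; Gradio 3.x image components accept base64 data URLs.
with open("cat.png", "rb") as f:
    b64 = base64.b64encode(f.read()).decode()

# One entry per input component: [image, textbox].
payload = {"data": [f"data:image/png;base64,{b64}", ""]}
resp = requests.post("http://localhost:7860/api/predict", json=payload)
print(resp.json()["data"][0])  # the stringified embedding from the textbox output
```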
app.py CHANGED
@@ -3,13 +3,20 @@ import numpy as np
  from PIL import Image
  from sentence_transformers import SentenceTransformer

- model = SentenceTransformer('clip-ViT-B-32')
+ model = SentenceTransformer('clip-ViT-B-32', cache_folder='/app/cache')


- def image_to_embedding(img: np.ndarray):
-     embedding = model.encode(sentences=[Image.fromarray(img)], batch_size=128)
+ def image_to_embedding(img: np.ndarray = None, txt: str = None) -> np.ndarray:
+     if img is None and not txt:
+         return []
+
+     if img is not None:
+         embedding = model.encode(sentences=[Image.fromarray(img)], batch_size=128)
+     else:
+         embedding = model.encode(sentences=[txt], batch_size=128)
+
      return embedding


- iface = gr.Interface(fn=image_to_embedding, inputs="image", outputs="textbox", cache_examples=True)
- iface.launch(auth=("Cdpv9i6Q", "R206pqYF"))
+ demo = gr.Interface(fn=image_to_embedding, inputs=["image", "textbox"], outputs="textbox", cache_examples=True)
+ demo.launch(server_name="0.0.0.0")
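
For a quick in-process sanity check of the new dual image/text path, a standalone sketch that mirrors app.py's logic rather than importing it (importing app.py would call demo.launch() at module level and block; the blank test image and caption string are assumptions):

```python
import numpy as np
from PIL import Image
from sentence_transformers import SentenceTransformer, util

model = SentenceTransformer('clip-ViT-B-32')

# Hypothetical 224x224 blank RGB image; any HxWx3 uint8 array works.
img = np.zeros((224, 224, 3), dtype=np.uint8)

img_emb = model.encode(sentences=[Image.fromarray(img)], batch_size=128)
txt_emb = model.encode(sentences=["a black square"], batch_size=128)

# clip-ViT-B-32 maps both modalities into the same 512-dim space,
# so image-text cosine similarity is meaningful.
print(img_emb.shape, txt_emb.shape)  # (1, 512) (1, 512)
print(util.cos_sim(img_emb, txt_emb))
```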