nastasiasnk committed
Commit a2320ed
1 Parent(s): a7938b4

Update app.py

Files changed (1)
app.py +14 -49
app.py CHANGED
@@ -1,60 +1,25 @@
 
 
+import streamlit as st
 import os
 HF_TOKEN = os.getenv('HF_TOKEN')
-
 from huggingface_hub import HfFolder
-
 # Set the token using HfFolder (this persists the token)
 HfFolder.save_token(HF_TOKEN)
 
 import transformers
-from transformers import VisionEncoderDecoderModel, AutoTokenizer, pipeline, AutoModel
+from transformers import pipeline
+
+generator = pipeline("text-generation")
+text = st.text_area("your input")
+
+if text:
+    out = generator(text)
+    st.json(out)
+
+
+
+
+
 
-import streamlit as st
 
-# Set Hugging Face API Token if required
-"""
-os.environ["HF_HOME"] = "path_to_your_huggingface_cache_directory"
-os.environ["TRANSFORMERS_CACHE"] = "path_to_your_transformers_cache_directory"
-os.environ["HF_DATASETS_CACHE"] = "path_to_your_datasets_cache_directory"
-os.environ["HF_METRICS_CACHE"] = "path_to_your_metrics_cache_directory"
-os.environ["HF_MODULES_CACHE"] = "path_to_your_modules_cache_directory"
-os.environ["HF_TOKEN"] = "your_hugging_face_access_token"
-"""
-
-# Setup Streamlit interface for input
-st.title("Image to Text Model")
-
-# Using Pipeline
-st.header("Using Pipeline for Image Captioning")
-uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
-
-if uploaded_file is not None:
-    # Assuming the pipeline handles image files directly
-    pipe = pipeline("image-to-text", model="nlpconnect/vit-gpt2-image-captioning")
-    try:
-        result = pipe(uploaded_file.getvalue())
-        st.write("Generated Caption:", result[0]['generated_text'])
-    except Exception as e:
-        st.error(f"Failed to generate caption: {str(e)}")
-
-# Load model directly for further analysis or different processing steps
-st.header("Load Model Directly")
-
-# Assuming 'nlpconnect/vit-gpt2-image-captioning' is your model identifier
-model = VisionEncoderDecoderModel.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-tokenizer = AutoTokenizer.from_pretrained("nlpconnect/vit-gpt2-image-captioning")
-
-# Example of how you might use model and tokenizer directly
-# This section can be customized based on what you need to do with the model
-if st.button("Load Model Information"):
-    try:
-        st.text("Model and Tokenizer loaded successfully")
-        # Display some model details, for example:
-        st.text(f"Model Architecture: {model.__class__.__name__}")
-        st.text(f"Tokenizer Type: {tokenizer.__class__.__name__}")
-    except Exception as e:
-        st.error(f"Error loading model: {str(e)}")
-
-
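For reference, a slightly hardened sketch of the resulting app.py (not part of the commit): it pins the pipeline to an explicit model, skips token persistence when HF_TOKEN is unset, and caches the pipeline so Streamlit reruns do not reload the weights. The model name "gpt2", the max_new_tokens value, and the load_generator helper are illustrative assumptions; the committed code relies on the pipeline's default model and default generation settings.

import os
import streamlit as st
from transformers import pipeline

HF_TOKEN = os.getenv("HF_TOKEN")
if HF_TOKEN:
    # Persist the token only when one is actually set, as saving None would fail
    from huggingface_hub import HfFolder
    HfFolder.save_token(HF_TOKEN)

@st.cache_resource  # load the model once instead of on every Streamlit rerun
def load_generator():
    # "gpt2" is an assumed stand-in; the commit uses the pipeline's default model
    return pipeline("text-generation", model="gpt2")

generator = load_generator()
text = st.text_area("your input")

if text:
    out = generator(text, max_new_tokens=50)  # assumed cap on output length
    st.json(out)  # a list of {"generated_text": ...} dicts

Run locally with: streamlit run app.py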