File size: 1,688 Bytes
1a9b0e9
82cc52b
1a9b0e9
82cc52b
 
c7614d7
95a989a
82cc52b
 
 
 
fb9e583
1a9b0e9
 
fb9e583
1a9b0e9
fb9e583
95a989a
 
 
 
 
fb9e583
95a989a
 
fb9e583
82cc52b
 
 
 
1a9b0e9
95a989a
1a9b0e9
 
 
95a989a
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
import gradio as gr
from transformers import pipeline

def main():
    """Launch a Gradio demo UI for the microsoft/Phi-3-mini-128k-instruct model.

    Builds a two-tab interface (static model info + interactive text
    generation) and starts the Gradio server. Blocks until the server stops.
    """
    # Load the model once at startup. trust_remote_code is required because
    # Phi-3 ships custom modeling code on the Hugging Face Hub.
    model = pipeline(
        "text-generation",
        model="microsoft/Phi-3-mini-128k-instruct",
        trust_remote_code=True,
    )

    def generate_text(input_text):
        """Generate a continuation of *input_text* and return it as a string."""
        # Use max_new_tokens, not max_length: max_length counts the prompt's
        # tokens toward the budget, so prompts near/over 50 tokens produced
        # truncated or empty output (and max_length is deprecated for this
        # purpose in recent transformers releases).
        result = model(input_text, max_new_tokens=50)
        return result[0]['generated_text']

    with gr.Blocks() as blocks:
        with gr.Tab("Model Info"):
            gr.Markdown("""
                # Microsoft/Phi-3-mini-128k-instruct
                ## Description
                This model has 3.8 billion parameters, designed for lightweight and cutting-edge applications. Trained on the Phi-3 dataset, it emphasizes quality and dense reasoning suitable for various applications.
                ### Main Use Cases
                - Ideal for environments with memory or processing limitations.
                - Suitable for scenarios where latency is critical.
                - Useful for solid reasoning needs, including code, mathematics, and logic.
                ### License and Use
                The information provided should not be seen as a modification of the licensing under which the model is released.
                [More information](https://huggingface.co/microsoft/Phi-3-mini-128k-instruct)
            """)
        with gr.Tab("Generate Text"):
            input_text = gr.Textbox(placeholder="Type here to generate text")
            output_text = gr.Textbox(label="Generated Text")
            # Pressing Enter in the input box triggers generation.
            input_text.submit(fn=generate_text, inputs=input_text, outputs=output_text)

        blocks.launch()

# Run the demo only when executed as a script, not when imported as a module.
if __name__ == "__main__":
    main()