Edit model card

Model Card for Model ID

Base model: beomi/Llama-3-Open-Ko-8B

Datasets used: Bingsu/ko_alpaca_data & a Korean generation-based commonsense-reasoning dataset (ν•œκ΅­μ–΄ 생성 기반 상식좔둠 데이터셋)

Model Details

Inference code

from peft import PeftModel, PeftConfig
from transformers import AutoModelForCausalLM, AutoTokenizer

# Load the LoRA adapter config, the 4-bit quantized base model, and the
# adapter-repo tokenizer, then attach the adapter to the base model.
# NOTE(review): `config` is loaded but never used below — presumably kept
# for inspection; verify before removing.
config = PeftConfig.from_pretrained("gamzadole/llama3_instruct_tuning_without_pretraing")
# load_in_4bit quantizes the 8B base model so it fits in consumer GPU memory
# (requires the bitsandbytes package); device_map="auto" spreads layers
# across available devices.
base_model = AutoModelForCausalLM.from_pretrained("beomi/Llama-3-Open-Ko-8B", device_map="auto", load_in_4bit=True)
model = PeftModel.from_pretrained(base_model, "gamzadole/llama3_instruct_tuning_without_pretraing")
tokenizer = AutoTokenizer.from_pretrained("gamzadole/llama3_instruct_tuning_without_pretraing")

# Alpaca-style prompt template (Korean). Placeholders {instruction}, {input},
# and {response} are filled via str.format in generate_response below; the
# header sentence says roughly: "Below is a question (instruction) and extra
# context (input). Please generate an appropriate response."
alpaca_prompt = """μ•„λž˜λŠ” 질문 instruction κ³Ό 좔가정보λ₯Ό λ‚˜νƒ€λ‚΄λŠ” input μž…λ‹ˆλ‹€. μ μ ˆν•œ responseλ₯Ό μƒμ„±ν•΄μ£Όμ„Έμš”.

### Instruction:
{instruction}

### Input:
{input}

### Response:
{response}"""

def generate_response(prompt, model):
    """Generate a Korean chat response for *prompt* with the adapted model.

    Parameters
    ----------
    prompt : str
        The user's instruction text.
    model : PeftModel
        Causal LM (base model + LoRA adapter) used for generation.

    Returns
    -------
    str
        The decoded response, with the prompt tokens and special tokens
        stripped.
    """
    # Fill the Alpaca-style template; input/response stay empty because this
    # card's usage supplies only an instruction. Bound to a new name instead
    # of shadowing the `prompt` parameter.
    user_text = alpaca_prompt.format(instruction=prompt, input="", response="")
    messages = [
        {"role": "system", "content": "μΉœμ ˆν•œ μ±—λ΄‡μœΌλ‘œμ„œ μƒλŒ€λ°©μ˜ μš”μ²­μ— μ΅œλŒ€ν•œ μžμ„Έν•˜κ³  μΉœμ ˆν•˜κ²Œ λ‹΅ν•˜μž. λͺ¨λ“  λŒ€λ‹΅μ€ ν•œκ΅­μ–΄(Korean)으둜 λŒ€λ‹΅ν•΄μ€˜."},
        {"role": "user", "content": user_text},
    ]

    # Render the Llama-3 chat format and move the ids to the model's device.
    input_ids = tokenizer.apply_chat_template(
        messages,
        add_generation_prompt=True,
        return_tensors="pt",
    ).to(model.device)

    # Llama-3 ends assistant turns with <|eot_id|> rather than the default
    # EOS token, so generation must stop on either.
    terminators = [
        tokenizer.eos_token_id,
        tokenizer.convert_tokens_to_ids("<|eot_id|>"),
    ]

    outputs = model.generate(
        input_ids,
        max_new_tokens=512,
        eos_token_id=terminators,
        # Fix: Llama-3 tokenizers define no pad token; without an explicit
        # pad_token_id, generate() warns and falls back to an undefined id.
        pad_token_id=tokenizer.eos_token_id,
        do_sample=True,
        temperature=0.1,  # near-greedy sampling
        top_p=0.9,
    )

    # Decode only the newly generated tail (everything after the prompt).
    response = outputs[0][input_ids.shape[-1]:]
    return tokenizer.decode(response, skip_special_tokens=True)

# Example usage: ask for three tips for staying healthy
# ("건강을 μœ μ§€ν•˜λŠ” 3가지 팁 μ•Œλ €μ€˜") and print the model's reply.
instruction = "건강을 μœ μ§€ν•˜λŠ” 3가지 팁 μ•Œλ €μ€˜"

print(generate_response(instruction, model))

Example response

'건강을 μœ μ§€ν•˜λŠ” μ„Έ 가지 νŒμ€ λ‹€μŒκ³Ό κ°™μŠ΅λ‹ˆλ‹€.


1. μ μ ˆν•œ 수면 μ‹œκ°„μ„ μœ μ§€ν•˜μ„Έμš”.


2. μ μ ˆν•œ μ‹μŠ΅κ΄€μ„ μœ μ§€ν•˜μ„Έμš”.


3. κ·œμΉ™μ μΈ μš΄λ™μ„ ν•˜μ„Έμš”.'
Downloads last month
2
Inference API
Unable to determine this model’s pipeline type. Check the docs .

Model tree for gamzadole/llama3_instruct_tuning_without_pretraing

Adapter
this model