---
pipeline_tag: text-generation
base_model: ibm-granite/granite-20b-code-base-8k
inference: false
license: apache-2.0
datasets:
  - bigcode/commitpackft
  - TIGER-Lab/MathInstruct
  - meta-math/MetaMathQA
  - glaiveai/glaive-code-assistant-v3
  - glaiveai/glaive-function-calling-v2
  - bugdaryan/sql-create-context-instruction
  - garage-bAInd/Open-Platypus
  - nvidia/HelpSteer
metrics:
  - code_eval
library_name: transformers
tags:
  - code
  - granite
model-index:
  - name: granite-20b-code-instruct-8k
    results:
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 60.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 53.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 58.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 42.1
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 45.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 42.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 44.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 42.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 49.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 32.3
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 42.1
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 18.3
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 43.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 43.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 45.7
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 41.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 41.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 29.9
            verified: false
---

# ibm-granite/granite-20b-code-instruct-8k-GGUF

This is the Q4_K_M GGUF-quantized version of the original [ibm-granite/granite-20b-code-instruct-8k](https://huggingface.co/ibm-granite/granite-20b-code-instruct-8k) model. Refer to the original model card for more details.

## Use with llama.cpp

```shell
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp

# build llama.cpp
make

# run generation
./main -m granite-20b-code-instruct-8k-GGUF/granite-20b-code-instruct.Q4_K_M.gguf -n 128 -p "def generate_random(x: int):" --color
```
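If you would rather call the quantized model from Python, the sketch below pairs `huggingface_hub` with the `llama-cpp-python` bindings. This is not from the original card: the package installs, the quantized filename, and the context size are assumptions to verify against this repository and the original model card.

```python
# Minimal sketch (not from the original card): assumes `pip install llama-cpp-python huggingface_hub`
# and a llama.cpp build recent enough to support the Granite code models.
from huggingface_hub import hf_hub_download
from llama_cpp import Llama

# Fetch the Q4_K_M file from this repository (check the filename against the repo listing).
model_path = hf_hub_download(
    repo_id="ibm-granite/granite-20b-code-instruct-8k-GGUF",
    filename="granite-20b-code-instruct.Q4_K_M.gguf",
)

# Load with the model's 8k context window.
llm = Llama(model_path=model_path, n_ctx=8192)

# Same completion-style prompt as the CLI example above.
output = llm("def generate_random(x: int):", max_tokens=128)
print(output["choices"][0]["text"])
```

For instruction-style prompting, follow the chat template documented in the original model card rather than the raw completion prompt used here.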