---
pipeline_tag: text-generation
inference: false
license: apache-2.0
datasets:
  - codeparrot/github-code-clean
  - bigcode/starcoderdata
  - open-web-math/open-web-math
  - math-ai/StackMathQA
metrics:
  - code_eval
library_name: transformers
tags:
  - code
  - granite
model-index:
  - name: granite-3b-code-base-2k
    results:
      - task:
          type: text-generation
        dataset:
          type: mbpp
          name: MBPP
        metrics:
          - name: pass@1
            type: pass@1
            value: 36
            verified: false
      - task:
          type: text-generation
        dataset:
          type: evalplus/mbppplus
          name: MBPP+
        metrics:
          - name: pass@1
            type: pass@1
            value: 45.1
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 36.6
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 37.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 40.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 26.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 35.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalSynthesis(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 22
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 25
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 18.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 29.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 17.1
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 26.8
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalExplain(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 14
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Python)
        metrics:
          - name: pass@1
            type: pass@1
            value: 18.3
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(JavaScript)
        metrics:
          - name: pass@1
            type: pass@1
            value: 23.2
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Java)
        metrics:
          - name: pass@1
            type: pass@1
            value: 29.9
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Go)
        metrics:
          - name: pass@1
            type: pass@1
            value: 24.4
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(C++)
        metrics:
          - name: pass@1
            type: pass@1
            value: 16.5
            verified: false
      - task:
          type: text-generation
        dataset:
          type: bigcode/humanevalpack
          name: HumanEvalFix(Rust)
        metrics:
          - name: pass@1
            type: pass@1
            value: 3.7
            verified: false
---

# ibm-granite/granite-3b-code-base-2k-GGUF

This is the Q4_K_M quantized GGUF version of the original [ibm-granite/granite-3b-code-base-2k](https://huggingface.co/ibm-granite/granite-3b-code-base-2k). Refer to the original model card for more details.
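
If you only need the quantized weights rather than the full repository, they can be fetched programmatically. The following is a minimal sketch, not part of the original instructions; it assumes the `huggingface_hub` package is installed and that the file name matches the one used in the llama.cpp example below.

```python
# Sketch: download just the Q4_K_M GGUF file with huggingface_hub.
# repo_id and filename are taken from this card; adjust if the layout differs.
from huggingface_hub import hf_hub_download

gguf_path = hf_hub_download(
    repo_id="ibm-granite/granite-3b-code-base-2k-GGUF",
    filename="granite-3b-code-base.Q4_K_M.gguf",
)
print(gguf_path)  # local path to the cached GGUF file
```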

## Use with llama.cpp

```shell
git clone https://github.com/ggerganov/llama.cpp
cd llama.cpp

# build
make

# run generation
./main -m granite-3b-code-base-2k-GGUF/granite-3b-code-base.Q4_K_M.gguf -n 128 -p "def generate_random(x: int):" --color
```
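
The same GGUF file can also be loaded through the `llama-cpp-python` bindings instead of the llama.cpp CLI. This is a minimal sketch under the assumption that `llama-cpp-python` is installed (`pip install llama-cpp-python`) and that the Q4_K_M file sits at the path used above; it is not part of the original instructions.

```python
# Sketch: run the Q4_K_M GGUF model via the llama-cpp-python bindings.
# Adjust model_path to wherever the file was downloaded.
from llama_cpp import Llama

llm = Llama(
    model_path="granite-3b-code-base-2k-GGUF/granite-3b-code-base.Q4_K_M.gguf",
    n_ctx=2048,  # the base model uses a 2k context window
)

# Same prompt as the llama.cpp example above.
output = llm("def generate_random(x: int):", max_tokens=128)
print(output["choices"][0]["text"])
```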