legraphista committed on
Commit aef26c1
Parent: 0593c2a

Upload imatrix.log with huggingface_hub
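The commit message indicates the file was pushed with the huggingface_hub client. A minimal sketch of such an upload; the repo id and local path are assumptions inferred from the paths in the log, not values recorded in the commit:

```python
from huggingface_hub import HfApi

api = HfApi()
# Hypothetical repo id and local path -- inferred from the log's
# "gemma-2-27b-it-IMat-GGUF/" prefix, not recorded in this commit.
api.upload_file(
    path_or_fileobj="gemma-2-27b-it-IMat-GGUF/imatrix.log",
    path_in_repo="imatrix.log",
    repo_id="legraphista/gemma-2-27b-it-IMat-GGUF",
    commit_message="Upload imatrix.log with huggingface_hub",
)
```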

Files changed (1)
  1. imatrix.log +26 -25
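The log being updated is the console output of llama.cpp's imatrix tool. As a point of reference, a hypothetical invocation that would produce a log of this shape; the model path is taken from the log, while the binary name varies by llama.cpp build (older builds ship it as ./imatrix) and the calibration file is an assumption, since the log does not record the command line:

```python
import subprocess

# Sketch of a rerun of llama.cpp's imatrix tool under the assumptions above.
subprocess.run(
    [
        "./llama-imatrix",  # "./imatrix" on older llama.cpp builds
        "-m", "gemma-2-27b-it-IMat-GGUF/gemma-2-27b-it.Q8_0.gguf.hardlink.gguf",
        "-f", "calibration.txt",  # assumed calibration corpus
        "-o", "gemma-2-27b-it-IMat-GGUF/imatrix.dat",  # output path seen in the save_imatrix lines
    ],
    check=True,
)
```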
imatrix.log CHANGED
@@ -1,4 +1,4 @@
-llama_model_loader: loaded meta data with 25 key-value pairs and 508 tensors from gemma-2-27b-it-IMat-GGUF/gemma-2-27b-it.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
+llama_model_loader: loaded meta data with 26 key-value pairs and 508 tensors from gemma-2-27b-it-IMat-GGUF/gemma-2-27b-it.Q8_0.gguf.hardlink.gguf (version GGUF V3 (latest))
 llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
 llama_model_loader: - kv 0: general.architecture str = gemma2
 llama_model_loader: - kv 1: general.name str = gemma-2-27b-it
@@ -15,8 +15,8 @@ llama_model_loader: - kv 11: general.file_type u32
 llama_model_loader: - kv 12: tokenizer.ggml.model str = llama
 llama_model_loader: - kv 13: tokenizer.ggml.pre str = default
 llama_model_loader: - kv 14: tokenizer.ggml.tokens arr[str,256000] = ["<pad>", "<eos>", "<bos>", "<unk>", ...
-llama_model_loader: - kv 15: tokenizer.ggml.scores arr[f32,256000] = [0.000000, 0.000000, 0.000000, 0.0000...
-llama_model_loader: - kv 16: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 2, 1, 1, 1, 1, 1, 1, 1, 1, ...
+llama_model_loader: - kv 15: tokenizer.ggml.scores arr[f32,256000] = [-1000.000000, -1000.000000, -1000.00...
+llama_model_loader: - kv 16: tokenizer.ggml.token_type arr[i32,256000] = [3, 3, 3, 3, 1, 1, 1, 1, 1, 1, 1, 1, ...
 llama_model_loader: - kv 17: tokenizer.ggml.bos_token_id u32 = 2
 llama_model_loader: - kv 18: tokenizer.ggml.eos_token_id u32 = 1
 llama_model_loader: - kv 19: tokenizer.ggml.unknown_token_id u32 = 3
@@ -24,10 +24,11 @@ llama_model_loader: - kv 20: tokenizer.ggml.padding_token_id u32
 llama_model_loader: - kv 21: tokenizer.ggml.add_bos_token bool = true
 llama_model_loader: - kv 22: tokenizer.ggml.add_eos_token bool = false
 llama_model_loader: - kv 23: tokenizer.chat_template str = {{ bos_token }}{% if messages[0]['rol...
-llama_model_loader: - kv 24: general.quantization_version u32 = 2
+llama_model_loader: - kv 24: tokenizer.ggml.add_space_prefix bool = false
+llama_model_loader: - kv 25: general.quantization_version u32 = 2
 llama_model_loader: - type f32: 185 tensors
 llama_model_loader: - type q8_0: 323 tensors
-llm_load_vocab: special tokens cache size = 260
+llm_load_vocab: special tokens cache size = 261
 llm_load_vocab: token to piece cache size = 1.6014 MB
 llm_load_print_meta: format = GGUF V3 (latest)
 llm_load_print_meta: arch = gemma2
@@ -65,7 +66,7 @@ llm_load_print_meta: ssm_d_conv = 0
 llm_load_print_meta: ssm_d_inner = 0
 llm_load_print_meta: ssm_d_state = 0
 llm_load_print_meta: ssm_dt_rank = 0
-llm_load_print_meta: model type = ?B
+llm_load_print_meta: model type = 27B
 llm_load_print_meta: model ftype = Q8_0
 llm_load_print_meta: model params = 27.23 B
 llm_load_print_meta: model size = 26.94 GiB (8.50 BPW)
@@ -104,40 +105,40 @@ llama_new_context_with_model: graph splits = 121
 
 system_info: n_threads = 25 / 32 | AVX = 1 | AVX_VNNI = 0 | AVX2 = 1 | AVX512 = 1 | AVX512_VBMI = 1 | AVX512_VNNI = 1 | AVX512_BF16 = 1 | FMA = 1 | NEON = 0 | SVE = 0 | ARM_FMA = 0 | F16C = 1 | FP16_VA = 0 | WASM_SIMD = 0 | BLAS = 1 | SSE3 = 1 | SSSE3 = 1 | VSX = 0 | MATMUL_INT8 = 0 | LLAMAFILE = 1 |
 compute_imatrix: tokenizing the input ..
-compute_imatrix: tokenization took 126.52 ms
+compute_imatrix: tokenization took 102.378 ms
 compute_imatrix: computing over 128 chunks with batch_size 512
-compute_imatrix: 1.97 seconds per pass - ETA 4.20 minutes
-[1]16.8060,[2]10.0388,[3]8.6860,[4]10.8551,[5]10.6816,[6]8.5806,[7]9.5274,[8]10.0565,[9]10.8304,
+compute_imatrix: 1.92 seconds per pass - ETA 4.08 minutes
+[1]39.6511,[2]16.5757,[3]13.6939,[4]15.6944,[5]16.8349,[6]17.6097,[7]19.6449,[8]21.2138,[9]22.7663,
 save_imatrix: stored collected data after 10 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[10]9.2142,[11]9.3787,[12]10.2510,[13]11.1856,[14]11.6859,[15]12.6435,[16]13.3837,[17]13.5288,[18]14.3690,[19]13.6140,
+[10]19.5954,[11]18.7601,[12]20.1620,[13]21.1839,[14]21.3214,[15]22.4495,[16]22.5613,[17]22.7834,[18]23.9931,[19]23.7468,
 save_imatrix: stored collected data after 20 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[20]13.8628,[21]14.2014,[22]13.9559,[23]14.2008,[24]14.3894,[25]14.7491,[26]14.2443,[27]14.6796,[28]15.1159,[29]14.8935,
+[20]23.8365,[21]26.0599,[22]25.6103,[23]25.1318,[24]25.6891,[25]25.7517,[26]25.1686,[27]25.8776,[28]26.5172,[29]26.5127,
 save_imatrix: stored collected data after 30 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[30]14.7781,[31]13.7545,[32]13.2662,[33]13.0484,[34]12.8644,[35]12.7189,[36]12.9116,[37]12.9272,[38]12.9377,[39]13.1664,
+[30]27.2144,[31]24.9851,[32]23.7414,[33]22.8572,[34]22.1536,[35]21.5105,[36]22.1975,[37]23.0630,[38]23.1560,[39]23.3739,
 save_imatrix: stored collected data after 40 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[40]13.3908,[41]13.7261,[42]14.3809,[43]15.0405,[44]15.6225,[45]16.0831,[46]15.7899,[47]15.8613,[48]16.2568,[49]16.5927,
+[40]23.4627,[41]23.6640,[42]25.0051,[43]25.9131,[44]26.9078,[45]27.6585,[46]27.0582,[47]26.4096,[48]26.8484,[49]27.2839,
 save_imatrix: stored collected data after 50 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[50]16.2484,[51]16.2360,[52]16.3678,[53]16.6269,[54]16.9455,[55]17.2366,[56]17.3912,[57]17.4234,[58]17.5112,[59]17.2045,
+[50]26.9025,[51]26.6051,[52]26.6338,[53]27.0460,[54]27.5828,[55]28.1862,[56]28.5826,[57]28.5708,[58]28.5400,[59]28.0093,
 save_imatrix: stored collected data after 60 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[60]16.9565,[61]16.6263,[62]16.5667,[63]16.5981,[64]16.5561,[65]16.4957,[66]16.4538,[67]16.3132,[68]16.1684,[69]16.2471,
+[60]27.6445,[61]27.2285,[62]27.0626,[63]27.1479,[64]27.4686,[65]27.1084,[66]27.0599,[67]27.0251,[68]26.9609,[69]26.8687,
 save_imatrix: stored collected data after 70 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[70]16.2391,[71]16.2548,[72]16.2723,[73]16.2170,[74]16.1591,[75]16.1024,[76]16.1478,[77]16.2332,[78]16.2062,[79]16.1531,
+[70]26.7705,[71]26.7926,[72]26.7099,[73]26.7881,[74]26.7220,[75]26.5072,[76]26.5858,[77]26.7008,[78]26.6471,[79]26.4994,
 save_imatrix: stored collected data after 80 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[80]16.2953,[81]16.3488,[82]16.3215,[83]16.3536,[84]16.4559,[85]16.2309,[86]16.1677,[87]16.0908,[88]16.1282,[89]16.2161,
+[80]26.6450,[81]26.7715,[82]26.8064,[83]27.0250,[84]27.0770,[85]26.6203,[86]26.4729,[87]26.2858,[88]26.3698,[89]26.3433,
 save_imatrix: stored collected data after 90 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[90]16.2864,[91]16.1855,[92]16.0805,[93]15.9261,[94]15.7792,[95]15.6751,[96]15.5429,[97]15.3907,[98]15.2958,[99]15.3161,
+[90]26.5397,[91]26.5333,[92]26.4208,[93]26.3047,[94]26.1210,[95]26.0503,[96]25.8447,[97]25.8144,[98]25.7297,[99]25.7554,
 save_imatrix: stored collected data after 100 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[100]15.3213,[101]15.4828,[102]15.6273,[103]15.7855,[104]16.1017,[105]16.3573,[106]16.4126,[107]16.4446,[108]16.5071,[109]16.4200,
+[100]25.6738,[101]25.8975,[102]26.0385,[103]26.1709,[104]26.5591,[105]26.8560,[106]26.8766,[107]26.8606,[108]26.7007,[109]26.6811,
 save_imatrix: stored collected data after 110 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[110]16.3406,[111]16.1420,[112]15.9301,[113]16.0263,[114]16.0351,[115]16.0151,[116]16.0227,[117]16.0788,[118]16.0826,[119]16.0903,
+[110]26.4028,[111]26.0091,[112]25.5502,[113]25.6821,[114]25.7196,[115]25.6433,[116]25.5595,[117]25.5877,[118]25.6440,[119]25.6503,
 save_imatrix: stored collected data after 120 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
-[120]16.0969,[121]16.1101,[122]16.0417,[123]16.1350,[124]16.2564,[125]16.3835,[126]16.5740,[127]16.7243,[128]16.8485,
+[120]25.6006,[121]25.5261,[122]25.4242,[123]25.4475,[124]25.6331,[125]25.8680,[126]26.1323,[127]26.2467,[128]26.3842,
 save_imatrix: stored collected data after 128 chunks in gemma-2-27b-it-IMat-GGUF/imatrix.dat
 
-llama_print_timings: load time = 4135.04 ms
+llama_print_timings: load time = 4083.77 ms
 llama_print_timings: sample time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
-llama_print_timings: prompt eval time = 235882.68 ms / 65536 tokens ( 3.60 ms per token, 277.83 tokens per second)
+llama_print_timings: prompt eval time = 232410.77 ms / 65536 tokens ( 3.55 ms per token, 281.98 tokens per second)
 llama_print_timings: eval time = 0.00 ms / 1 runs ( 0.00 ms per token, inf tokens per second)
-llama_print_timings: total time = 239612.08 ms / 65537 tokens
+llama_print_timings: total time = 236107.85 ms / 65537 tokens
 
-Final estimate: PPL = 16.8485 +/- 0.33601
+Final estimate: PPL = 26.3842 +/- 0.60805
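For reading the closing line: perplexity is the exponentiated mean negative log-likelihood over the evaluated tokens (128 chunks × 512 tokens = 65536, matching the prompt-eval count above), and the +/- band is plausibly the standard error of that mean propagated through the exponential. A sketch of the usual definitions, not a quote of llama.cpp's internals:

```latex
\mathrm{PPL} = \exp\!\Bigl( \tfrac{1}{N} \sum_{i=1}^{N} -\log p(x_i \mid x_{<i}) \Bigr),
\qquad
\Delta\mathrm{PPL} \approx \mathrm{PPL} \cdot \frac{\sigma_{\mathrm{NLL}}}{\sqrt{N}},
\qquad
N = 128 \times 512 = 65536.
```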