jamesdumay committed
Commit 7035cc2 · 1 Parent(s): 5609a6f

Add Qwen3.6-35B-A3B-UD-Q4_K_XL full-v1 for unsloth/Qwen3.6-35B-A3B-GGUF@9280dd353ab5 (#4)


- Add Qwen3.6-35B-A3B-UD-Q4_K_XL full-v1 for unsloth/Qwen3.6-35B-A3B-GGUF@9280dd353ab5 (2314bf0a88b32cbb203259f5f030b2e70b3e2425)

data/unsloth/Qwen3.6-35B-A3B-GGUF/9280dd353ab587157920d5bd391ada414d84e552/gguf/Qwen3.6-35B-A3B-UD-Q4_K_XL/full-v1/analysis.json ADDED
@@ -0,0 +1,59 @@
+{
+  "memory": {
+    "base_resident_bytes": 2678180352,
+    "expert_bytes": {
+      "bytes_per_expert": 76840960,
+      "kind": "uniform"
+    },
+    "expert_tensor_bytes_total": 19671285760,
+    "full_model_bytes": 22360456160,
+    "shard_file_overhead_bytes": 10990048
+  },
+  "model": {
+    "expert_count": 256,
+    "expert_used_count": 8
+  },
+  "ranking": {
+    "mass_checkpoints": [
+      {
+        "mass_fraction": 0.320624258670914,
+        "top_n": 1
+      },
+      {
+        "mass_fraction": 0.324429432772452,
+        "top_n": 2
+      },
+      {
+        "mass_fraction": 0.3318762258353961,
+        "top_n": 4
+      },
+      {
+        "mass_fraction": 0.3460204514826405,
+        "top_n": 8
+      },
+      {
+        "mass_fraction": 0.3726827485247638,
+        "top_n": 16
+      },
+      {
+        "mass_fraction": 0.42329422660075683,
+        "top_n": 32
+      },
+      {
+        "mass_fraction": 0.51767078795539,
+        "top_n": 64
+      },
+      {
+        "mass_fraction": 0.6919257754605005,
+        "top_n": 128
+      },
+      {
+        "mass_fraction": 1.0,
+        "top_n": 256
+      }
+    ],
+    "rows": 256,
+    "sha256": "sha256:5f514ec9a2a890798f6692b2606005fb84175092ef25555f8b5e1d0c625349b9"
+  },
+  "schema_version": 1
+}
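
The memory block above is internally consistent: `base_resident_bytes + expert_tensor_bytes_total + shard_file_overhead_bytes` equals `full_model_bytes`, and `expert_tensor_bytes_total` is exactly 256 × `bytes_per_expert`. A minimal Python sketch (not part of this commit; field semantics inferred from those identities) that estimates the resident size of hosting only the hottest N experts:

```python
import json

# Read the analysis above; path assumed relative to this directory.
with open("analysis.json") as f:
    a = json.load(f)

mem = a["memory"]
per_expert = mem["expert_bytes"]["bytes_per_expert"]  # uniform per-expert size

# Identity that holds exactly in this file:
#   base_resident + expert_tensors_total + shard_overhead == full_model_bytes
assert (mem["base_resident_bytes"]
        + mem["expert_tensor_bytes_total"]
        + mem["shard_file_overhead_bytes"]) == mem["full_model_bytes"]

def resident_bytes(top_n: int) -> int:
    """Non-expert weights always resident, plus top_n expert slots."""
    return mem["base_resident_bytes"] + top_n * per_expert

for cp in a["ranking"]["mass_checkpoints"]:
    n, frac = cp["top_n"], cp["mass_fraction"]
    print(f"top-{n:>3}: {resident_bytes(n) / 2**30:6.2f} GiB "
          f"covers {frac:.1%} of gate mass")
```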
data/unsloth/Qwen3.6-35B-A3B-GGUF/9280dd353ab587157920d5bd391ada414d84e552/gguf/Qwen3.6-35B-A3B-UD-Q4_K_XL/full-v1/metadata.json ADDED
@@ -0,0 +1,28 @@
+{
+  "all_files": [
+    "Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf"
+  ],
+  "all_layers": true,
+  "analysis_tool": "llama-moe-analyze",
+  "analyzer_id": "full-v1",
+  "command": {
+    "analyzer_id": "full-v1",
+    "context_size": 4096,
+    "token_count": 32
+  },
+  "created_at": "2026-04-17T08:07:52+00:00",
+  "distribution_id": "Qwen3.6-35B-A3B-UD-Q4_K_XL",
+  "file_hashes": {
+    "Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf": "sha256:707a55a8a4397ecde44de0c499d3e68c1ad1d240d1da65826b4949d1043f4450"
+  },
+  "format": "gguf",
+  "primary_file": "Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf",
+  "prompt_count": null,
+  "prompt_set": null,
+  "ranking_path": "ranking.csv",
+  "schema_version": 1,
+  "source_repo": "unsloth/Qwen3.6-35B-A3B-GGUF",
+  "source_revision": "9280dd353ab587157920d5bd391ada414d84e552",
+  "status": "complete",
+  "token_count": 32
+}
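
A quick integrity check against `file_hashes`, sketched in Python (the path is a placeholder; point it at your local copy of the GGUF):

```python
import hashlib

# Placeholder path; adjust to wherever the GGUF lives in your HF cache.
path = "Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf"
expected = "sha256:707a55a8a4397ecde44de0c499d3e68c1ad1d240d1da65826b4949d1043f4450"

h = hashlib.sha256()
with open(path, "rb") as f:
    for chunk in iter(lambda: f.read(1 << 20), b""):  # read in 1 MiB chunks
        h.update(chunk)

assert f"sha256:{h.hexdigest()}" == expected, "hash mismatch"
print("file matches metadata.json file_hashes")
```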
data/unsloth/Qwen3.6-35B-A3B-GGUF/9280dd353ab587157920d5bd391ada414d84e552/gguf/Qwen3.6-35B-A3B-UD-Q4_K_XL/full-v1/ranking.csv ADDED
@@ -0,0 +1,265 @@
+# MoE expert ranking by gate mass
+# Model: /Users/jdumay/.cache/huggingface/hub/models--unsloth--Qwen3.6-35B-A3B-GGUF/snapshots/9280dd353ab587157920d5bd391ada414d84e552/Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf
+# Experts: 256 (top-8)
+# Prompts: 10 x 32 tokens
+# Layers logged: all
+# Total token-layer observations: 3304225
+#
+# Format: expert_id,gate_mass,mass_pct,selection_count
+# Sorted by gate_mass descending (hottest first)
+0,12897.1,32.0625,3277738
+243,153.063,0.380518,1539
+89,150.49,0.37412,1298
+60,149.057,0.370558,1645
+224,146.013,0.362991,1489
+64,142.587,0.354475,1426
+95,140.235,0.348627,1550
+189,140.116,0.348331,1045
+229,136.135,0.338433,1522
+36,136.132,0.338425,1348
+125,134.277,0.333815,1138
+108,133.868,0.332798,1271
+254,133.33,0.33146,1197
+167,133.256,0.331275,1316
+43,132.86,0.330292,1652
+88,132.632,0.329726,1366
+137,132.245,0.328764,1186
+165,131.32,0.326463,1255
+160,129.608,0.322208,1217
+46,129.056,0.320835,1160
+35,128.984,0.320657,1510
+134,128.404,0.319215,1327
+65,127.282,0.316426,1397
+103,126.612,0.314761,1334
+191,126.241,0.313836,1438
+182,126.235,0.313821,1221
+72,125.683,0.31245,1384
+56,125.096,0.31099,1116
+204,125.043,0.31086,1170
+203,125.007,0.31077,1321
+158,125.007,0.310769,1112
+20,124.022,0.308322,1580
+248,123.246,0.306392,1154
+220,122.568,0.304705,1275
+61,122.235,0.303879,1274
+163,121.789,0.30277,1178
+221,121.181,0.301257,1050
+206,121.022,0.300863,1151
+105,120.96,0.300708,769
+201,120.941,0.30066,976
+47,120.627,0.29988,1263
+151,119.378,0.296776,1568
+127,119.272,0.296513,1033
+99,119.161,0.296236,1090
+208,119.019,0.295884,924
+219,118.918,0.295632,1047
+171,118.732,0.29517,1007
+111,118.732,0.29517,1115
+185,118.714,0.295126,1265
+251,118.705,0.295103,1041
+121,118.695,0.295078,978
+116,118.639,0.294939,1415
+87,118.281,0.294049,1160
+209,117.858,0.292998,1164
+217,117.032,0.290944,921
+107,116.513,0.289654,953
+42,116.391,0.289351,976
+239,115.628,0.287454,937
+2,115.512,0.287164,1055
+250,115.34,0.286737,1130
+130,115.325,0.286701,1245
+128,115.31,0.286663,981
+205,115.303,0.286645,1045
+28,115.267,0.286555,1104
+252,115.165,0.286301,907
+30,114.915,0.285681,798
+162,114.525,0.284711,986
+44,114.387,0.284369,1270
+138,114.295,0.284139,970
+32,114.198,0.283898,1098
+225,114.078,0.283601,793
+41,113.612,0.282441,683
+154,112.79,0.280397,950
+176,112.788,0.280392,967
+69,112.288,0.279151,791
+37,112.096,0.278674,897
+26,112.058,0.278579,1135
+86,111.989,0.278406,1013
+202,111.718,0.277733,1047
+63,111.708,0.277709,1091
+247,111.689,0.277661,872
+218,111.589,0.277412,1028
+112,111.27,0.276619,838
+169,111.268,0.276614,984
+13,111.201,0.276447,1305
+133,110.741,0.275304,951
+22,110.69,0.275178,1174
+91,110.66,0.275102,863
+173,110.387,0.274423,876
+39,110.049,0.273584,996
+124,110.037,0.273554,1042
+98,110.025,0.273525,902
+106,109.999,0.27346,642
+233,109.938,0.273307,1053
+70,109.569,0.27239,949
+166,109.373,0.271903,944
+180,109.137,0.271316,840
+51,108.977,0.27092,1057
+198,108.871,0.270656,685
+177,108.763,0.270386,663
+238,108.697,0.270223,1056
+196,108.531,0.269811,923
+92,108.279,0.269184,1172
+215,108.18,0.268938,785
+58,108.128,0.268809,1041
+79,108.052,0.268618,836
+15,108.027,0.268558,1011
+110,108.011,0.268517,972
+11,107.797,0.267986,938
+172,107.401,0.267001,914
+40,107.34,0.26685,837
+153,107.159,0.266398,890
+143,106.979,0.265951,764
+242,106.872,0.265685,932
+144,106.858,0.265651,813
+3,106.841,0.26561,1008
+75,106.839,0.265603,952
+54,106.475,0.264698,922
+132,106.44,0.264612,938
+17,106.359,0.26441,598
+199,106.254,0.264148,899
+192,105.588,0.262493,772
+59,105.534,0.262359,945
+1,105.412,0.262055,1279
+214,105.398,0.26202,682
+194,105.087,0.261248,779
+184,105.021,0.261084,527
+255,104.999,0.261029,811
+78,104.945,0.260895,758
+84,104.917,0.260825,826
+27,104.76,0.260434,882
+71,104.571,0.259965,930
+52,104.5,0.259789,736
+175,104.442,0.259645,679
+200,104.317,0.259333,744
+195,104.209,0.259065,822
+10,104.207,0.259059,729
+152,103.981,0.258498,724
+174,103.938,0.258391,704
+102,103.895,0.258284,628
+66,103.891,0.258274,877
+83,103.844,0.258158,784
+226,103.62,0.257602,628
+93,103.383,0.257011,744
+49,103.371,0.256983,551
+57,103.199,0.256553,824
+73,103.193,0.25654,702
+117,103.142,0.256413,747
+115,103.094,0.256293,760
+45,102.958,0.255955,740
+149,102.6,0.255066,681
+123,102.465,0.254729,744
+129,102.158,0.253967,793
+25,102.056,0.253713,636
+159,101.731,0.252906,611
+53,101.159,0.251482,852
+232,101.149,0.251457,689
+236,101.135,0.251424,809
+100,101.131,0.251413,513
+147,101.089,0.251309,909
+150,100.953,0.25097,570
+228,100.72,0.250392,571
+212,100.615,0.25013,708
+148,100.426,0.24966,851
+241,99.8775,0.248297,684
+155,99.8737,0.248288,461
+24,99.7311,0.247933,655
+170,99.6893,0.247829,777
+82,99.6649,0.247768,465
+68,99.5295,0.247432,579
+85,99.5103,0.247384,863
+4,99.4539,0.247244,754
+19,99.405,0.247123,761
+126,99.3764,0.247051,677
+16,99.3484,0.246982,690
+230,99.1009,0.246366,518
+14,98.9578,0.246011,790
+141,98.7835,0.245577,603
+164,98.717,0.245412,837
+77,98.7115,0.245398,595
+145,98.7103,0.245395,798
+23,98.71,0.245395,801
+12,98.1749,0.244064,710
+146,98.1143,0.243914,761
+213,98.0514,0.243757,595
+55,98.0126,0.243661,857
+31,97.9284,0.243452,503
+227,97.8974,0.243374,570
+48,97.7811,0.243085,616
+234,97.7319,0.242963,737
+237,97.6486,0.242756,584
+34,97.6462,0.24275,558
+38,97.6346,0.242721,709
+113,97.3721,0.242069,657
+140,97.2809,0.241842,445
+253,97.2683,0.241811,448
+80,97.091,0.24137,498
+97,97.083,0.24135,677
+33,96.7513,0.240525,715
+94,96.7201,0.240448,735
+231,96.4082,0.239672,580
+90,96.3466,0.239519,678
+9,96.336,0.239493,601
+156,96.1985,0.239151,661
+157,96.0247,0.238719,798
+120,95.8212,0.238213,823
+18,95.4528,0.237297,871
+6,95.2122,0.236699,571
+210,95.2075,0.236687,508
+183,95.1461,0.236535,641
+187,95.0521,0.236301,492
+104,95.0467,0.236288,647
+223,94.8627,0.23583,433
+178,94.7889,0.235647,549
+119,94.7123,0.235456,575
+142,94.4449,0.234792,669
+193,94.3387,0.234528,707
+76,94.3314,0.234509,634
+197,94.3222,0.234487,772
+122,94.2152,0.234221,799
+168,94.0656,0.233849,317
+207,94.0196,0.233734,631
+161,94.0048,0.233697,709
+101,93.9512,0.233564,639
+114,93.8643,0.233348,641
+244,93.7773,0.233132,485
+74,93.7731,0.233121,709
+188,93.155,0.231585,599
+7,92.5198,0.230006,613
+222,92.278,0.229405,727
+139,92.1443,0.229072,642
+136,92.0111,0.228741,417
+179,91.9323,0.228545,511
+246,91.8896,0.228439,537
+118,91.8888,0.228437,481
+81,91.7182,0.228013,610
+5,91.053,0.226359,633
+186,90.6356,0.225322,521
+50,90.5887,0.225205,756
+235,90.5667,0.22515,602
+29,90.5197,0.225033,527
+109,90.1993,0.224237,590
+8,90.0863,0.223956,437
+245,89.7939,0.223229,347
+67,88.9806,0.221207,465
+131,88.8688,0.220929,468
+181,88.1751,0.219205,583
+211,87.618,0.21782,566
+135,87.2305,0.216856,499
+62,86.7473,0.215655,386
+96,85.6512,0.21293,557
+21,84.3823,0.209776,551
+249,84.0732,0.209007,460
+216,83.6198,0.20788,426
+240,81.7215,0.203161,381
+190,81.3988,0.202359,461
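
The `mass_checkpoints` in analysis.json can be reproduced directly from this CSV. A short Python sketch (assuming the file is saved locally as ranking.csv):

```python
# Recompute the cumulative top-N gate-mass fractions from the ranking CSV.
rows = []
with open("ranking.csv") as f:
    for line in f:
        if line.startswith("#") or not line.strip():
            continue  # skip the comment header
        expert_id, gate_mass, _pct, _count = line.strip().split(",")
        rows.append((int(expert_id), float(gate_mass)))

# Rows are already sorted by gate_mass descending ("hottest first").
total = sum(mass for _, mass in rows)
cum = 0.0
for i, (_, mass) in enumerate(rows, start=1):
    cum += mass
    if i in (1, 2, 4, 8, 16, 32, 64, 128, 256):
        print(f"top-{i:>3}: {cum / total:.4f} of total gate mass")
```

For the top-1 checkpoint this gives 12897.1 / total ≈ 0.3206, matching both the `mass_pct` column (32.0625) and the 0.3206… figure in analysis.json.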
data/unsloth/Qwen3.6-35B-A3B-GGUF/9280dd353ab587157920d5bd391ada414d84e552/gguf/Qwen3.6-35B-A3B-UD-Q4_K_XL/full-v1/run.log ADDED
@@ -0,0 +1,333 @@
+$ /Users/jdumay/code/mesh-llm-moe-fit-checks/llama.cpp/build/bin/llama-moe-analyze -m /Users/jdumay/.cache/huggingface/hub/models--unsloth--Qwen3.6-35B-A3B-GGUF/snapshots/9280dd353ab587157920d5bd391ada414d84e552/Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf --all-layers --export-ranking /Users/jdumay/.cache/mesh-llm/moe-rankings/hf-unsloth--Qwen3.6-35B-A3B-GGUF-9280dd353ab587157920d5bd391ada414d84e552-Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf.csv -n 32 -c 4096 -ngl 0
+
+[stdout]
+
+[stderr]
+ggml_metal_device_init: tensor API disabled for pre-M5 and pre-A19 devices
+ggml_metal_library_init: using embedded metal library
+ggml_metal_library_init: loaded in 9.099 sec
+ggml_metal_rsets_init: creating a residency set collection (keep_alive = 180 s)
+ggml_metal_device_init: GPU name: MTL0
+ggml_metal_device_init: GPU family: MTLGPUFamilyApple7 (1007)
+ggml_metal_device_init: GPU family: MTLGPUFamilyCommon3 (3003)
+ggml_metal_device_init: GPU family: MTLGPUFamilyMetal4 (5002)
+ggml_metal_device_init: simdgroup reduction = true
+ggml_metal_device_init: simdgroup matrix mul. = true
+ggml_metal_device_init: has unified memory = true
+ggml_metal_device_init: has bfloat = true
+ggml_metal_device_init: has tensor = false
+ggml_metal_device_init: use residency sets = true
+ggml_metal_device_init: use shared buffers = true
+ggml_metal_device_init: recommendedMaxWorkingSetSize = 115448.73 MB
+common_init_result: fitting params to device memory, for bugs during this step try to reproduce them with -fit off, or provide --verbose logs if the bug only occurs with -fit on
+llama_params_fit_impl: projected to use 16 MiB of device memory vs. 110100 MiB of free device memory
+llama_params_fit_impl: will leave 110084 >= 1024 MiB of free device memory, no changes needed
+llama_params_fit: successfully fit params to free device memory
+llama_params_fit: fitting params to free memory took 0.32 seconds
+llama_model_load_from_file_impl: using device MTL0 (Apple M1 Ultra) (unknown id) - 110100 MiB free
+llama_model_loader: loaded meta data with 54 key-value pairs and 733 tensors from /Users/jdumay/.cache/huggingface/hub/models--unsloth--Qwen3.6-35B-A3B-GGUF/snapshots/9280dd353ab587157920d5bd391ada414d84e552/Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf (version GGUF V3 (latest))
+llama_model_loader: Dumping metadata keys/values. Note: KV overrides do not apply in this output.
+llama_model_loader: - kv   0: general.architecture str = qwen35moe
+llama_model_loader: - kv   1: general.type str = model
+llama_model_loader: - kv   2: general.sampling.top_k i32 = 20
+llama_model_loader: - kv   3: general.sampling.top_p f32 = 0.950000
+llama_model_loader: - kv   4: general.sampling.temp f32 = 1.000000
+llama_model_loader: - kv   5: general.name str = Qwen3.6-35B-A3B
+llama_model_loader: - kv   6: general.basename str = Qwen3.6-35B-A3B
+llama_model_loader: - kv   7: general.quantized_by str = Unsloth
+llama_model_loader: - kv   8: general.size_label str = 35B-A3B
+llama_model_loader: - kv   9: general.license str = apache-2.0
+llama_model_loader: - kv  10: general.license.link str = https://huggingface.co/Qwen/Qwen3.6-3...
+llama_model_loader: - kv  11: general.repo_url str = https://huggingface.co/unsloth
+llama_model_loader: - kv  12: general.base_model.count u32 = 1
+llama_model_loader: - kv  13: general.base_model.0.name str = Qwen3.6 35B A3B
+llama_model_loader: - kv  14: general.base_model.0.organization str = Qwen
+llama_model_loader: - kv  15: general.base_model.0.repo_url str = https://huggingface.co/Qwen/Qwen3.6-3...
+llama_model_loader: - kv  16: general.tags arr[str,3] = ["qwen3_5_moe", "qwen", "image-text-t...
+llama_model_loader: - kv  17: qwen35moe.block_count u32 = 40
+llama_model_loader: - kv  18: qwen35moe.context_length u32 = 262144
+llama_model_loader: - kv  19: qwen35moe.embedding_length u32 = 2048
+llama_model_loader: - kv  20: qwen35moe.attention.head_count u32 = 16
+llama_model_loader: - kv  21: qwen35moe.attention.head_count_kv u32 = 2
+llama_model_loader: - kv  22: qwen35moe.rope.dimension_sections arr[i32,4] = [11, 11, 10, 0]
+llama_model_loader: - kv  23: qwen35moe.rope.freq_base f32 = 10000000.000000
+llama_model_loader: - kv  24: qwen35moe.attention.layer_norm_rms_epsilon f32 = 0.000001
+llama_model_loader: - kv  25: qwen35moe.expert_count u32 = 256
+llama_model_loader: - kv  26: qwen35moe.expert_used_count u32 = 8
+llama_model_loader: - kv  27: qwen35moe.attention.key_length u32 = 256
+llama_model_loader: - kv  28: qwen35moe.attention.value_length u32 = 256
+llama_model_loader: - kv  29: qwen35moe.expert_feed_forward_length u32 = 512
+llama_model_loader: - kv  30: qwen35moe.expert_shared_feed_forward_length u32 = 512
+llama_model_loader: - kv  31: qwen35moe.ssm.conv_kernel u32 = 4
+llama_model_loader: - kv  32: qwen35moe.ssm.state_size u32 = 128
+llama_model_loader: - kv  33: qwen35moe.ssm.group_count u32 = 16
+llama_model_loader: - kv  34: qwen35moe.ssm.time_step_rank u32 = 32
+llama_model_loader: - kv  35: qwen35moe.ssm.inner_size u32 = 4096
+llama_model_loader: - kv  36: qwen35moe.full_attention_interval u32 = 4
+llama_model_loader: - kv  37: qwen35moe.rope.dimension_count u32 = 64
+llama_model_loader: - kv  38: tokenizer.ggml.model str = gpt2
+llama_model_loader: - kv  39: tokenizer.ggml.pre str = qwen35
+llama_model_loader: - kv  40: tokenizer.ggml.tokens arr[str,248320] = ["!", "\"", "#", "$", "%", "&", "'", ...
+llama_model_loader: - kv  41: tokenizer.ggml.token_type arr[i32,248320] = [1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, ...
+llama_model_loader: - kv  42: tokenizer.ggml.merges arr[str,247587] = ["Ġ Ġ", "ĠĠ ĠĠ", "i n", "Ġ t",...
+llama_model_loader: - kv  43: tokenizer.ggml.eos_token_id u32 = 248046
+llama_model_loader: - kv  44: tokenizer.ggml.padding_token_id u32 = 248055
+llama_model_loader: - kv  45: tokenizer.ggml.bos_token_id u32 = 248044
+llama_model_loader: - kv  46: tokenizer.ggml.add_bos_token bool = false
+llama_model_loader: - kv  47: tokenizer.chat_template str = {%- set image_count = namespace(value...
+llama_model_loader: - kv  48: general.quantization_version u32 = 2
+llama_model_loader: - kv  49: general.file_type u32 = 15
+llama_model_loader: - kv  50: quantize.imatrix.file str = Qwen3.6-35B-A3B-GGUF/imatrix_unsloth....
+llama_model_loader: - kv  51: quantize.imatrix.dataset str = unsloth_calibration_Qwen3.6-35B-A3B.txt
+llama_model_loader: - kv  52: quantize.imatrix.entries_count u32 = 510
+llama_model_loader: - kv  53: quantize.imatrix.chunks_count u32 = 76
+llama_model_loader: - type f32: 361 tensors
+llama_model_loader: - type q8_0: 252 tensors
+llama_model_loader: - type q4_K: 78 tensors
+llama_model_loader: - type q5_K: 38 tensors
+llama_model_loader: - type q6_K: 4 tensors
+print_info: file format = GGUF V3 (latest)
+print_info: file type = Q4_K - Medium
+print_info: file size = 20.81 GiB (5.16 BPW)
+load: 0 unused tokens
+load: printing all EOG tokens:
+load: - 248044 ('<|endoftext|>')
+load: - 248046 ('<|im_end|>')
+load: - 248063 ('<|fim_pad|>')
+load: - 248064 ('<|repo_name|>')
+load: - 248065 ('<|file_sep|>')
+load: special tokens cache size = 33
+load: token to piece cache size = 1.7581 MB
+print_info: arch = qwen35moe
+print_info: vocab_only = 0
+print_info: no_alloc = 0
+print_info: n_ctx_train = 262144
+print_info: n_embd = 2048
+print_info: n_embd_inp = 2048
+print_info: n_layer = 40
+print_info: n_head = 16
+print_info: n_head_kv = 2
+print_info: n_rot = 64
+print_info: n_swa = 0
+print_info: is_swa_any = 0
+print_info: n_embd_head_k = 256
+print_info: n_embd_head_v = 256
+print_info: n_gqa = 8
+print_info: n_embd_k_gqa = 512
+print_info: n_embd_v_gqa = 512
+print_info: f_norm_eps = 0.0e+00
+print_info: f_norm_rms_eps = 1.0e-06
+print_info: f_clamp_kqv = 0.0e+00
+print_info: f_max_alibi_bias = 0.0e+00
+print_info: f_logit_scale = 0.0e+00
+print_info: f_attn_scale = 0.0e+00
+print_info: n_ff = 0
+print_info: n_expert = 256
+print_info: n_expert_used = 8
+print_info: n_expert_groups = 0
+print_info: n_group_used = 0
+print_info: causal attn = 1
+print_info: pooling type = -1
+print_info: rope type = 40
+print_info: rope scaling = linear
+print_info: freq_base_train = 10000000.0
+print_info: freq_scale_train = 1
+print_info: n_ctx_orig_yarn = 262144
+print_info: rope_yarn_log_mul = 0.0000
+print_info: rope_finetuned = unknown
+print_info: mrope sections = [11, 11, 10, 0]
+print_info: ssm_d_conv = 4
+print_info: ssm_d_inner = 4096
+print_info: ssm_d_state = 128
+print_info: ssm_dt_rank = 32
+print_info: ssm_n_group = 16
+print_info: ssm_dt_b_c_rms = 0
+print_info: model type = 35B.A3B
+print_info: model params = 34.66 B
+print_info: general.name = Qwen3.6-35B-A3B
+print_info: vocab type = BPE
+print_info: n_vocab = 248320
+print_info: n_merges = 247587
+print_info: BOS token = 248044 '<|endoftext|>'
+print_info: EOS token = 248046 '<|im_end|>'
+print_info: EOT token = 248046 '<|im_end|>'
+print_info: PAD token = 248055 '<|vision_pad|>'
+print_info: LF token = 198 'Ċ'
+print_info: FIM PRE token = 248060 '<|fim_prefix|>'
+print_info: FIM SUF token = 248062 '<|fim_suffix|>'
+print_info: FIM MID token = 248061 '<|fim_middle|>'
+print_info: FIM PAD token = 248063 '<|fim_pad|>'
+print_info: FIM REP token = 248064 '<|repo_name|>'
+print_info: FIM SEP token = 248065 '<|file_sep|>'
+print_info: EOG token = 248044 '<|endoftext|>'
+print_info: EOG token = 248046 '<|im_end|>'
+print_info: EOG token = 248063 '<|fim_pad|>'
+print_info: EOG token = 248064 '<|repo_name|>'
+print_info: EOG token = 248065 '<|file_sep|>'
+print_info: max token length = 256
+load_tensors: loading model tensors, this can take a while... (mmap = true, direct_io = false)
+load_tensors: offloading 0 repeating layers to GPU
+load_tensors: offloaded 0/41 layers to GPU
+load_tensors: CPU_Mapped model buffer size = 20798.80 MiB
+load_tensors: CPU_REPACK model buffer size = 20699.06 MiB
+.................................................................................................
+common_init_result: added <|endoftext|> logit bias = -inf
+common_init_result: added <|im_end|> logit bias = -inf
+common_init_result: added <|fim_pad|> logit bias = -inf
+common_init_result: added <|repo_name|> logit bias = -inf
+common_init_result: added <|file_sep|> logit bias = -inf
+llama_context: constructing llama_context
+llama_context: n_seq_max = 1
+llama_context: n_ctx = 4096
+llama_context: n_ctx_seq = 4096
+llama_context: n_batch = 2048
+llama_context: n_ubatch = 512
+llama_context: causal_attn = 1
+llama_context: flash_attn = auto
+llama_context: kv_unified = false
+llama_context: freq_base = 10000000.0
+llama_context: freq_scale = 1
+llama_context: n_ctx_seq (4096) < n_ctx_train (262144) -- the full capacity of the model will not be utilized
+ggml_metal_init: allocating
+ggml_metal_init: found device: Apple M1 Ultra
+ggml_metal_init: picking default device: Apple M1 Ultra
+ggml_metal_init: use fusion = true
+ggml_metal_init: use concurrency = true
+ggml_metal_init: use graph optimize = true
+llama_context: CPU output buffer size = 0.95 MiB
+llama_kv_cache: CPU KV buffer size = 80.00 MiB
+llama_kv_cache: size = 80.00 MiB ( 4096 cells, 10 layers, 1/1 seqs), K (f16): 40.00 MiB, V (f16): 40.00 MiB
+llama_kv_cache: attn_rot_k = 0, n_embd_head_k_all = 256
+llama_kv_cache: attn_rot_v = 0, n_embd_head_k_all = 256
+llama_memory_recurrent: CPU RS buffer size = 62.81 MiB
+llama_memory_recurrent: size = 62.81 MiB ( 1 cells, 40 layers, 1 seqs), R (f32): 2.81 MiB, S (f32): 60.00 MiB
+sched_reserve: reserving ...
+sched_reserve: Flash Attention was auto, set to enabled
+sched_reserve: resolving fused Gated Delta Net support:
+sched_reserve: fused Gated Delta Net (autoregressive) enabled
+sched_reserve: fused Gated Delta Net (chunked) enabled
+sched_reserve: MTL0 compute buffer size = 16.01 MiB
+sched_reserve: CPU compute buffer size = 493.00 MiB
+sched_reserve: graph nodes = 3729
+sched_reserve: graph splits = 281 (with bs=512), 1 (with bs=1)
+sched_reserve: reserve took 7.47 ms, sched copies = 1
+
+=== MoE Expert Routing Analysis ===
+Model experts: 256, used per token: 8
+Logging ALL MoE layers (--all-layers)
+Will export expert ranking to: /Users/jdumay/.cache/mesh-llm/moe-rankings/hf-unsloth--Qwen3.6-35B-A3B-GGUF-9280dd353ab587157920d5bd391ada414d84e552-Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf.csv
+Running 10 prompts, generating 32 tokens each
+Logging first 9999 MoE layers per eval
+
+Prompt 1/10: <|im_start|>user
+Write a Python function to find the nth Fib...
+collected 2640 layer snapshots (total: 2640)
+Prompt 2/10: <|im_start|>user
+Write a Rust function that reads a CSV file...
+collected 2640 layer snapshots (total: 5280)
+Prompt 3/10: <|im_start|>user
+Explain how a B-tree index works in a datab...
+collected 2640 layer snapshots (total: 7920)
+Prompt 4/10: <|im_start|>user
+If all roses are flowers and some flowers f...
+collected 2640 layer snapshots (total: 10560)
+Prompt 5/10: <|im_start|>user
+A train travels 120km in 2 hours. It then s...
+collected 2640 layer snapshots (total: 13200)
+Prompt 6/10: <|im_start|>user
+Hello! What's the best way to learn a new l...
+collected 2640 layer snapshots (total: 15840)
+Prompt 7/10: <|im_start|>user
+Tell me a joke about programmers.<|im_end|>...
+collected 1840 layer snapshots (total: 17680)
+Prompt 8/10: <|im_start|>user
+Summarize the key differences between TCP a...
+collected 2640 layer snapshots (total: 20320)
+Prompt 9/10: <|im_start|>user
+Translate 'The weather is beautiful today' ...
+collected 2640 layer snapshots (total: 22960)
+Prompt 10/10: <|im_start|>user
+List 5 healthy breakfast options with brief...
+collected 2640 layer snapshots (total: 25600)
+
+=== Expert Popularity (gate mass, summed across all tokens & logged layers) ===
+Total tokens × layers: 3304225
+
+Top 20 experts by gate mass:
+  Expert        Mass  Mass%  Selected
+       0  12897.1294  32.06   3277738
+     243    153.0632   0.38      1539
+      89    150.4899   0.37      1298
+      60    149.0570   0.37      1645
+     224    146.0130   0.36      1489
+      64    142.5874   0.35      1426
+      95    140.2351   0.35      1550
+     189    140.1161   0.35      1045
+     229    136.1348   0.34      1522
+      36    136.1316   0.34      1348
+     125    134.2771   0.33      1138
+     108    133.8681   0.33      1271
+     254    133.3297   0.33      1197
+     167    133.2555   0.33      1316
+      43    132.8598   0.33      1652
+      88    132.6322   0.33      1366
+     137    132.2451   0.33      1186
+     165    131.3198   0.33      1255
+     160    129.6081   0.32      1217
+      46    129.0560   0.32      1160
+
+Concentration:
+  Top  4 experts: 33.2% of total gate mass
+  Top  8 experts: 34.6% of total gate mass
+  Top 16 experts: 37.3% of total gate mass
+  Top 32 experts: 42.3% of total gate mass
+  Top 64 experts: 51.8% of total gate mass
+
+Exported expert ranking to: /Users/jdumay/.cache/mesh-llm/moe-rankings/hf-unsloth--Qwen3.6-35B-A3B-GGUF-9280dd353ab587157920d5bd391ada414d84e552-Qwen3.6-35B-A3B-UD-Q4_K_XL.gguf.csv
+Use with moe-split: --group-map <file generated from this ranking>
+
+
+=== Group Masking Analysis (best-group capture ratio) ===
+For each group count, what fraction of the unrestricted top-8 mass
+is captured by the best single group?
+
+  Groups  Replicas  Exp/Grp   Mean    P25    P50     P5
+       2         0      128  0.999  1.000  1.000  1.000
+       2         1      128  0.999  1.000  1.000  1.000
+       2         2      128  0.999  1.000  1.000  1.000
+       2         4      128  0.999  1.000  1.000  1.000
+       2         8      128  0.999  1.000  1.000  1.000
+
+       4         0       64  0.997  1.000  1.000  1.000
+       4         1       64  0.997  1.000  1.000  1.000
+       4         2       64  0.998  1.000  1.000  1.000
+       4         4       64  0.998  1.000  1.000  1.000
+       4         8       64  0.998  1.000  1.000  1.000
+
+       8         0       32  0.997  1.000  1.000  1.000
+       8         1       32  0.997  1.000  1.000  1.000
+       8         2       32  0.997  1.000  1.000  1.000
+       8         4       32  0.997  1.000  1.000  1.000
+       8         8       32  0.997  1.000  1.000  1.000
+
+=== Interpretation ===
+Mean close to 1.0 = masking barely hurts (best group captures most of top-k mass)
+Mean < 0.7 = significant quality risk from group restriction
+P5 close to 1.0 = even worst-case tokens are OK
+P5 < 0.5 = some tokens will be badly served by any single group
+
+=== Phase 1b: Masked Generation Quality (logprob comparison) ===
+Testing 4 groups (64 experts/group) vs baseline (all 256 experts)
+Using first 5 prompts, generating 32 tokens each
+
+Group 0 (experts 0-63 + 2 hot replicas): avg logprob delta = -0.3751
+Group 1 (experts 64-127 + 2 hot replicas): avg logprob delta = -0.1213
+Group 2 (experts 128-191 + 2 hot replicas): avg logprob delta = -0.1904
+Group 3 (experts 192-255 + 2 hot replicas): avg logprob delta = -0.3067
+
+=== Interpretation (logprob delta) ===
+Delta near 0.0 = masking barely affects generation quality
+Delta < -0.1 = noticeable quality loss
+Delta < -0.5 = significant degradation
+
+ggml_metal_free: deallocating
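
Tying the Phase 1b experiment back to the memory figures in analysis.json: with 4 groups of 64 experts and 2 hot replicas per node, a rough per-node resident size falls out of the byte counts above. A back-of-envelope sketch only; the per-node layout is an illustrative assumption, not something this run.log specifies:

```python
# Each node hosts its 64-expert group, 2 replicas of the hottest experts,
# and all non-expert weights. Byte figures come from analysis.json.
base = 2_678_180_352        # memory.base_resident_bytes
per_expert = 76_840_960     # memory.expert_bytes.bytes_per_expert

per_node = base + (64 + 2) * per_expert
print(f"~{per_node / 2**30:.2f} GiB resident per node")  # ~7.22 GiB vs 20.8 GiB full
```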