{
"_class_name": "Flux2Transformer2DModel",
"_diffusers_version": "0.37.0",
"_name_or_path": "/root/.cache/huggingface/hub/models--black-forest-labs--FLUX.2-dev/snapshots/26afe3a78bb242c0a8bb181dcc8937bb16e5c66c/transformer",
"attention_head_dim": 128,
"axes_dims_rope": [
32,
32,
32,
32
],
"eps": 1e-06,
"guidance_embeds": true,
"in_channels": 128,
"joint_attention_dim": 15360,
"mlp_ratio": 3.0,
"num_attention_heads": 48,
"num_layers": 8,
"num_single_layers": 48,
"out_channels": null,
"patch_size": 1,
"rope_theta": 2000,
"timestep_guidance_channels": 256,
"quantization_config": {
"config_groups": {
"group_0": {
"input_activations": {
"dynamic": false,
"num_bits": 8,
"type": "float"
},
"weights": {
"dynamic": false,
"num_bits": 8,
"type": "float"
},
"targets": [
"Linear"
]
}
},
"ignore": [],
"producer": {
"name": "modelopt",
"version": "0.42.0"
},
"quant_algo": "FP8",
"quant_method": "modelopt"
}
}