SmolVLM-Instruct-awq / quantization_config.json
{
  "quant_method": "awq",
  "bits": 4,
  "group_size": 128,
  "dataset": "lmms-lab/flickr30k",
  "calibration_samples": 256,
  "model_seqlen": 2048,
  "block_name_to_quantize": "LlamaDecoderLayer",
  "module_name_preceding_first_block": [
    "model.vision_model",
    "model.connector"
  ],
  "quantization_tool": "llm-compressor",
  "preserved_components": [
    "vision_model",
    "vision_tower",
    "connector",
    "lm_head"
  ]
}
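
For reference, a minimal Python sketch of how this metadata and the accompanying checkpoint might be consumed. The repo id ("ronantakizawa/SmolVLM-Instruct-awq", inferred from the page path) and the availability of a compatible quantization backend in transformers are assumptions, not statements from this file.

# Minimal usage sketch, assuming the repo id below and an installed
# backend that can load the 4-bit AWQ weights through transformers.
import json

from transformers import AutoModelForVision2Seq, AutoProcessor

# Inspect the quantization metadata shipped in this file.
with open("quantization_config.json") as f:
    qcfg = json.load(f)
print(qcfg["quant_method"], qcfg["bits"], qcfg["group_size"])  # awq 4 128

# Load the quantized checkpoint; the components listed under
# "preserved_components" (vision tower, connector, lm_head) are kept
# in full precision, only the LlamaDecoderLayer blocks are 4-bit.
model_id = "ronantakizawa/SmolVLM-Instruct-awq"  # assumed repo id
processor = AutoProcessor.from_pretrained(model_id)
model = AutoModelForVision2Seq.from_pretrained(model_id, device_map="auto")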