{
  "bits": 4,
  "group_size": 128,
  "sym": true,
  "data_type": "int",
  "enable_quanted_input": true,
  "enable_minmax_tuning": true,
  "seqlen": 512,
  "batch_size": 1,
  "scale_dtype": "torch.float16",
  "lr": 0.005,
  "minmax_lr": 0.005,
  "gradient_accumulate_steps": 4,
  "iters": 200,
  "amp": true,
  "nsamples": 128,
  "low_gpu_mem_usage": false,
  "to_quant_block_names": [
    [
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.0",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.1",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.2",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.3",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.4",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.5",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.6",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.7",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.8",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.9",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.10",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.11",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.12",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.13",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.14",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.15",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.16",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.17",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.18",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.19",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.20",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.21",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.22",
      "model.vision_embed_tokens.img_processor.vision_model.encoder.layers.23"
    ],
    [
      "model.layers.0",
      "model.layers.1",
      "model.layers.2",
      "model.layers.3",
      "model.layers.4",
      "model.layers.5",
      "model.layers.6",
      "model.layers.7",
      "model.layers.8",
      "model.layers.9",
      "model.layers.10",
      "model.layers.11",
      "model.layers.12",
      "model.layers.13",
      "model.layers.14",
      "model.layers.15",
      "model.layers.16",
      "model.layers.17",
      "model.layers.18",
      "model.layers.19",
      "model.layers.20",
      "model.layers.21",
      "model.layers.22",
      "model.layers.23",
      "model.layers.24",
      "model.layers.25",
      "model.layers.26",
      "model.layers.27",
      "model.layers.28",
      "model.layers.29",
      "model.layers.30",
      "model.layers.31"
    ]
  ],
  "enable_norm_bias_tuning": false,
  "dataset": "liuhaotian/llava_conv_58k",
  "autoround_version": "0.4.0.dev",
  "quant_method": "auto-round"
}