# faster-transformers-scripts/memory-requirements-quantized-vs-dequantized.py
# Measure and compare VRAM usage when loading gpt-oss-20b with its MXFP4 weights kept quantized vs dequantized
import gc
import torch
from transformers import AutoModelForCausalLM, Mxfp4Config
MODEL_ID = "openai/gpt-oss-20b"
DEVICE = "cuda:0"
def get_used_gb():
    # mem_get_info reports device-wide (free, total) bytes, so memory held by
    # other processes on the same GPU is counted in the "used" figure.
    free, total = torch.cuda.mem_get_info()
    return (total - free) / (1024**3), total / (1024**3)
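# A per-process alternative (unused below, shown only as a sketch): the caching
# allocator's counter tracks tensors owned by this process, unlike the
# device-wide view from mem_get_info.
def get_allocated_gb():
    # memory_allocated counts live tensor storage allocated by this process on DEVICE.
    return torch.cuda.memory_allocated(DEVICE) / (1024**3)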
def clear_memory():
    # Drop any temporary globals prefixed with "_tmp_" (none are created in this
    # script), then force garbage collection and release cached CUDA blocks so
    # mem_get_info reflects current usage.
    del_vars = [k for k in list(globals().keys()) if k.startswith("_tmp_")]
    for k in del_vars:
        globals().pop(k, None)
    gc.collect()
    torch.cuda.empty_cache()
    torch.cuda.synchronize()
assert torch.cuda.is_available(), "CUDA is not available."
# --- Dequantized (heavier): Mxfp4Config(dequantize=True) unpacks the MXFP4 weights to bf16 at load ---
clear_memory()
before_deq_used, total_gb = get_used_gb()
qconf = Mxfp4Config(dequantize=True)
model_deq = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map=DEVICE,
    quantization_config=qconf,
).eval()
after_deq_used, _ = get_used_gb()
# --- Quantized (lighter): no quantization_config, so the checkpoint's native MXFP4 weights stay quantized ---
del model_deq
clear_memory()
before_q_used, _ = get_used_gb()
model_q = AutoModelForCausalLM.from_pretrained(
    MODEL_ID,
    torch_dtype="auto",
    device_map=DEVICE,
).eval()
after_q_used, _ = get_used_gb()
print(f"[dequantized] used before: {before_deq_used:.2f} GB, after: {after_deq_used:.2f} GB / total {total_gb:.2f} GB")
print(f"[quantized ] used before: {before_q_used:.2f} GB, after: {after_q_used:.2f} GB / total {total_gb:.2f} GB")
# Make these available for plotting
mx_results = {
    "total_gb": total_gb,
    "after_dequantized_gb": after_deq_used,
    "after_quantized_gb": after_q_used,
}
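# Optional: a minimal bar-chart sketch of mx_results, assuming matplotlib is
# installed; the labels and output filename below are illustrative.
try:
    import matplotlib.pyplot as plt

    labels = ["dequantized", "quantized"]
    values = [mx_results["after_dequantized_gb"], mx_results["after_quantized_gb"]]
    plt.bar(labels, values)
    plt.axhline(mx_results["total_gb"], linestyle="--", color="gray", label="total VRAM")
    plt.ylabel("VRAM used (GB)")
    plt.title("gpt-oss-20b: MXFP4 quantized vs dequantized")
    plt.legend()
    plt.savefig("mxfp4_memory_comparison.png")
except ImportError:
    pass  # plotting is optional; the printed numbers above are enough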
# Outputs:
# [dequantized] used before: 0.41 GB, after: 43.18 GB / total 79.25 GB
# [quantized ] used before: 0.49 GB, after: 13.37 GB / total 79.25 GB