hujiefrank committed · Commit 423f169 (verified) · 1 Parent(s): 2a9dedc

update for lower VRAM inference

Files changed (1)
  1. README.md +2 -1
README.md CHANGED
@@ -91,7 +91,8 @@ pipe = LongCatImageEditPipeline.from_pretrained(
     transformer=transformer,
     text_processor=text_processor,
 )
-pipe.to(device, torch.bfloat16)
+# pipe.to(device, torch.bfloat16)  # Uncomment for high-VRAM devices (faster inference)
+pipe.enable_model_cpu_offload()  # Offload to CPU to save VRAM (requires ~19 GB); slower but prevents OOM
 
 generator = torch.Generator("cpu").manual_seed(43)
 img = Image.open('assets/test.png')
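
For context, a minimal sketch of the trade-off this commit encodes, using a generic diffusers pipeline. The model id is a hypothetical placeholder and `DiffusionPipeline` stands in for the README's `LongCatImageEditPipeline`, which exposes the same `.to()` and `enable_model_cpu_offload()` calls as shown in the diff above.

```python
import torch
from diffusers import DiffusionPipeline

# Placeholder model id; substitute the repo or local path the README actually uses.
pipe = DiffusionPipeline.from_pretrained(
    "<model-repo-or-local-path>",
    torch_dtype=torch.bfloat16,
)

# Option A (high VRAM): keep every sub-model resident on the GPU for the fastest inference.
# pipe.to("cuda", torch.bfloat16)

# Option B (low VRAM, what this commit switches the README to): each sub-model is moved to
# the GPU only while it runs and back to CPU afterwards. Slower per image, but peak VRAM
# drops to roughly the largest single sub-model (~19 GB here, per the commit).
pipe.enable_model_cpu_offload()
```

Note that `enable_model_cpu_offload()` is a standard diffusers pipeline method and requires the `accelerate` package to be installed.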