import os

import torch
from transformers import AutoModelForCausalLM, AutoTokenizer

input_dir = "A:\LLM\.cache\huggingface\hub\models--wzhouad--gemma-2-9b-it-WPO-HB"
|
|
|
|
|
|
|
|
|
output_dir = "A:\LLM\.cache\huggingface\hub\models--wzhouad--gemma-2-9b-it-WPO-HB_FP16"
|
|
|
|
|
|
|
|
|
|
|
|
if not os.path.exists(output_dir):
    os.makedirs(output_dir)

print(f"Loading tokenizer from {input_dir}...")
|
|
|
tokenizer = AutoTokenizer.from_pretrained(input_dir)
|
|
|
|
|
|
|
|
|
print(f"Loading FP32 model from {input_dir}...")
|
|
|
model = AutoModelForCausalLM.from_pretrained(
|
|
|
input_dir,
|
|
|
torch_dtype=torch.float32,
|
|
|
device_map="cpu"
|
|
|
|
|
|
)
|
|
|
|
|
|
|
|
|
print("Converting model to FP16 and saving to disk...")
|
|
|
model.half().save_pretrained(
|
|
|
output_dir,
|
|
|
safe_serialization=True,
|
|
|
max_shard_size="5GB"
|
|
|
)
|
|
|
tokenizer.save_pretrained(output_dir)
|
|
|
|
|
|
print(f"Model successfully converted and saved to {output_dir}")
|
|
|
print("You can now use this new FP16 model in your mergekit config.yaml.") |