lvj committed (verified)
Commit eb0f3ac · 1 Parent(s): 89b30f3

Upload qat_sft.py with huggingface_hub

Files changed (1)
  1. qat_sft.py  +18 -10
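The commit message says the file was pushed with the huggingface_hub client. A minimal sketch of such an upload via HfApi.upload_file is shown below; the repo_id is a placeholder, since the target repo is not named on this page.

from huggingface_hub import HfApi

api = HfApi()  # picks up the token from `huggingface-cli login` or HF_TOKEN
api.upload_file(
    path_or_fileobj="qat_sft.py",       # local file to push
    path_in_repo="qat_sft.py",          # destination path inside the repo
    repo_id="<user>/<repo>",            # placeholder, not taken from this commit
    commit_message="Upload qat_sft.py with huggingface_hub",
)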
qat_sft.py CHANGED
@@ -7,7 +7,7 @@ import re
 import torch
 import torch.distributed as dist
 
-from datasets import load_dataset
+from datasets import Dataset, load_dataset
 from torch.distributed.elastic.multiprocessing.api import record
 from torch.distributed.fsdp import (
     FullOptimStateDictConfig,
@@ -23,7 +23,7 @@ from torchao.prototype.parq.api import QuantConfig, create_optimizer
 from torchao.quantization.qat import QATConfig, IntxFakeQuantizeConfig
 from torchao.quantization.quant_api import quantize_
 from transformers import AutoTokenizer, Trainer
-from transformers.trainer import OPTIMIZER_NAME_BIN
+from transformers.trainer import OPTIMIZER_NAME, OPTIMIZER_NAME_BIN
 from transformers.trainer_utils import get_last_checkpoint
 from trl import SFTConfig, SFTTrainer, ScriptArguments, TrlParser
 from dataclasses import field
@@ -116,6 +116,8 @@ class PARQTrainer(Trainer):
                 "Only single GPU is supported for torchao_convert from FSDP checkpoint"
             )
         optim_path = os.path.join(resume_from_checkpoint, OPTIMIZER_NAME_BIN)
+        if not os.path.isfile(optim_path):
+            optim_path = os.path.join(resume_from_checkpoint, OPTIMIZER_NAME)
         assert os.path.isfile(optim_path), f"Optimizer file not found at {optim_path}"
 
         if isinstance(resume_from_checkpoint, bool) and resume_from_checkpoint:
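The two added lines make the resume path tolerant of either optimizer checkpoint filename that transformers may have written. A standalone sketch of that lookup order, with the usual values of OPTIMIZER_NAME_BIN ("optimizer.bin") and OPTIMIZER_NAME ("optimizer.pt") hard-coded as assumptions rather than imported:

import os

def resolve_optimizer_path(ckpt_dir):
    # Prefer the ".bin" file, fall back to the ".pt" file; the names here
    # are assumed to match transformers' OPTIMIZER_NAME_BIN / OPTIMIZER_NAME.
    for name in ("optimizer.bin", "optimizer.pt"):
        path = os.path.join(ckpt_dir, name)
        if os.path.isfile(path):
            return path
    raise FileNotFoundError(f"Optimizer file not found in {ckpt_dir}")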
@@ -137,7 +139,7 @@ class PARQTrainer(Trainer):
         optimizer.load_state_dict(optimizer_state_dict)
 
         self.accelerator.print(
-            f"Loaded FSDP model and optimizer state from {resume_from_checkpoint}"
+            f"Loaded model and optimizer state from {resume_from_checkpoint}"
         )
         return True
 
@@ -292,6 +294,11 @@ def tokenize_fn(examples, tokenizer):
     }
 
 
+def disable_thinking(examples):
+    examples.setdefault("chat_template_kwargs", {})["enable_thinking"] = False
+    return examples
+
+
 @record
 def train(sft_config, custom_args, parq_config, world_size):
     tokenizer = AutoTokenizer.from_pretrained(custom_args.model_name_or_path)
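The new disable_thinking helper returns the example dict, which is what datasets' map() needs in order to apply the change; the lambda it replaces called dict.update(), which returns None. A toy, self-contained sketch of the helper's behavior (the sample rows are made up):

from datasets import Dataset

def disable_thinking(examples):
    # map() uses the returned dict to update the example; dict.update()
    # returns None, which is why a named helper is used instead of a lambda.
    examples.setdefault("chat_template_kwargs", {})["enable_thinking"] = False
    return examples

ds = Dataset.from_dict({"messages": [["hi there"], ["hello"]]})
ds = ds.map(disable_thinking)
print(ds[0]["chat_template_kwargs"])  # -> {'enable_thinking': False}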
@@ -337,14 +344,15 @@ def train(sft_config, custom_args, parq_config, world_size):
         num_shards=sft_config.dataloader_num_workers * world_size,
     )
 
+    map_kwargs = (
+        {"num_proc": sft_config.dataset_num_proc}
+        if isinstance(dataset, Dataset)
+        else {}
+    )
+
     if not custom_args.enable_thinking:
         with sft_config.main_process_first(desc="disable thinking mode"):
-            dataset = dataset.map(
-                lambda x: x.update(
-                    {"chat_template_kwargs": {"enable_thinking": False}}
-                ),
-                num_proc=sft_config.dataset_num_proc,
-            )
+            dataset = dataset.map(disable_thinking, **map_kwargs)
 
     if prompt_format_fn is not None:
         sft_config.dataset_text_field = "messages"
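The map_kwargs guard forwards num_proc only when the dataset is a regular in-memory Dataset; a streaming IterableDataset's map() does not accept that argument. A small sketch of the same idea using a hypothetical build_map_kwargs helper:

from datasets import Dataset

def build_map_kwargs(dataset, num_proc):
    # num_proc only applies to Dataset.map(); drop it for streaming datasets.
    return {"num_proc": num_proc} if isinstance(dataset, Dataset) else {}

# e.g. dataset = dataset.map(disable_thinking, **build_map_kwargs(dataset, 8))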
@@ -361,12 +369,12 @@ def train(sft_config, custom_args, parq_config, world_size):
         dataset = dataset.map(
             prompt_format_fn,
             batched=True,
-            num_proc=sft_config.dataset_num_proc,
             remove_columns=[
                 c
                 for c in dataset.column_names
                 if c != sft_config.dataset_text_field
             ],
+            **map_kwargs,
         )
 
     if parq_config.weight_bits is not None: