KotshinZ committed · verified
Commit da4ccf7 · 1 Parent(s): 780ea3f

Model save
README.md CHANGED
@@ -1,11 +1,9 @@
 ---
 base_model: openai-community/gpt2
-datasets: HuggingFaceH4/Bespoke-Stratos-17k
 library_name: transformers
 model_name: gpt2-SFT
 tags:
 - generated_from_trainer
-- open-r1
 - trl
 - sft
 licence: license
@@ -13,7 +11,7 @@ licence: license
 
 # Model Card for gpt2-SFT
 
-This model is a fine-tuned version of [openai-community/gpt2](https://huggingface.co/openai-community/gpt2) on the [HuggingFaceH4/Bespoke-Stratos-17k](https://huggingface.co/datasets/HuggingFaceH4/Bespoke-Stratos-17k) dataset.
+This model is a fine-tuned version of [openai-community/gpt2](https://huggingface.co/openai-community/gpt2).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -29,14 +27,14 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/s18574s18574-/huggingface/runs/eptavvvk)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/s18574s18574-/huggingface/runs/92j1a6yg)
 
 
 This model was trained with SFT.
 
 ### Framework versions
 
-- TRL: 0.16.0.dev0
+- TRL: 0.15.2
 - Transformers: 4.50.0.dev0
 - Pytorch: 2.5.1
 - Datasets: 3.3.2
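The Quick start block itself is untouched by this commit (only `print(output["generated_text"])` appears as hunk context above). For readers landing on the diff, a minimal usage sketch, assuming the repo id is `KotshinZ/gpt2-SFT` (inferred from the committer name and `model_name`; the diff does not state it). Since this commit also removes the tokenizer's chat template (see tokenizer_config.json below), the sketch prompts with a plain string rather than chat messages:

```python
from transformers import pipeline

# Hypothetical repo id, inferred from the committer and model_name.
generator = pipeline("text-generation", model="KotshinZ/gpt2-SFT")

# The commit removes the custom chat template, so prompt with a plain
# string instead of a list of {"role": ..., "content": ...} messages.
output = generator(
    "Question: What is supervised fine-tuning?\nAnswer:",
    max_new_tokens=64,
)[0]
print(output["generated_text"])
```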
all_results.json CHANGED
@@ -4,10 +4,10 @@
 "eval_samples": 100,
 "eval_samples_per_second": 239.436,
 "eval_steps_per_second": 60.186,
-"total_flos": 4.794029428021658e+16,
-"train_loss": 1.842788679657142,
-"train_runtime": 1095.7674,
-"train_samples": 16610,
-"train_samples_per_second": 83.719,
-"train_steps_per_second": 2.616
+"total_flos": 15459466346496.0,
+"train_loss": 3.207212707142771,
+"train_runtime": 884.063,
+"train_samples": 19883,
+"train_samples_per_second": 23.43,
+"train_steps_per_second": 0.366
 }
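The replaced metrics are internally consistent; a small sketch, assuming the key layout that transformers' `Trainer` writes to `all_results.json`, that derives step and epoch counts from them:

```python
import json

with open("all_results.json") as f:
    r = json.load(f)

# runtime (s) x steps/s gives optimizer steps; runtime x samples/s gives
# samples processed, which divided by dataset size gives epochs.
steps = r["train_runtime"] * r["train_steps_per_second"]            # ~324 steps
samples_seen = r["train_runtime"] * r["train_samples_per_second"]   # ~20,713 samples
epochs = samples_seen / r["train_samples"]                          # ~1.04 epochs

print(f"steps~{steps:.0f}, samples~{samples_seen:.0f}, epochs~{epochs:.2f}")
```

The derived ~1.04 epochs suggests roughly a single pass over the 19,883 training samples.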
config.json CHANGED
@@ -1,5 +1,4 @@
 {
-"_name_or_path": "openai-community/gpt2",
 "activation_function": "gelu_new",
 "architectures": [
 "GPT2LMHeadModel"
@@ -34,6 +33,6 @@
 },
 "torch_dtype": "bfloat16",
 "transformers_version": "4.50.0.dev0",
-"use_cache": true,
+"use_cache": false,
 "vocab_size": 50257
 }
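`use_cache` flipping to `false` is typical of a checkpoint saved from a training run (the Trainer disables the KV cache when gradient checkpointing is on, for example). For generation it is normally safe to turn the cache back on at load time; a minimal sketch, with the repo id again assumed:

```python
from transformers import GPT2LMHeadModel

# Override the saved "use_cache": false so generate() reuses past key/values.
model = GPT2LMHeadModel.from_pretrained("KotshinZ/gpt2-SFT", use_cache=True)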
model.safetensors CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:d7a4d3e9366758ab67ab6631e5f3cf745e2ecf43c3dcbaa713d59efbb4912848
-size 326089656
+oid sha256:1f42938a2cbb5158523dd7829a5530450f1a8f4d17462bdb98f4adbfa962c0cc
+size 248894656
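The new file size is exactly what GPT-2 small stored in `bfloat16` (with tied input/output embeddings saved once) should occupy, which is a quick way to confirm the checkpoint matches the declared base model. A back-of-the-envelope check:

```python
# GPT-2 small (openai-community/gpt2) has 124,439,808 parameters.
gpt2_params = 124_439_808
weight_bytes = gpt2_params * 2       # bfloat16 = 2 bytes per parameter
print(weight_bytes)                  # 248_879_616
print(248_894_656 - weight_bytes)    # 15_040 bytes left for the safetensors header
```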
tokenizer_config.json CHANGED
@@ -11,7 +11,6 @@
 }
 },
 "bos_token": "<|endoftext|>",
-"chat_template": "{% for message in messages %}{{ message['role'] }}: {{ message['content'] }}\n{% endfor %}",
 "clean_up_tokenization_spaces": false,
 "eos_token": "<|endoftext|>",
 "extra_special_tokens": {},
train_results.json CHANGED
@@ -1,8 +1,8 @@
 {
-"total_flos": 4.794029428021658e+16,
-"train_loss": 1.842788679657142,
-"train_runtime": 1095.7674,
-"train_samples": 16610,
-"train_samples_per_second": 83.719,
-"train_steps_per_second": 2.616
+"total_flos": 15459466346496.0,
+"train_loss": 3.207212707142771,
+"train_runtime": 884.063,
+"train_samples": 19883,
+"train_samples_per_second": 23.43,
+"train_steps_per_second": 0.366
 }
trainer_state.json CHANGED
The diff for this file is too large to render. See raw diff
 
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:895d105dd61e8ebfe31e8e0469aec7ef17c2b3df2ae35e198d4214d31665f4d7
-size 7352
+oid sha256:047c9541a64228bde09a78f0ca1cf98bb5e12b59574cf6d5c1d47e96a8bc50e1
+size 7288
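`training_args.bin` is a pickled `TrainingArguments` object rather than a tensor file, so its hash change here means the run configuration changed too. A sketch for inspecting it locally (unpickling executes code, so only load files you trust):

```python
import torch

# weights_only=False is required because this is a pickled Python object,
# not a plain tensor checkpoint.
args = torch.load("training_args.bin", weights_only=False)
print(args.learning_rate, args.per_device_train_batch_size, args.num_train_epochs)
```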