SystemAdmin123 committed
Commit 7bf1cc0 · verified · 1 Parent(s): 91ab16a

Training in progress, step 40

axolotl_config.yaml ADDED
@@ -0,0 +1,50 @@
+ base_model: facebook/opt-125m
+ batch_size: 128
+ bf16: true
+ chat_template: tokenizer_default_fallback_alpaca
+ datasets:
+ - format: custom
+   path: https://gradients.s3.eu-north-1.amazonaws.com/7575a7e5b38479e3_train_data.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVVZOOA7SA4UOFLPI%2F20250209%2Feu-north-1%2Fs3%2Faws4_request&X-Amz-Date=20250209T024133Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=5b334fbc5cfa88197315bf2cd74f2114e46bdb6bf8716ba3c96b73b8c3e6ef9a
+   type:
+     field_instruction: instruction
+     field_output: output
+     format: '{instruction}'
+     no_input_format: '{instruction}'
+     system_format: '{system}'
+     system_prompt: ''
+ device_map: auto
+ eval_sample_packing: false
+ eval_steps: 40
+ flash_attention: true
+ gradient_checkpointing: true
+ group_by_length: true
+ hub_model_id: SystemAdmin123/d778c1e6-0d20-4dd2-81e1-efc24a74b590
+ hub_strategy: checkpoint
+ learning_rate: 0.0002
+ logging_steps: 10
+ lr_scheduler: cosine
+ max_steps: 10000
+ micro_batch_size: 32
+ model_type: AutoModelForCausalLM
+ num_epochs: 100
+ optimizer: adamw_bnb_8bit
+ output_dir: /root/.sn56/axolotl/tmp/d778c1e6-0d20-4dd2-81e1-efc24a74b590
+ pad_to_sequence_len: true
+ resize_token_embeddings_to_32x: false
+ sample_packing: true
+ save_steps: 40
+ save_total_limit: 2
+ sequence_len: 2048
+ tokenizer_type: GPT2TokenizerFast
+ torch_dtype: bf16
+ training_args_kwargs:
+   hub_private_repo: true
+ trust_remote_code: true
+ val_set_size: 0.1
+ wandb_entity: ''
+ wandb_mode: online
+ wandb_name: facebook/opt-125m-https://gradients.s3.eu-north-1.amazonaws.com/7575a7e5b38479e3_train_data.json?X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=AKIAVVZOOA7SA4UOFLPI%2F20250209%2Feu-north-1%2Fs3%2Faws4_request&X-Amz-Date=20250209T024133Z&X-Amz-Expires=604800&X-Amz-SignedHeaders=host&X-Amz-Signature=5b334fbc5cfa88197315bf2cd74f2114e46bdb6bf8716ba3c96b73b8c3e6ef9a
+ wandb_project: Gradients-On-Demand
+ wandb_run: your_name
+ wandb_runid: default
+ warmup_ratio: 0.05
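
A quick, hedged note on how this config drives the run (not part of the commit itself): axolotl typically derives gradient accumulation from batch_size / micro_batch_size, and the step-40 checkpoint in this commit matches save_steps: 40 with hub_strategy: checkpoint. A minimal Python sketch, assuming only PyYAML and that the file above is saved locally as axolotl_config.yaml:

```python
# Hedged sketch: sanity-check a couple of derived quantities in the config above.
import yaml

with open("axolotl_config.yaml") as f:
    cfg = yaml.safe_load(f)

# With batch_size 128 and micro_batch_size 32, the effective gradient
# accumulation works out to 128 / 32 = 4 micro-batches per optimizer step
# (on a single device; axolotl also divides by world size when distributed).
accum_steps = cfg["batch_size"] // cfg["micro_batch_size"]
print("gradient accumulation steps:", accum_steps)  # -> 4

# Checkpoints are pushed every save_steps (40) to hub_model_id, which is why
# this commit is titled "Training in progress, step 40".
print(cfg["save_steps"], cfg["hub_model_id"])
```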
config.json ADDED
@@ -0,0 +1,31 @@
+ {
+   "_name_or_path": "facebook/opt-125m",
+   "_remove_final_layer_norm": false,
+   "activation_dropout": 0.0,
+   "activation_function": "relu",
+   "architectures": [
+     "OPTForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "bos_token_id": 2,
+   "do_layer_norm_before": true,
+   "dropout": 0.1,
+   "enable_bias": true,
+   "eos_token_id": 2,
+   "ffn_dim": 3072,
+   "hidden_size": 768,
+   "init_std": 0.02,
+   "layer_norm_elementwise_affine": true,
+   "layerdrop": 0.0,
+   "max_position_embeddings": 2048,
+   "model_type": "opt",
+   "num_attention_heads": 12,
+   "num_hidden_layers": 12,
+   "pad_token_id": 1,
+   "prefix": "</s>",
+   "torch_dtype": "bfloat16",
+   "transformers_version": "4.47.1",
+   "use_cache": false,
+   "vocab_size": 50272,
+   "word_embed_proj_dim": 768
+ }
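
The config above is the unmodified facebook/opt-125m architecture (12 layers, 12 heads, hidden size 768) saved in bfloat16 with use_cache disabled, the usual pairing with gradient checkpointing. A minimal sketch of instantiating the same architecture with transformers; the only assumption is a standard transformers install:

```python
# Hedged sketch: load the architecture described by config.json above.
import torch
from transformers import AutoConfig, AutoModelForCausalLM

config = AutoConfig.from_pretrained("facebook/opt-125m")
print(config.model_type, config.hidden_size, config.num_hidden_layers)  # opt 768 12

# torch_dtype bfloat16 and use_cache=False mirror the values in the saved config.
model = AutoModelForCausalLM.from_pretrained(
    "facebook/opt-125m", torch_dtype=torch.bfloat16, use_cache=False
)
print(sum(p.numel() for p in model.parameters()))  # roughly 125M parameters
```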
merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
model.safetensors ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:65b22bdf43d38b61cc704a197114972c4a1ec34ec00733695846e8aa16fe9346
+ size 250501160
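
model.safetensors is stored through Git LFS, so the diff above is only a pointer (content hash and byte size), not the weights themselves. A hedged sketch of fetching the actual file from the repo named in hub_model_id; note the axolotl config sets hub_private_repo: true, so an access token with permission would be required:

```python
# Hedged sketch: download this checkpoint's weights via huggingface_hub.
from huggingface_hub import hf_hub_download

path = hf_hub_download(
    repo_id="SystemAdmin123/d778c1e6-0d20-4dd2-81e1-efc24a74b590",
    filename="model.safetensors",
    revision="7bf1cc0",  # this commit
)
print(path)
```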
special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
+ {
+   "bos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "eos_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "pad_token": {
+     "content": "<pad>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   },
+   "unk_token": {
+     "content": "</s>",
+     "lstrip": false,
+     "normalized": true,
+     "rstrip": false,
+     "single_word": false
+   }
+ }
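
As the map above shows, OPT reuses </s> (token id 2) for bos, eos, and unk, and uses <pad> (token id 1) for padding. A short sketch to confirm this after loading the tokenizer, assuming only a standard transformers install:

```python
# Hedged sketch: inspect the special tokens declared in special_tokens_map.json.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/opt-125m")
print(tok.special_tokens_map)
print(tok.bos_token_id, tok.eos_token_id, tok.pad_token_id)  # 2 2 1
```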
tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_config.json ADDED
@@ -0,0 +1,33 @@
+ {
+   "add_bos_token": true,
+   "add_prefix_space": false,
+   "added_tokens_decoder": {
+     "1": {
+       "content": "<pad>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     },
+     "2": {
+       "content": "</s>",
+       "lstrip": false,
+       "normalized": true,
+       "rstrip": false,
+       "single_word": false,
+       "special": true
+     }
+   },
+   "bos_token": "</s>",
+   "chat_template": "{% for message in messages %}{% if message['role'] == 'user' %}{{ '### Instruction: ' + message['content'] + '\n\n' }}{% elif message['role'] == 'assistant' %}{{ '### Response: ' + message['content'] + eos_token}}{% endif %}{% endfor %}",
+   "clean_up_tokenization_spaces": false,
+   "eos_token": "</s>",
+   "errors": "replace",
+   "extra_special_tokens": {},
+   "model_max_length": 1000000000000000019884624838656,
+   "pad_token": "<pad>",
+   "tokenizer_class": "GPT2Tokenizer",
+   "unk_token": "</s>",
+   "use_fast": true
+ }
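
The chat_template stored above is the Alpaca-style fallback selected by chat_template: tokenizer_default_fallback_alpaca in the axolotl config. A hedged sketch of rendering a conversation with it; the base facebook/opt-125m tokenizer does not ship this template, so it is assigned explicitly here:

```python
# Hedged sketch: apply the "### Instruction / ### Response" template from above.
from transformers import AutoTokenizer

tok = AutoTokenizer.from_pretrained("facebook/opt-125m")
tok.chat_template = (
    "{% for message in messages %}"
    "{% if message['role'] == 'user' %}{{ '### Instruction: ' + message['content'] + '\n\n' }}"
    "{% elif message['role'] == 'assistant' %}{{ '### Response: ' + message['content'] + eos_token }}"
    "{% endif %}{% endfor %}"
)

messages = [
    {"role": "user", "content": "Summarize the training setup."},
    {"role": "assistant", "content": "OPT-125M fine-tuned with axolotl."},
]
print(tok.apply_chat_template(messages, tokenize=False))
# ### Instruction: Summarize the training setup.
#
# ### Response: OPT-125M fine-tuned with axolotl.</s>
```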
training_args.bin ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:edcbfeaed8172c5acebdf55d3b1d6e875f976c0ce9e9f35f87ebdee1bf4f0252
+ size 7032
vocab.json ADDED
The diff for this file is too large to render. See raw diff