odats committed
Commit efb856e · verified · 1 parent: 21daf6b

Training in progress, step 100

Files changed (4):
  1. README.md +5 -5
  2. config.json +52 -84
  3. model.safetensors +3 -0
  4. training_args.bin +1 -1
README.md CHANGED
@@ -1,17 +1,17 @@
 ---
-base_model: google/gemma-3-4b-it
+base_model: google/gemma-3-1b-it
 library_name: transformers
 model_name: nmt_21
 tags:
 - generated_from_trainer
-- trl
 - grpo
+- trl
 licence: license
 ---
 
 # Model Card for nmt_21
 
-This model is a fine-tuned version of [google/gemma-3-4b-it](https://huggingface.co/google/gemma-3-4b-it).
+This model is a fine-tuned version of [google/gemma-3-1b-it](https://huggingface.co/google/gemma-3-1b-it).
 It has been trained using [TRL](https://github.com/huggingface/trl).
 
 ## Quick start
@@ -27,14 +27,14 @@ print(output["generated_text"])
 
 ## Training procedure
 
-[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/oleg-dats/nmt/runs/syf13vuv)
+[<img src="https://raw.githubusercontent.com/wandb/assets/main/wandb-github-badge-28.svg" alt="Visualize in Weights & Biases" width="150" height="24"/>](https://wandb.ai/oleg-dats/nmt/runs/f4rb3j16)
 
 
 This model was trained with GRPO, a method introduced in [DeepSeekMath: Pushing the Limits of Mathematical Reasoning in Open Language Models](https://huggingface.co/papers/2402.03300).
 
 ### Framework versions
 
-- TRL: 0.23.1
+- TRL: 0.23.0
 - Transformers: 4.56.2
 - Pytorch: 2.5.1+cu124
 - Datasets: 4.1.1
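For reference, the "Quick start" section quoted in the second hunk's header (`print(output["generated_text"])`) follows the standard TRL model-card template. A minimal sketch of that usage, assuming the checkpoint is published under the hypothetical Hub id `odats/nmt_21`:

```python
from transformers import pipeline

# Hypothetical repo id; the commit does not show where nmt_21 is hosted.
generator = pipeline("text-generation", model="odats/nmt_21", device="cuda")

# Chat-style input, as Gemma instruction-tuned checkpoints expect.
question = "If you had a time machine, when would you visit first?"
output = generator(
    [{"role": "user", "content": question}],
    max_new_tokens=128,
    return_full_text=False,
)[0]
print(output["generated_text"])  # the line quoted in the hunk header
```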
config.json CHANGED
@@ -1,94 +1,62 @@
 {
+  "_sliding_window_pattern": 6,
   "architectures": [
-    "Gemma3ForConditionalGeneration"
+    "Gemma3ForCausalLM"
   ],
-  "boi_token_index": 255999,
+  "attention_bias": false,
+  "attention_dropout": 0.0,
+  "attn_logit_softcapping": null,
   "bos_token_id": 2,
+  "cache_implementation": "hybrid",
   "dtype": "bfloat16",
-  "eoi_token_index": 256000,
   "eos_token_id": 1,
-  "image_token_index": 262144,
+  "final_logit_softcapping": null,
+  "head_dim": 256,
+  "hidden_activation": "gelu_pytorch_tanh",
+  "hidden_size": 1152,
   "initializer_range": 0.02,
-  "mm_tokens_per_image": 256,
-  "model_type": "gemma3",
+  "intermediate_size": 6912,
+  "layer_types": [
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "sliding_attention",
+    "full_attention",
+    "sliding_attention",
+    "sliding_attention"
+  ],
+  "max_position_embeddings": 32768,
+  "model_type": "gemma3_text",
+  "num_attention_heads": 4,
+  "num_hidden_layers": 26,
+  "num_key_value_heads": 1,
   "pad_token_id": 0,
-  "text_config": {
-    "_sliding_window_pattern": 6,
-    "attention_bias": false,
-    "attention_dropout": 0.0,
-    "attn_logit_softcapping": null,
-    "final_logit_softcapping": null,
-    "head_dim": 256,
-    "hidden_activation": "gelu_pytorch_tanh",
-    "hidden_size": 2560,
-    "initializer_range": 0.02,
-    "intermediate_size": 10240,
-    "layer_types": [
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "full_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "full_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "full_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "full_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "full_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention",
-      "sliding_attention"
-    ],
-    "max_position_embeddings": 131072,
-    "model_type": "gemma3_text",
-    "num_attention_heads": 8,
-    "num_hidden_layers": 34,
-    "num_key_value_heads": 4,
-    "query_pre_attn_scalar": 256,
-    "rms_norm_eps": 1e-06,
-    "rope_local_base_freq": 10000.0,
-    "rope_scaling": {
-      "factor": 8.0,
-      "rope_type": "linear"
-    },
-    "rope_theta": 1000000.0,
-    "sliding_window": 1024,
-    "use_cache": true,
-    "vocab_size": 262208
-  },
+  "query_pre_attn_scalar": 256,
+  "rms_norm_eps": 1e-06,
+  "rope_local_base_freq": 10000,
+  "rope_scaling": null,
+  "rope_theta": 1000000,
+  "sliding_window": 512,
   "transformers_version": "4.56.2",
-  "vision_config": {
-    "attention_dropout": 0.0,
-    "hidden_act": "gelu_pytorch_tanh",
-    "hidden_size": 1152,
-    "image_size": 896,
-    "intermediate_size": 4304,
-    "layer_norm_eps": 1e-06,
-    "model_type": "siglip_vision_model",
-    "num_attention_heads": 16,
-    "num_channels": 3,
-    "num_hidden_layers": 27,
-    "patch_size": 14,
-    "vision_use_head": false
-  }
+  "use_cache": true,
+  "vocab_size": 262144
 }
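The net effect of this hunk: the config drops the multimodal 4B layout (`Gemma3ForConditionalGeneration` with nested `text_config` and `vision_config` blocks) for the flat text-only 1B layout (`Gemma3ForCausalLM`), matching the base-model change in the README. A minimal sanity-check sketch, assuming the updated `config.json` sits in the current directory:

```python
from transformers import AutoConfig

# "." is an assumption; point this at the repo checkout or Hub id.
config = AutoConfig.from_pretrained(".")

assert config.architectures == ["Gemma3ForCausalLM"]
assert config.model_type == "gemma3_text"

# Shape parameters from the diff: 26 layers, hidden size 1152,
# 4 query heads sharing a single KV head.
print(config.num_hidden_layers, config.hidden_size,
      config.num_attention_heads, config.num_key_value_heads)

# 5:1 interleaving per _sliding_window_pattern: every sixth layer is
# full attention, the rest use a 512-token sliding window.
print(config.layer_types.count("full_attention"))  # 4
```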
model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:f5b69b8ecb5e61e6e3bf4f72db44e88c98f557be6a92255fddb887b7f0d4f97f
+size 1999811208
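The three added lines are a Git LFS pointer, not the weights themselves. The ~2.0 GB payload is consistent with a ~1B-parameter model stored in bfloat16 (2 bytes per parameter), matching the gemma-3-1b-it base. A sketch of verifying that once the actual file is pulled:

```python
from safetensors import safe_open

# Sum parameter counts across all tensors in the checkpoint.
total = 0
with safe_open("model.safetensors", framework="pt", device="cpu") as f:
    for name in f.keys():
        count = 1
        for dim in f.get_slice(name).get_shape():
            count *= dim
        total += count

# 1999811208 bytes / 2 bytes per bf16 value is roughly 1.0e9, so this
# should print close to 1.00B.
print(f"{total / 1e9:.2f}B parameters")
```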
training_args.bin CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:1962c6464b1c9286492998d5209bebd4b71eaeb55518cd47e972fb26d1807131
+oid sha256:32d82cec56e20a42331cadab044e78a230fceda57b3097692eaf8e50ff1609a0
 size 6776
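`training_args.bin` is the pickled trainer-arguments object that the `Trainer` saves alongside checkpoints; for this run it is presumably a `trl.GRPOConfig` (a `TrainingArguments` subclass). The changed hash at an identical 6776-byte size suggests a small tweak to the run settings. A sketch of inspecting it, noting that unpickling executes code and should only be done on trusted checkpoints:

```python
import torch

# weights_only=False is required because this is a pickled Python
# object, not a tensor file; only load files you trust.
args = torch.load("training_args.bin", weights_only=False)

print(type(args).__name__)  # expected: GRPOConfig
print(args.output_dir, args.learning_rate, args.max_steps)
```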