Testament200156 committed
Commit f82d9d1 · Parent: 425003e

Upload 29 files

Files changed (29)
  1. .gitattributes +4 -0
  2. megdgmma3-thinking-DirectLoRA/GGUF/MedGemma3-Thinking-DirectLoRA.mmproj-f16.gguf +3 -0
  3. megdgmma3-thinking-DirectLoRA/LoRAcapture.PNG +3 -0
  4. megdgmma3-thinking-DirectLoRA/added_tokens.json +3 -0
  5. megdgmma3-thinking-DirectLoRA/chat_template.jinja +47 -0
  6. megdgmma3-thinking-DirectLoRA/config.json +125 -0
  7. megdgmma3-thinking-DirectLoRA/generation_config.json +10 -0
  8. megdgmma3-thinking-DirectLoRA/lorafiles/LoRA-URL.txt +1 -0
  9. megdgmma3-thinking-DirectLoRA/lorafiles/adapter_config.json +42 -0
  10. megdgmma3-thinking-DirectLoRA/lorafiles/adapter_model.safetensors +3 -0
  11. megdgmma3-thinking-DirectLoRA/model-00001-of-00012.safetensors +3 -0
  12. megdgmma3-thinking-DirectLoRA/model-00002-of-00012.safetensors +3 -0
  13. megdgmma3-thinking-DirectLoRA/model-00003-of-00012.safetensors +3 -0
  14. megdgmma3-thinking-DirectLoRA/model-00004-of-00012.safetensors +3 -0
  15. megdgmma3-thinking-DirectLoRA/model-00005-of-00012.safetensors +3 -0
  16. megdgmma3-thinking-DirectLoRA/model-00006-of-00012.safetensors +3 -0
  17. megdgmma3-thinking-DirectLoRA/model-00007-of-00012.safetensors +3 -0
  18. megdgmma3-thinking-DirectLoRA/model-00008-of-00012.safetensors +3 -0
  19. megdgmma3-thinking-DirectLoRA/model-00009-of-00012.safetensors +3 -0
  20. megdgmma3-thinking-DirectLoRA/model-00010-of-00012.safetensors +3 -0
  21. megdgmma3-thinking-DirectLoRA/model-00011-of-00012.safetensors +3 -0
  22. megdgmma3-thinking-DirectLoRA/model-00012-of-00012.safetensors +3 -0
  23. megdgmma3-thinking-DirectLoRA/model.safetensors.index.json +0 -0
  24. megdgmma3-thinking-DirectLoRA/preprocessor_config.json +29 -0
  25. megdgmma3-thinking-DirectLoRA/processor_config.json +4 -0
  26. megdgmma3-thinking-DirectLoRA/special_tokens_map.json +33 -0
  27. megdgmma3-thinking-DirectLoRA/tokenizer.json +3 -0
  28. megdgmma3-thinking-DirectLoRA/tokenizer.model +3 -0
  29. megdgmma3-thinking-DirectLoRA/tokenizer_config.json +0 -0
.gitattributes CHANGED
@@ -39,3 +39,7 @@ medgemma3-thinking.mmproj-f16.gguf filter=lfs diff=lfs merge=lfs -text
 medgemma3-thinking.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
 medgemma3-thinking/tokenizer.json filter=lfs diff=lfs merge=lfs -text
 MedGemma3-thinking.gguf filter=lfs diff=lfs merge=lfs -text
+megdgmma3-thinking-DirectLoRA/GGUF/MedGemma3-Thinking-DirectLoRA.mmproj-f16.gguf filter=lfs diff=lfs merge=lfs -text
+megdgmma3-thinking-DirectLoRA/GGUF/MedGemma3-Thinking-DirectLoRA.q8_0.gguf filter=lfs diff=lfs merge=lfs -text
+megdgmma3-thinking-DirectLoRA/LoRAcapture.PNG filter=lfs diff=lfs merge=lfs -text
+megdgmma3-thinking-DirectLoRA/tokenizer.json filter=lfs diff=lfs merge=lfs -text
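
The four added rules route the new GGUF, screenshot, and tokenizer blobs through Git LFS, so plain clones receive three-line pointer files unless LFS fetches the binaries. A minimal download sketch with huggingface_hub; the repo_id below is a placeholder, since the commit view does not name the repository:

from huggingface_hub import hf_hub_download

# repo_id is hypothetical; substitute the repository this commit belongs to.
# Because the path is LFS-tracked, this fetches the ~858 MB binary rather
# than the pointer file shown in the diff below.
gguf_path = hf_hub_download(
    repo_id="Testament200156/medgemma3-thinking",
    filename="megdgmma3-thinking-DirectLoRA/GGUF/MedGemma3-Thinking-DirectLoRA.mmproj-f16.gguf",
)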
megdgmma3-thinking-DirectLoRA/GGUF/MedGemma3-Thinking-DirectLoRA.mmproj-f16.gguf ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:85b8335f75bf32b7fe97552a096021d380f486307ad3e60e997a2a550af71898
+size 857739296
megdgmma3-thinking-DirectLoRA/LoRAcapture.PNG ADDED

Git LFS Details

  • SHA256: 5da85ca8a49bf7f7a2ecfe8b2f1774345cba3fcacddb966e599df86815afc3d8
  • Pointer size: 131 Bytes
  • Size of remote file: 185 kB
megdgmma3-thinking-DirectLoRA/added_tokens.json ADDED
@@ -0,0 +1,3 @@
+{
+  "<image_soft_token>": 262144
+}
megdgmma3-thinking-DirectLoRA/chat_template.jinja ADDED
@@ -0,0 +1,47 @@
+{{ bos_token }}
+{%- if messages[0]['role'] == 'system' -%}
+    {%- if messages[0]['content'] is string -%}
+        {%- set first_user_prefix = messages[0]['content'] + '
+
+' -%}
+    {%- else -%}
+        {%- set first_user_prefix = messages[0]['content'][0]['text'] + '
+
+' -%}
+    {%- endif -%}
+    {%- set loop_messages = messages[1:] -%}
+{%- else -%}
+    {%- set first_user_prefix = "" -%}
+    {%- set loop_messages = messages -%}
+{%- endif -%}
+{%- for message in loop_messages -%}
+    {%- if (message['role'] == 'user') != (loop.index0 % 2 == 0) -%}
+        {{ raise_exception("Conversation roles must alternate user/assistant/user/assistant/...") }}
+    {%- endif -%}
+    {%- if (message['role'] == 'assistant') -%}
+        {%- set role = "model" -%}
+    {%- else -%}
+        {%- set role = message['role'] -%}
+    {%- endif -%}
+    {{ '<start_of_turn>' + role + '
+' + (first_user_prefix if loop.first else "") }}
+    {%- if message['content'] is string -%}
+        {{ message['content'] | trim }}
+    {%- elif message['content'] is iterable -%}
+        {%- for item in message['content'] -%}
+            {%- if item['type'] == 'image' -%}
+                {{ '<start_of_image>' }}
+            {%- elif item['type'] == 'text' -%}
+                {{ item['text'] | trim }}
+            {%- endif -%}
+        {%- endfor -%}
+    {%- else -%}
+        {{ raise_exception("Invalid content type") }}
+    {%- endif -%}
+    {{ '<end_of_turn>
+' }}
+{%- endfor -%}
+{%- if add_generation_prompt -%}
+    {{'<start_of_turn>model
+'}}
+{%- endif -%}
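
The template is the standard Gemma 3 turn format: system text is folded into the first user turn, the assistant role is renamed to model, and image items expand to <start_of_image>. A minimal rendering sketch, assuming a local clone of this folder (recent transformers releases load chat_template.jinja directly):

from transformers import AutoTokenizer

# Path assumed to be a local checkout of this folder.
tok = AutoTokenizer.from_pretrained("megdgmma3-thinking-DirectLoRA")

messages = [
    {"role": "system", "content": "You are a careful clinical assistant."},
    {"role": "user", "content": "Summarize the case."},
]
text = tok.apply_chat_template(messages, tokenize=False, add_generation_prompt=True)
print(text)
# Expected shape (system prompt folded into the first user turn):
# <bos><start_of_turn>user
# You are a careful clinical assistant.
#
# Summarize the case.<end_of_turn>
# <start_of_turn>model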
megdgmma3-thinking-DirectLoRA/config.json ADDED
@@ -0,0 +1,125 @@
+{
+  "architectures": [
+    "Gemma3ForConditionalGeneration"
+  ],
+  "boi_token_index": 255999,
+  "dtype": "bfloat16",
+  "eoi_token_index": 256000,
+  "eos_token_id": [
+    1,
+    106
+  ],
+  "image_token_index": 262144,
+  "initializer_range": 0.02,
+  "mm_tokens_per_image": 256,
+  "model_type": "gemma3",
+  "text_config": {
+    "_sliding_window_pattern": 6,
+    "attention_bias": false,
+    "attention_dropout": 0.0,
+    "attn_logit_softcapping": null,
+    "dtype": "bfloat16",
+    "final_logit_softcapping": null,
+    "head_dim": 128,
+    "hidden_activation": "gelu_pytorch_tanh",
+    "hidden_size": 5376,
+    "initializer_range": 0.02,
+    "intermediate_size": 21504,
+    "layer_types": [
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "sliding_attention",
+      "full_attention",
+      "sliding_attention",
+      "sliding_attention"
+    ],
+    "max_position_embeddings": 131072,
+    "model_type": "gemma3_text",
+    "num_attention_heads": 32,
+    "num_hidden_layers": 62,
+    "num_key_value_heads": 16,
+    "query_pre_attn_scalar": 168,
+    "rms_norm_eps": 1e-06,
+    "rope_local_base_freq": 10000.0,
+    "rope_scaling": {
+      "factor": 8.0,
+      "rope_type": "linear"
+    },
+    "rope_theta": 1000000.0,
+    "sliding_window": 1024,
+    "use_cache": true,
+    "vocab_size": 262145
+  },
+  "transformers_version": "4.56.1",
+  "vision_config": {
+    "attention_dropout": 0.0,
+    "dtype": "bfloat16",
+    "hidden_act": "gelu_pytorch_tanh",
+    "hidden_size": 1152,
+    "image_size": 896,
+    "intermediate_size": 4304,
+    "layer_norm_eps": 1e-06,
+    "model_type": "siglip_vision_model",
+    "num_attention_heads": 16,
+    "num_channels": 3,
+    "num_hidden_layers": 27,
+    "patch_size": 14,
+    "vision_use_head": false
+  }
+}
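
The config describes the merged checkpoint: a 62-layer Gemma 3 text tower alternating five sliding-window layers (window 1024) with one full-attention layer, linear RoPE scaling (factor 8.0) up to 131072 positions, and an 896x896 SigLIP vision tower feeding 256 tokens per image. A loading sketch, with the path assumed to be a local clone of this folder:

import torch
from transformers import AutoModelForImageTextToText

# Loads the merged checkpoint described by config.json; all 12 safetensors
# shards plus model.safetensors.index.json must be present in the folder.
model = AutoModelForImageTextToText.from_pretrained(
    "megdgmma3-thinking-DirectLoRA",
    torch_dtype=torch.bfloat16,  # matches "dtype": "bfloat16" above
    device_map="auto",
)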
megdgmma3-thinking-DirectLoRA/generation_config.json ADDED
@@ -0,0 +1,10 @@
+{
+  "_from_model_config": true,
+  "bos_token_id": 2,
+  "eos_token_id": [
+    1,
+    106
+  ],
+  "pad_token_id": 0,
+  "transformers_version": "4.56.1"
+}
megdgmma3-thinking-DirectLoRA/lorafiles/LoRA-URL.txt ADDED
@@ -0,0 +1 @@
+https://huggingface.co/jjsprockel/medgemma27b-luad-qlora
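
The file records where the adapter originated. A sketch for pulling that source adapter directly; the repo id comes from the URL above, and the lorafiles/ folder in this commit appears to carry a copy of it:

from huggingface_hub import snapshot_download

# Downloads the original QLoRA adapter referenced in LoRA-URL.txt.
adapter_dir = snapshot_download(repo_id="jjsprockel/medgemma27b-luad-qlora")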
megdgmma3-thinking-DirectLoRA/lorafiles/adapter_config.json ADDED
@@ -0,0 +1,42 @@
+{
+  "alpha_pattern": {},
+  "auto_mapping": null,
+  "base_model_name_or_path": "medgemma3-thinking-origin",
+  "bias": "none",
+  "corda_config": null,
+  "eva_config": null,
+  "exclude_modules": null,
+  "fan_in_fan_out": false,
+  "inference_mode": true,
+  "init_lora_weights": true,
+  "layer_replication": null,
+  "layers_pattern": null,
+  "layers_to_transform": null,
+  "loftq_config": {},
+  "lora_alpha": 32,
+  "lora_bias": false,
+  "lora_dropout": 0.05,
+  "megatron_config": null,
+  "megatron_core": "megatron.core",
+  "modules_to_save": null,
+  "peft_type": "LORA",
+  "qalora_group_size": 16,
+  "r": 16,
+  "rank_pattern": {},
+  "revision": null,
+  "target_modules": [
+    "v_proj",
+    "q_proj",
+    "k_proj",
+    "up_proj",
+    "o_proj",
+    "gate_proj",
+    "down_proj"
+  ],
+  "target_parameters": null,
+  "task_type": "CAUSAL_LM",
+  "trainable_token_indices": null,
+  "use_dora": false,
+  "use_qalora": false,
+  "use_rslora": false
+}
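
The adapter applies rank-16 LoRA (alpha 32, dropout 0.05) to every attention and MLP projection of the base model named in base_model_name_or_path. A sketch of attaching it with PEFT, assuming that base checkpoint resolves locally or on the Hub:

from peft import PeftModel
from transformers import AutoModelForImageTextToText

# "medgemma3-thinking-origin" is the base recorded in adapter_config.json;
# point this at wherever that checkpoint actually lives.
base = AutoModelForImageTextToText.from_pretrained("medgemma3-thinking-origin")
model = PeftModel.from_pretrained(base, "megdgmma3-thinking-DirectLoRA/lorafiles")

# Optionally fold the LoRA deltas into the base weights, which is presumably
# how the merged shards in this commit were produced.
model = model.merge_and_unload()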
megdgmma3-thinking-DirectLoRA/lorafiles/adapter_model.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5bc6f95d38611f2bef0c0ca55834ff923041ff4efff261c0a84b915097b0970d
+size 466167024
megdgmma3-thinking-DirectLoRA/model-00001-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:88f611062573aa7370625b7696e791ed5313cd0be2c16bd7b883cc9f59d2d2a1
+size 4853896320
megdgmma3-thinking-DirectLoRA/model-00002-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:289c9b59fba782129a4fdeb06489cfe5ccf53dce2bf2f5873345e7649fe99e50
+size 4954792944
megdgmma3-thinking-DirectLoRA/model-00003-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:92b49ccda67588a227bafe90252c7802397dc0a9643bd045331726b68ef56957
+size 4954792976
megdgmma3-thinking-DirectLoRA/model-00004-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b9f19c08ee020dd5c446f938c51e475d334eac983cf6baeef114e6f849efca25
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00005-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:60c8d69608cd90e2d09599f85651892c2fd3c33e26acc3b43a7c70ef87eabfb1
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00006-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:56b76442d075a21fdaf9bff07c9409e1f40d0953e3be6bf6587c3bd96522f2a2
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00007-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:500440c469ef33f6aeeed5efac395b8355fd8200247391b7ed33fd0a40990fb9
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00008-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b7cf5fbc016c69985270edf1aabf46263be5637558bf14d4199176f20c791b9d
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00009-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9b1b542896399018327ca574b143101b1d70fa6447b1d79a286d39590ed2ba40
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00010-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:b99c2e9db03e62bc79df2f61f8f0aced51e2d59319ca27a02ae5f9a3271a417b
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00011-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:6ed795486f365581c407e837558758611d810f2e4a9e4a8b2c470e31fca3c188
+size 4954793016
megdgmma3-thinking-DirectLoRA/model-00012-of-00012.safetensors ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:147db47a4b440c66103e68b94c6952517c23f6645c7a38433ab4cd83f79ef9e6
+size 462476696
megdgmma3-thinking-DirectLoRA/model.safetensors.index.json ADDED
The diff for this file is too large to render. See raw diff
 
megdgmma3-thinking-DirectLoRA/preprocessor_config.json ADDED
@@ -0,0 +1,29 @@
+{
+  "do_convert_rgb": null,
+  "do_normalize": true,
+  "do_pan_and_scan": null,
+  "do_rescale": true,
+  "do_resize": true,
+  "image_mean": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "image_processor_type": "Gemma3ImageProcessor",
+  "image_seq_length": 256,
+  "image_std": [
+    0.5,
+    0.5,
+    0.5
+  ],
+  "pan_and_scan_max_num_crops": null,
+  "pan_and_scan_min_crop_size": null,
+  "pan_and_scan_min_ratio_to_activate": null,
+  "processor_class": "Gemma3Processor",
+  "resample": 2,
+  "rescale_factor": 0.00392156862745098,
+  "size": {
+    "height": 896,
+    "width": 896
+  }
+}
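
The preprocessor resizes to 896x896 with bilinear resampling (resample 2), rescales by 1/255, normalizes each channel with mean and std 0.5, and injects 256 soft tokens per image. An end-to-end sketch, continuing from the model-loading example above (the image path is a placeholder):

from PIL import Image
from transformers import AutoProcessor

processor = AutoProcessor.from_pretrained("megdgmma3-thinking-DirectLoRA")
image = Image.open("slide.png")  # placeholder input image

messages = [{"role": "user", "content": [
    {"type": "image"},
    {"type": "text", "text": "Describe the main finding."},
]}]
prompt = processor.apply_chat_template(messages, add_generation_prompt=True, tokenize=False)
inputs = processor(text=prompt, images=image, return_tensors="pt").to(model.device)

# generation_config.json is applied automatically; token ids 1 (<eos>) and
# 106 (<end_of_turn>) both stop decoding.
out = model.generate(**inputs, max_new_tokens=256)
print(processor.decode(out[0], skip_special_tokens=True))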
megdgmma3-thinking-DirectLoRA/processor_config.json ADDED
@@ -0,0 +1,4 @@
+{
+  "image_seq_length": 256,
+  "processor_class": "Gemma3Processor"
+}
megdgmma3-thinking-DirectLoRA/special_tokens_map.json ADDED
@@ -0,0 +1,33 @@
+{
+  "boi_token": "<start_of_image>",
+  "bos_token": {
+    "content": "<bos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "eoi_token": "<end_of_image>",
+  "eos_token": {
+    "content": "<eos>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "image_token": "<image_soft_token>",
+  "pad_token": {
+    "content": "<pad>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  },
+  "unk_token": {
+    "content": "<unk>",
+    "lstrip": false,
+    "normalized": false,
+    "rstrip": false,
+    "single_word": false
+  }
+}
megdgmma3-thinking-DirectLoRA/tokenizer.json ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4667f2089529e8e7657cfb6d1c19910ae71ff5f28aa7ab2ff2763330affad795
+size 33384568
megdgmma3-thinking-DirectLoRA/tokenizer.model ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1299c11d7cf632ef3b4e11937501358ada021bbdf7c47638d13c0ee982f2e79c
+size 4689074
megdgmma3-thinking-DirectLoRA/tokenizer_config.json ADDED
The diff for this file is too large to render. See raw diff