Upload folder using huggingface_hub
- README.md +28 -26
- adapter_config.json +1 -26
- config.json +2 -2
- model.safetensors +2 -2
- optimizer.pt +3 -0
- rng_state.pth +3 -0
- scheduler.pt +3 -0
- trainer_state.json +0 -0
- training_args.bin +3 -0
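
A minimal sketch of the kind of call that produces a commit like this one; the folder path and repo id below are placeholders, not values taken from the commit itself.

```python
# Sketch: push a local checkpoint folder as a single commit via huggingface_hub.
from huggingface_hub import HfApi

api = HfApi()
api.upload_folder(
    folder_path="./checkpoint",        # local folder containing the files listed above (placeholder)
    repo_id="user/bloom-1b1-lora-ar",  # placeholder repo id
    repo_type="model",
    commit_message="Upload folder using huggingface_hub",
)
```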
README.md
CHANGED
@@ -1,32 +1,34 @@
 ---
-language:
-- ar
+library_name: peft
 ---
-===
-
-## How to use
-```python
-from peft import AutoPeftModelForCausalLM
-from transformers import AutoTokenizer
-… (remaining removed lines are not rendered in the diff)
+## Training procedure
 
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
 
+The following `bitsandbytes` quantization config was used during training:
+- quant_method: bitsandbytes
+- load_in_8bit: True
+- load_in_4bit: False
+- llm_int8_threshold: 6.0
+- llm_int8_skip_modules: None
+- llm_int8_enable_fp32_cpu_offload: False
+- llm_int8_has_fp16_weight: False
+- bnb_4bit_quant_type: fp4
+- bnb_4bit_use_double_quant: False
+- bnb_4bit_compute_dtype: float32
+### Framework versions
 
+- PEFT 0.5.0
+
+- PEFT 0.5.0
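
The removed "How to use" section is only partially rendered above; the following is a minimal sketch of how such an adapter is typically loaded, assuming a placeholder repo id and the 8-bit bitsandbytes setting recorded in the new README (the base model name comes from adapter_config.json).

```python
from peft import AutoPeftModelForCausalLM
from transformers import AutoTokenizer

adapter_id = "path-or-repo-of-this-adapter"  # placeholder, not given in the diff

# load_in_8bit=True mirrors the bitsandbytes config in the README; newer
# transformers releases prefer quantization_config=BitsAndBytesConfig(load_in_8bit=True).
model = AutoPeftModelForCausalLM.from_pretrained(
    adapter_id,
    load_in_8bit=True,
    device_map="auto",
)
tokenizer = AutoTokenizer.from_pretrained("atsuki-yamaguchi/bloom-1b1-focus-ar")

inputs = tokenizer("مرحبا", return_tensors="pt").to(model.device)
outputs = model.generate(**inputs, max_new_tokens=20)
print(tokenizer.decode(outputs[0], skip_special_tokens=True))
```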
adapter_config.json
CHANGED
@@ -1,26 +1 @@
-{
-  "auto_mapping": null,
-  "base_model_name_or_path": "atsuki-yamaguchi/bloom-1b1-focus-ar",
-  "bias": "none",
-  "fan_in_fan_out": false,
-  "inference_mode": true,
-  "init_lora_weights": true,
-  "layers_pattern": null,
-  "layers_to_transform": null,
-  "lora_alpha": 32,
-  "lora_dropout": 0.05,
-  "modules_to_save": [
-    "lm_head",
-    "word_embeddings"
-  ],
-  "peft_type": "LORA",
-  "r": 8,
-  "revision": null,
-  "target_modules": [
-    "query_key_value",
-    "dense",
-    "dense_h_to_4h",
-    "dense_4h_to_h"
-  ],
-  "task_type": "CAUSAL_LM"
-}
+{"auto_mapping": null, "base_model_name_or_path": "atsuki-yamaguchi/bloom-1b1-focus-ar", "bias": "none", "fan_in_fan_out": false, "inference_mode": true, "init_lora_weights": true, "layers_pattern": null, "layers_to_transform": null, "lora_alpha": 32, "lora_dropout": 0.05, "modules_to_save": ["lm_head", "word_embeddings"], "peft_type": "LORA", "r": 8, "revision": null, "target_modules": ["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"], "task_type": "CAUSAL_LM"}
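
For reference, a sketch of how an equivalent LoRA setup could be built in `peft`; the base-model load is illustrative and the quantization/device settings used in training are omitted.

```python
from peft import LoraConfig, get_peft_model
from transformers import AutoModelForCausalLM

# Mirrors the values in adapter_config.json above.
lora_config = LoraConfig(
    r=8,
    lora_alpha=32,
    lora_dropout=0.05,
    bias="none",
    task_type="CAUSAL_LM",
    target_modules=["query_key_value", "dense", "dense_h_to_4h", "dense_4h_to_h"],
    modules_to_save=["lm_head", "word_embeddings"],  # trained in full alongside the LoRA layers
)

base = AutoModelForCausalLM.from_pretrained("atsuki-yamaguchi/bloom-1b1-focus-ar")
model = get_peft_model(base, lora_config)
model.print_trainable_parameters()
```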
config.json
CHANGED
@@ -1,5 +1,5 @@
 {
-  "_name_or_path": "/
+  "_name_or_path": "bigscience/bloom-1b1",
   "apply_residual_connection_post_layernorm": false,
   "architectures": [
     "BloomForCausalLM"
@@ -24,7 +24,7 @@
   "skip_bias_add": true,
   "skip_bias_add_qkv": false,
   "slow_but_exact": false,
-  "torch_dtype": "
+  "torch_dtype": "float64",
   "transformers_version": "4.35.0.dev0",
   "unk_token_id": 0,
   "use_cache": true,
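
A quick way to inspect the committed config, assuming a placeholder checkpoint path; the recorded dtype can be overridden with `torch_dtype=...` at load time.

```python
from transformers import AutoConfig

config = AutoConfig.from_pretrained("path/to/this/checkpoint")  # placeholder path
print(config._name_or_path)  # "bigscience/bloom-1b1" after this commit
print(config.torch_dtype)    # recorded as float64 in this checkpoint
```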
model.safetensors
CHANGED
@@ -1,3 +1,3 @@
 version https://git-lfs.github.com/spec/v1
-oid sha256:
-size
+oid sha256:e9c1e65ca6c0ea753bb61d9c7874553b1c42e5c1d72dd74e33959271c4574fb3
+size 3506316928
optimizer.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:c390d114b2280c077adaacffd7045b937c951574b9ba80992db995887a24c139
+size 403658106
rng_state.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d30effc039b0072dff703d07098b7916edb002de27991fd8b5bba760df247ce6
+size 14244
scheduler.pt
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1fb51cf7d62abfd4ef2110e8b25986ad5cd67aba07081a17241a5063f417710e
+size 1064
trainer_state.json
ADDED
The diff for this file is too large to render; see the raw diff.
training_args.bin
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:9bedae051eea3d67417d0fac7274b11e12a8fc170ea41bd9099b6bb98dbe44d4
+size 4664
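
The Git LFS pointers above record a sha256 oid and byte size for each large file. A small sketch for verifying a downloaded file against its pointer, using the optimizer.pt entry as the example:

```python
import hashlib

def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
    # Stream the file in chunks so multi-GB checkpoints do not load into memory at once.
    h = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            h.update(chunk)
    return h.hexdigest()

expected = "c390d114b2280c077adaacffd7045b937c951574b9ba80992db995887a24c139"
assert sha256_of("optimizer.pt") == expected, "checksum mismatch"
```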