dacorvo (HF Staff) committed
Commit e8198c3 · verified · Parent: a601879

Synchronizing local compiler cache.

Files changed (17):
  1. .gitattributes +3 -0
  2. neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.1.dev0/mixtral/mistralai/Mixtral-8x22B-Instruct-v0.1/2de1699eb875f950d938.json +58 -0
  3. neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/compile_flags.json +1 -0
  4. neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/model.done +0 -0
  5. neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/model.hlo_module.pb +3 -0
  6. neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/model.neff +3 -0
  7. neuronxcc-2.21.18209.0+043b1bf7/MODULE_4bf5da7e011d03e4233a+0b01cb42/compile_flags.json +1 -0
  8. neuronxcc-2.21.18209.0+043b1bf7/MODULE_4bf5da7e011d03e4233a+0b01cb42/model.hlo_module.pb +3 -0
  9. neuronxcc-2.21.18209.0+043b1bf7/MODULE_4bf5da7e011d03e4233a+0b01cb42/model.log +2 -0
  10. neuronxcc-2.21.18209.0+043b1bf7/MODULE_5907b22fb1f79eb2562e+0b01cb42/compile_flags.json +1 -0
  11. neuronxcc-2.21.18209.0+043b1bf7/MODULE_5907b22fb1f79eb2562e+0b01cb42/model.hlo_module.pb +3 -0
  12. neuronxcc-2.21.18209.0+043b1bf7/MODULE_5907b22fb1f79eb2562e+0b01cb42/model.log +2 -0
  13. neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/compile_flags.json +1 -0
  14. neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/model.done +0 -0
  15. neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/model.hlo_module.pb +3 -0
  16. neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/model.neff +3 -0
  17. neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/wrapped_neff.hlo +3 -0
.gitattributes CHANGED
@@ -12212,3 +12212,6 @@ neuronxcc-2.21.18209.0+043b1bf7/MODULE_9aa5b581f9d17855f430+c2248236/model.neff
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_62245507fa1f848be542+c2248236/model.neff filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_a104e1d968fe30c4ac21+ca355898/model.neff filter=lfs diff=lfs merge=lfs -text
  neuronxcc-2.21.18209.0+043b1bf7/MODULE_a104e1d968fe30c4ac21+ca355898/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/model.neff filter=lfs diff=lfs merge=lfs -text
+ neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/wrapped_neff.hlo filter=lfs diff=lfs merge=lfs -text
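The attribute lines above mark the new compiler artifacts as Git LFS objects, so only small pointer files live in the Git history. As a quick illustration (not part of the commit; it assumes it is run from the repository root), the LFS-tracked paths can be listed by parsing .gitattributes:

# Sketch: list which cache artifacts are tracked through Git LFS,
# by scanning the patterns in .gitattributes shown above.
from pathlib import Path

for line in Path(".gitattributes").read_text().splitlines():
    parts = line.split()
    # Lines of the form "<path> filter=lfs diff=lfs merge=lfs -text" mark LFS-tracked files.
    if "filter=lfs" in parts:
        print(parts[0])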
neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.1.dev0/mixtral/mistralai/Mixtral-8x22B-Instruct-v0.1/2de1699eb875f950d938.json ADDED
@@ -0,0 +1,58 @@
+ {
+   "_entry_class": "SingleModelCacheEntry",
+   "_model_id": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+   "_task": "text-generation",
+   "architectures": [
+     "MixtralForCausalLM"
+   ],
+   "attention_dropout": 0.0,
+   "head_dim": 128,
+   "hidden_act": "silu",
+   "hidden_size": 6144,
+   "initializer_range": 0.02,
+   "intermediate_size": 16384,
+   "max_position_embeddings": 65536,
+   "model_type": "mixtral",
+   "neuron": {
+     "_serialized_key": "NxDNeuronConfig",
+     "batch_size": 1,
+     "capacity_factor": null,
+     "checkpoint_id": "mistralai/Mixtral-8x22B-Instruct-v0.1",
+     "checkpoint_revision": "cc88a6cc19fbd17d9f1c0ee0b0d70a748dce698d",
+     "continuous_batching": false,
+     "enable_bucketing": false,
+     "ep_degree": 1,
+     "fused_qkv": false,
+     "glu_mlp": true,
+     "local_ranks_size": 64,
+     "max_batch_size": 1,
+     "max_context_length": 4096,
+     "max_topk": 256,
+     "n_active_tokens": 4096,
+     "neuronxcc_version": "2.21.18209.0+043b1bf7",
+     "on_device_sampling": false,
+     "optimum_neuron_version": "0.4.1.dev0",
+     "output_logits": false,
+     "pp_degree": 1,
+     "sequence_length": 4096,
+     "speculation_length": 0,
+     "start_rank_id": 0,
+     "target": "trn1",
+     "torch_dtype": "bfloat16",
+     "tp_degree": 64
+   },
+   "num_attention_heads": 48,
+   "num_experts_per_tok": 2,
+   "num_hidden_layers": 56,
+   "num_key_value_heads": 8,
+   "num_local_experts": 8,
+   "output_router_logits": false,
+   "rms_norm_eps": 1e-05,
+   "rope_theta": 1000000.0,
+   "router_aux_loss_coef": 0.001,
+   "router_jitter_noise": 0.0,
+   "sliding_window": null,
+   "tie_word_embeddings": false,
+   "use_cache": true,
+   "vocab_size": 32768
+ }
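The registry entry above records the model configuration and the serialized NxDNeuronConfig under which the Mixtral-8x22B-Instruct-v0.1 artifacts in this commit were compiled. A minimal sketch (not part of the commit; the path is the file added above and the script is assumed to run from the repository root) that reads the entry and prints the parameters most relevant when matching a cached compilation:

import json

# Registry entry added in this commit (path relative to the repository root).
entry_path = (
    "neuronxcc-2.21.18209.0+043b1bf7/0_REGISTRY/0.4.1.dev0/mixtral/"
    "mistralai/Mixtral-8x22B-Instruct-v0.1/2de1699eb875f950d938.json"
)

with open(entry_path) as f:
    entry = json.load(f)

# Fields below mirror the NxDNeuronConfig serialized in this registry entry.
neuron = entry["neuron"]
for key in ("target", "tp_degree", "batch_size", "sequence_length",
            "torch_dtype", "neuronxcc_version"):
    print(f"{key}: {neuron[key]}")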
neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--logfile=/tmp/nxd_model/context_encoding_model/_tp0_bk0/log-neuron-cc.txt"]
neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/model.done ADDED
(empty file)
neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:9e755d518cc2d02cea1efff78ba96ca66fb8685df09c693296e3a4e8a18f1bed
+ size 2438683
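The three lines above are a Git LFS pointer, not the HLO protobuf itself; the actual blob is fetched on checkout (for example via git lfs pull). A small sketch (assuming the blob has already been materialized in the current directory) that verifies a downloaded artifact against the oid and size recorded in its pointer:

import hashlib
import os

# The materialized artifact, after `git lfs pull`; path is illustrative.
path = "model.hlo_module.pb"

# Values copied from the pointer added in this commit.
expected_oid = "9e755d518cc2d02cea1efff78ba96ca66fb8685df09c693296e3a4e8a18f1bed"
expected_size = 2438683

with open(path, "rb") as f:
    digest = hashlib.sha256(f.read()).hexdigest()

assert os.path.getsize(path) == expected_size, "size mismatch"
assert digest == expected_oid, "sha256 mismatch"
print("pointer and blob match")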
neuronxcc-2.21.18209.0+043b1bf7/MODULE_050801613474e5a8dafe+c2248236/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:daaf3f55115d7fd8ad3fa9154799468dbd832d320252dba2ed61353dd34af8d6
+ size 4291584
neuronxcc-2.21.18209.0+043b1bf7/MODULE_4bf5da7e011d03e4233a+0b01cb42/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn2", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.21.18209.0+043b1bf7/MODULE_4bf5da7e011d03e4233a+0b01cb42/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:89b72782a8cde38374080b8cf02fd825d65d6089d84e0fec990ffcec324e4ada
+ size 1066523
neuronxcc-2.21.18209.0+043b1bf7/MODULE_4bf5da7e011d03e4233a+0b01cb42/model.log ADDED
@@ -0,0 +1,2 @@
+ Failed compilation with ['neuronx-cc', 'compile', '--framework=XLA', '/tmp/nxd_model/token_generation_model/_tp0_bk0/model.MODULE_4bf5da7e011d03e4233a+0b01cb42.hlo_module.pb', '--output', '/tmp/nxd_model/token_generation_model/_tp0_bk0/model.MODULE_4bf5da7e011d03e4233a+0b01cb42.neff', '--target=trn2', '--enable-saturate-infinity', '--enable-mixed-precision-accumulation', '--model-type', 'transformer', '-O1', '--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2', '--auto-cast=none', '--internal-enable-dge-levels', 'vector_dynamic_offsets', '--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt', '--enable-internal-neff-wrapper', '--verbose=35']: 2025-10-21T15:12:48Z 2025-10-21 15:12:48.519681: F hilo/hlo_passes/NeuronHloVerifier.cc:504] [ERROR] [NCC_VRF009] Memory requirement exceeds target architecture's HBM limit. Needed 23487730696 bytes (21 GB) vs. available 17179869184 bytes (16 GB). TIP: Consider using smaller batches or applying model parallelism
+
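This module (like MODULE_5907b22fb1f79eb2562e below) targets trn2 and ships only the HLO and this failure log: the verifier rejects the compilation because the estimated memory footprint exceeds the 16 GiB HBM limit reported for the target. Checking the numbers from the log above (plain arithmetic, not part of the commit):

GIB = 1024 ** 3

needed = 23_487_730_696      # bytes reported as required by the log above
available = 17_179_869_184   # bytes reported as available (exactly 16 GiB)

print(f"needed:    {needed / GIB:.1f} GiB")                 # ~21.9 GiB
print(f"available: {available / GIB:.1f} GiB")              # 16.0 GiB
print(f"overshoot: {(needed - available) / GIB:.1f} GiB")   # ~5.9 GiB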
neuronxcc-2.21.18209.0+043b1bf7/MODULE_5907b22fb1f79eb2562e+0b01cb42/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn2", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.21.18209.0+043b1bf7/MODULE_5907b22fb1f79eb2562e+0b01cb42/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cce4aa5c537de792273197588f9c934f01f4b0a0c32a37bb94afea255d393efc
+ size 1297455
neuronxcc-2.21.18209.0+043b1bf7/MODULE_5907b22fb1f79eb2562e+0b01cb42/model.log ADDED
@@ -0,0 +1,2 @@
+ Failed compilation with ['neuronx-cc', 'compile', '--framework=XLA', '/tmp/nxd_model/token_generation_model/_tp0_bk0/model.MODULE_5907b22fb1f79eb2562e+0b01cb42.hlo_module.pb', '--output', '/tmp/nxd_model/token_generation_model/_tp0_bk0/model.MODULE_5907b22fb1f79eb2562e+0b01cb42.neff', '--target=trn2', '--enable-saturate-infinity', '--enable-mixed-precision-accumulation', '--model-type', 'transformer', '-O1', '--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2', '--auto-cast=none', '--internal-enable-dge-levels', 'vector_dynamic_offsets', '--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt', '--enable-internal-neff-wrapper', '--verbose=35']: 2025-10-21T15:14:19Z 2025-10-21 15:14:19.119164: F hilo/hlo_passes/NeuronHloVerifier.cc:504] [ERROR] [NCC_VRF009] Memory requirement exceeds target architecture's HBM limit. Needed 23890817056 bytes (22 GB) vs. available 17179869184 bytes (16 GB). TIP: Consider using smaller batches or applying model parallelism
+
neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/compile_flags.json ADDED
@@ -0,0 +1 @@
+ ["--target=trn1", "--enable-saturate-infinity", "--enable-mixed-precision-accumulation", "--model-type", "transformer", "-O1", "--tensorizer-options=--enable-ccop-compute-overlap --cc-pipeline-tiling-factor=2", "--auto-cast=none", "--internal-enable-dge-levels", "vector_dynamic_offsets", "--logfile=/tmp/nxd_model/token_generation_model/_tp0_bk0/log-neuron-cc.txt", "--enable-internal-neff-wrapper"]
neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/model.done ADDED
(empty file)
neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/model.hlo_module.pb ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cff9bbd09783c80111230c07d2efac680aaae1a2c61f3ee64f62b8bb097d6ec0
+ size 1667462
neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/model.neff ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:75629a6c85a27eca61f437f4d95cfeb6dd6ddbab26f922e36430dbe21baf3c5f
+ size 3124224
neuronxcc-2.21.18209.0+043b1bf7/MODULE_887a24107396c649d6c0+ca355898/wrapped_neff.hlo ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:cf05e6e5862148d804bd68009cc957580a8f28e4247af49d5ddd7f48dd2c4f8c
+ size 3451260