{
"name": "root",
"gauges": {
"Pyramids.Policy.Entropy.mean": {
"value": 0.012792897410690784,
"min": 0.012792897410690784,
"max": 1.4177621603012085,
"count": 33
},
"Pyramids.Policy.Entropy.sum": {
"value": 383.1728515625,
"min": 383.1728515625,
"max": 43009.234375,
"count": 33
},
"Pyramids.Step.mean": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Step.sum": {
"value": 989927.0,
"min": 29952.0,
"max": 989927.0,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.mean": {
"value": -0.09858796745538712,
"min": -0.10970687121152878,
"max": 0.09507052600383759,
"count": 33
},
"Pyramids.Policy.ExtrinsicValueEstimate.sum": {
"value": -23.562524795532227,
"min": -26.329648971557617,
"max": 22.531715393066406,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.mean": {
"value": 0.47252652049064636,
"min": 0.31983697414398193,
"max": 0.5097172260284424,
"count": 33
},
"Pyramids.Policy.RndValueEstimate.sum": {
"value": 112.933837890625,
"min": 75.80136108398438,
"max": 122.3321304321289,
"count": 33
},
"Pyramids.Losses.PolicyLoss.mean": {
"value": 0.06890268857137026,
"min": 0.06565859583221718,
"max": 0.0739442317207423,
"count": 33
},
"Pyramids.Losses.PolicyLoss.sum": {
"value": 0.9646376399991836,
"min": 0.5052951602322109,
"max": 1.0079328600365653,
"count": 33
},
"Pyramids.Losses.ValueLoss.mean": {
"value": 0.0002069361613492926,
"min": 0.0002069361613492926,
"max": 0.017477962049152012,
"count": 33
},
"Pyramids.Losses.ValueLoss.sum": {
"value": 0.0028971062588900964,
"min": 0.0028971062588900964,
"max": 0.12234573434406408,
"count": 33
},
"Pyramids.Policy.LearningRate.mean": {
"value": 7.389311822642855e-06,
"min": 7.389311822642855e-06,
"max": 0.00029515063018788575,
"count": 33
},
"Pyramids.Policy.LearningRate.sum": {
"value": 0.00010345036551699998,
"min": 0.00010345036551699998,
"max": 0.0036333355888882,
"count": 33
},
"Pyramids.Policy.Epsilon.mean": {
"value": 0.10246307142857143,
"min": 0.10246307142857143,
"max": 0.19838354285714285,
"count": 33
},
"Pyramids.Policy.Epsilon.sum": {
"value": 1.434483,
"min": 1.3886848,
"max": 2.6111118000000006,
"count": 33
},
"Pyramids.Policy.Beta.mean": {
"value": 0.00025606083571428565,
"min": 0.00025606083571428565,
"max": 0.00983851593142857,
"count": 33
},
"Pyramids.Policy.Beta.sum": {
"value": 0.0035848516999999994,
"min": 0.0035848516999999994,
"max": 0.12113006881999999,
"count": 33
},
"Pyramids.Losses.RNDLoss.mean": {
"value": 0.46683868765830994,
"min": 0.3517557680606842,
"max": 0.5818276405334473,
"count": 33
},
"Pyramids.Losses.RNDLoss.sum": {
"value": 6.535741806030273,
"min": 3.5791425704956055,
"max": 6.968112945556641,
"count": 33
},
"Pyramids.Environment.EpisodeLength.mean": {
"value": 999.0,
"min": 933.7647058823529,
"max": 999.0,
"count": 33
},
"Pyramids.Environment.EpisodeLength.sum": {
"value": 24975.0,
"min": 15984.0,
"max": 33719.0,
"count": 33
},
"Pyramids.Environment.CumulativeReward.mean": {
"value": -0.9996160507202149,
"min": -1.0000000521540642,
"max": -0.6991353444755077,
"count": 33
},
"Pyramids.Environment.CumulativeReward.sum": {
"value": -24.99040126800537,
"min": -31.99320164322853,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.mean": {
"value": -0.9996160507202149,
"min": -1.0000000521540642,
"max": -0.6991353444755077,
"count": 33
},
"Pyramids.Policy.ExtrinsicReward.sum": {
"value": -24.99040126800537,
"min": -31.99320164322853,
"max": -16.000000834465027,
"count": 33
},
"Pyramids.Policy.RndReward.mean": {
"value": 4.554214012026787,
"min": 3.4498045127237997,
"max": 10.316271374002099,
"count": 33
},
"Pyramids.Policy.RndReward.sum": {
"value": 113.85535030066967,
"min": 103.89716279879212,
"max": 165.06034198403358,
"count": 33
},
"Pyramids.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
},
"Pyramids.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 33
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1742547451",
"python_version": "3.10.12 (main, Jul 5 2023, 18:54:27) [GCC 11.2.0]",
"command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/PyramidsRND.yaml --env=./training-envs-executables/linux/Pyramids/Pyramids --run-id=Pyramids Training --no-graphics",
"mlagents_version": "1.2.0.dev0",
"mlagents_envs_version": "1.2.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "2.6.0+cu124",
"numpy_version": "1.23.5",
"end_time_seconds": "1742549325"
},
"total": 1873.8962596849997,
"count": 1,
"self": 0.47661563899873727,
"children": {
"run_training.setup": {
"total": 0.02544095500070398,
"count": 1,
"self": 0.02544095500070398
},
"TrainerController.start_learning": {
"total": 1873.3942030910002,
"count": 1,
"self": 1.4551815930381053,
"children": {
"TrainerController._reset_env": {
"total": 2.344630540000253,
"count": 1,
"self": 2.344630540000253
},
"TrainerController.advance": {
"total": 1869.5091446499619,
"count": 62955,
"self": 1.4677964751081163,
"children": {
"env_step": {
"total": 1204.084720821912,
"count": 62955,
"self": 1043.61177364795,
"children": {
"SubprocessEnvManager._take_step": {
"total": 159.63871706902864,
"count": 62955,
"self": 4.7038176009173185,
"children": {
"TorchPolicy.evaluate": {
"total": 154.93489946811133,
"count": 62555,
"self": 154.93489946811133
}
}
},
"workers": {
"total": 0.83423010493334,
"count": 62955,
"self": 0.0,
"children": {
"worker_root": {
"total": 1868.6862986288215,
"count": 62955,
"is_parallel": true,
"self": 937.7341271886189,
"children": {
"run_training.setup": {
"total": 0.0,
"count": 0,
"is_parallel": true,
"self": 0.0,
"children": {
"steps_from_proto": {
"total": 0.001985984000384633,
"count": 1,
"is_parallel": true,
"self": 0.0006565390012838179,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.001329444999100815,
"count": 8,
"is_parallel": true,
"self": 0.001329444999100815
}
}
},
"UnityEnvironment.step": {
"total": 0.05082202999983565,
"count": 1,
"is_parallel": true,
"self": 0.0005418070004452602,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 0.00046712499988643685,
"count": 1,
"is_parallel": true,
"self": 0.00046712499988643685
},
"communicator.exchange": {
"total": 0.04818268199960585,
"count": 1,
"is_parallel": true,
"self": 0.04818268199960585
},
"steps_from_proto": {
"total": 0.001630415999898105,
"count": 1,
"is_parallel": true,
"self": 0.00035441700129013043,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.0012759989986079745,
"count": 8,
"is_parallel": true,
"self": 0.0012759989986079745
}
}
}
}
}
}
},
"UnityEnvironment.step": {
"total": 930.9521714402026,
"count": 62954,
"is_parallel": true,
"self": 31.97181903335604,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 22.98285553290407,
"count": 62954,
"is_parallel": true,
"self": 22.98285553290407
},
"communicator.exchange": {
"total": 779.7476971829592,
"count": 62954,
"is_parallel": true,
"self": 779.7476971829592
},
"steps_from_proto": {
"total": 96.24979969098331,
"count": 62954,
"is_parallel": true,
"self": 19.586760796652925,
"children": {
"_process_rank_one_or_two_observation": {
"total": 76.66303889433038,
"count": 503632,
"is_parallel": true,
"self": 76.66303889433038
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 663.9566273529417,
"count": 62955,
"self": 2.5653669250350504,
"children": {
"process_trajectory": {
"total": 124.3180993479009,
"count": 62955,
"self": 124.11723456190066,
"children": {
"RLTrainer._checkpoint": {
"total": 0.20086478600023838,
"count": 2,
"self": 0.20086478600023838
}
}
},
"_update_policy": {
"total": 537.0731610800058,
"count": 439,
"self": 295.15018432397846,
"children": {
"TorchPPOOptimizer.update": {
"total": 241.9229767560273,
"count": 22743,
"self": 241.9229767560273
}
}
}
}
}
}
},
"trainer_threads": {
"total": 1.0889998520724475e-06,
"count": 1,
"self": 1.0889998520724475e-06
},
"TrainerController._save_models": {
"total": 0.08524521900017135,
"count": 1,
"self": 0.0019105699993815506,
"children": {
"RLTrainer._checkpoint": {
"total": 0.0833346490007898,
"count": 1,
"self": 0.0833346490007898
}
}
}
}
}
}
}