{
    "name": "root",
    "gauges": {
        "SnowballTarget.Policy.Entropy.mean": {
            "value": 0.9409595131874084,
            "min": 0.9409595131874084,
            "max": 2.856137990951538,
            "count": 20
        },
        "SnowballTarget.Policy.Entropy.sum": {
            "value": 8984.28125,
            "min": 8984.28125,
            "max": 29249.708984375,
            "count": 20
        },
        "SnowballTarget.Step.mean": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Step.sum": {
            "value": 199984.0,
            "min": 9952.0,
            "max": 199984.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.mean": {
            "value": 12.831833839416504,
            "min": 0.3668450713157654,
            "max": 12.831833839416504,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicValueEstimate.sum": {
            "value": 2502.20751953125,
            "min": 71.1679458618164,
            "max": 2606.85791015625,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.mean": {
            "value": 199.0,
            "min": 199.0,
            "max": 199.0,
            "count": 20
        },
        "SnowballTarget.Environment.EpisodeLength.sum": {
            "value": 8756.0,
            "min": 8756.0,
            "max": 10945.0,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.mean": {
            "value": 0.061397994916393035,
            "min": 0.061397994916393035,
            "max": 0.07416016884814655,
            "count": 20
        },
        "SnowballTarget.Losses.PolicyLoss.sum": {
            "value": 0.24559197966557214,
            "min": 0.24559197966557214,
            "max": 0.3708008442407328,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.mean": {
            "value": 0.1972571582916905,
            "min": 0.11579111596688116,
            "max": 0.27738642488040177,
            "count": 20
        },
        "SnowballTarget.Losses.ValueLoss.sum": {
            "value": 0.789028633166762,
            "min": 0.46316446386752463,
            "max": 1.3406703250080931,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.mean": {
            "value": 8.082097306000005e-06,
            "min": 8.082097306000005e-06,
            "max": 0.000291882002706,
            "count": 20
        },
        "SnowballTarget.Policy.LearningRate.sum": {
            "value": 3.232838922400002e-05,
            "min": 3.232838922400002e-05,
            "max": 0.00138516003828,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.mean": {
            "value": 0.10269400000000001,
            "min": 0.10269400000000001,
            "max": 0.19729400000000002,
            "count": 20
        },
        "SnowballTarget.Policy.Epsilon.sum": {
            "value": 0.41077600000000003,
            "min": 0.41077600000000003,
            "max": 0.96172,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.mean": {
            "value": 0.0001444306000000001,
            "min": 0.0001444306000000001,
            "max": 0.0048649706,
            "count": 20
        },
        "SnowballTarget.Policy.Beta.sum": {
            "value": 0.0005777224000000004,
            "min": 0.0005777224000000004,
            "max": 0.023089828,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.mean": {
            "value": 25.15909090909091,
            "min": 3.340909090909091,
            "max": 25.30909090909091,
            "count": 20
        },
        "SnowballTarget.Environment.CumulativeReward.sum": {
            "value": 1107.0,
            "min": 147.0,
            "max": 1392.0,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.mean": {
            "value": 25.15909090909091,
            "min": 3.340909090909091,
            "max": 25.30909090909091,
            "count": 20
        },
        "SnowballTarget.Policy.ExtrinsicReward.sum": {
            "value": 1107.0,
            "min": 147.0,
            "max": 1392.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.mean": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        },
        "SnowballTarget.IsTraining.sum": {
            "value": 1.0,
            "min": 1.0,
            "max": 1.0,
            "count": 20
        }
    },
    "metadata": {
        "timer_format_version": "0.1.0",
        "start_time_seconds": "1675947826",
        "python_version": "3.8.10 (default, Nov 14 2022, 12:59:47) \n[GCC 9.4.0]",
        "command_line_arguments": "/usr/local/bin/mlagents-learn ./config/ppo/SnowballTarget.yaml --env=./training-envs-executables/linux/SnowballTarget/SnowballTarget --run-id=SnowballTarget1 --no-graphics",
        "mlagents_version": "0.29.0.dev0",
        "mlagents_envs_version": "0.29.0.dev0",
        "communication_protocol_version": "1.5.0",
        "pytorch_version": "1.8.1+cu102",
        "numpy_version": "1.21.6",
        "end_time_seconds": "1675948301"
    },
    "total": 474.743503603,
    "count": 1,
    "self": 0.43875301100001707,
    "children": {
        "run_training.setup": {
            "total": 0.11267042899999069,
            "count": 1,
            "self": 0.11267042899999069
        },
        "TrainerController.start_learning": {
            "total": 474.192080163,
            "count": 1,
            "self": 0.5538825219957744,
            "children": {
                "TrainerController._reset_env": {
                    "total": 9.574592048,
                    "count": 1,
                    "self": 9.574592048
                },
                "TrainerController.advance": {
                    "total": 463.9420596110043,
                    "count": 18202,
                    "self": 0.27263129300069977,
                    "children": {
                        "env_step": {
                            "total": 463.6694283180036,
                            "count": 18202,
                            "self": 319.28711302102795,
                            "children": {
                                "SubprocessEnvManager._take_step": {
                                    "total": 144.0997382639822,
                                    "count": 18202,
                                    "self": 1.4528688439835378,
                                    "children": {
                                        "TorchPolicy.evaluate": {
                                            "total": 142.64686941999867,
                                            "count": 18202,
                                            "self": 31.27760227999613,
                                            "children": {
                                                "TorchPolicy.sample_actions": {
                                                    "total": 111.36926714000253,
                                                    "count": 18202,
                                                    "self": 111.36926714000253
                                                }
                                            }
                                        }
                                    }
                                },
                                "workers": {
                                    "total": 0.2825770329934585,
                                    "count": 18202,
                                    "self": 0.0,
                                    "children": {
                                        "worker_root": {
                                            "total": 472.60816781700305,
                                            "count": 18202,
                                            "is_parallel": true,
                                            "self": 227.2961726250096,
                                            "children": {
                                                "run_training.setup": {
                                                    "total": 0.0,
                                                    "count": 0,
                                                    "is_parallel": true,
                                                    "self": 0.0,
                                                    "children": {
                                                        "steps_from_proto": {
                                                            "total": 0.010136070000044128,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0034538610000254266,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 0.006682209000018702,
                                                                    "count": 10,
                                                                    "is_parallel": true,
                                                                    "self": 0.006682209000018702
                                                                }
                                                            }
                                                        },
                                                        "UnityEnvironment.step": {
                                                            "total": 0.03539712500003134,
                                                            "count": 1,
                                                            "is_parallel": true,
                                                            "self": 0.0005473249999567997,
                                                            "children": {
                                                                "UnityEnvironment._generate_step_input": {
                                                                    "total": 0.0002922710000348161,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.0002922710000348161
                                                                },
                                                                "communicator.exchange": {
                                                                    "total": 0.03269671999998991,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.03269671999998991
                                                                },
                                                                "steps_from_proto": {
                                                                    "total": 0.0018608090000498123,
                                                                    "count": 1,
                                                                    "is_parallel": true,
                                                                    "self": 0.00041319000001749373,
                                                                    "children": {
                                                                        "_process_rank_one_or_two_observation": {
                                                                            "total": 0.0014476190000323186,
                                                                            "count": 10,
                                                                            "is_parallel": true,
                                                                            "self": 0.0014476190000323186
                                                                        }
                                                                    }
                                                                }
                                                            }
                                                        }
                                                    }
                                                },
                                                "UnityEnvironment.step": {
                                                    "total": 245.31199519199345,
                                                    "count": 18201,
                                                    "is_parallel": true,
                                                    "self": 9.681647351994911,
                                                    "children": {
                                                        "UnityEnvironment._generate_step_input": {
                                                            "total": 5.241232093005806,
                                                            "count": 18201,
                                                            "is_parallel": true,
                                                            "self": 5.241232093005806
                                                        },
                                                        "communicator.exchange": {
                                                            "total": 195.65108589199872,
                                                            "count": 18201,
                                                            "is_parallel": true,
                                                            "self": 195.65108589199872
                                                        },
                                                        "steps_from_proto": {
                                                            "total": 34.73802985499401,
                                                            "count": 18201,
                                                            "is_parallel": true,
                                                            "self": 7.116257604005455,
                                                            "children": {
                                                                "_process_rank_one_or_two_observation": {
                                                                    "total": 27.621772250988556,
                                                                    "count": 182010,
                                                                    "is_parallel": true,
                                                                    "self": 27.621772250988556
                                                                }
                                                            }
                                                        }
                                                    }
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "trainer_threads": {
                    "total": 0.00011666699992929352,
                    "count": 1,
                    "self": 0.00011666699992929352,
                    "children": {
                        "thread_root": {
                            "total": 0.0,
                            "count": 0,
                            "is_parallel": true,
                            "self": 0.0,
                            "children": {
                                "trainer_advance": {
                                    "total": 460.2835570269815,
                                    "count": 417308,
                                    "is_parallel": true,
                                    "self": 10.776881780955364,
                                    "children": {
                                        "process_trajectory": {
                                            "total": 260.7446263980272,
                                            "count": 417308,
                                            "is_parallel": true,
                                            "self": 259.9376430720272,
                                            "children": {
                                                "RLTrainer._checkpoint": {
                                                    "total": 0.8069833260000223,
                                                    "count": 4,
                                                    "is_parallel": true,
                                                    "self": 0.8069833260000223
                                                }
                                            }
                                        },
                                        "_update_policy": {
                                            "total": 188.76204884799893,
                                            "count": 90,
                                            "is_parallel": true,
                                            "self": 66.73709740899778,
                                            "children": {
                                                "TorchPPOOptimizer.update": {
                                                    "total": 122.02495143900114,
                                                    "count": 4587,
                                                    "is_parallel": true,
                                                    "self": 122.02495143900114
                                                }
                                            }
                                        }
                                    }
                                }
                            }
                        }
                    }
                },
                "TrainerController._save_models": {
                    "total": 0.12142931500000032,
                    "count": 1,
                    "self": 0.000944230000072821,
                    "children": {
                        "RLTrainer._checkpoint": {
                            "total": 0.1204850849999275,
                            "count": 1,
                            "self": 0.1204850849999275
                        }
                    }
                }
            }
        }
    }
}