{
"name": "root",
"gauges": {
"SoccerTwos.Policy.Entropy.mean": {
"value": 2.8800697326660156,
"min": 2.7971508502960205,
"max": 3.2957067489624023,
"count": 500
},
"SoccerTwos.Policy.Entropy.sum": {
"value": 49214.6328125,
"min": 11543.6328125,
"max": 132192.15625,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.mean": {
"value": 197.92592592592592,
"min": 115.02564102564102,
"max": 999.0,
"count": 500
},
"SoccerTwos.Environment.EpisodeLength.sum": {
"value": 21376.0,
"min": 16132.0,
"max": 23648.0,
"count": 500
},
"SoccerTwos.Self-play.ELO.mean": {
"value": 1272.36506910111,
"min": 1194.692912581449,
"max": 1272.36506910111,
"count": 358
},
"SoccerTwos.Self-play.ELO.sum": {
"value": 68707.71373145994,
"min": 2389.458277317844,
"max": 97962.82461127103,
"count": 358
},
"SoccerTwos.Step.mean": {
"value": 4999508.0,
"min": 9976.0,
"max": 4999508.0,
"count": 500
},
"SoccerTwos.Step.sum": {
"value": 4999508.0,
"min": 9976.0,
"max": 4999508.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.mean": {
"value": 0.08261680603027344,
"min": -0.02772657200694084,
"max": 0.08261680603027344,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicBaselineEstimate.sum": {
"value": 4.461307525634766,
"min": -0.3755653500556946,
"max": 4.461307525634766,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.mean": {
"value": 0.09392579644918442,
"min": -0.019030623137950897,
"max": 0.09392579644918442,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicValueEstimate.sum": {
"value": 5.071992874145508,
"min": -0.39550870656967163,
"max": 5.071992874145508,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.mean": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Environment.CumulativeReward.sum": {
"value": 0.0,
"min": 0.0,
"max": 0.0,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.mean": {
"value": 0.020985184513308382,
"min": -0.6199400007724762,
"max": 0.44327059563468485,
"count": 500
},
"SoccerTwos.Policy.ExtrinsicReward.sum": {
"value": 1.1331999637186527,
"min": -15.8471999168396,
"max": 18.599999964237213,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.mean": {
"value": 0.020985184513308382,
"min": -0.6199400007724762,
"max": 0.44327059563468485,
"count": 500
},
"SoccerTwos.Environment.GroupCumulativeReward.sum": {
"value": 1.1331999637186527,
"min": -15.8471999168396,
"max": 18.599999964237213,
"count": 500
},
"SoccerTwos.IsTraining.mean": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.IsTraining.sum": {
"value": 1.0,
"min": 1.0,
"max": 1.0,
"count": 500
},
"SoccerTwos.Losses.PolicyLoss.mean": {
"value": 0.01645166991026296,
"min": 0.010685314397172381,
"max": 0.025194068586764238,
"count": 233
},
"SoccerTwos.Losses.PolicyLoss.sum": {
"value": 0.01645166991026296,
"min": 0.010685314397172381,
"max": 0.025194068586764238,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.mean": {
"value": 0.04083418982724349,
"min": 1.8388129466491894e-07,
"max": 0.04083418982724349,
"count": 233
},
"SoccerTwos.Losses.ValueLoss.sum": {
"value": 0.04083418982724349,
"min": 1.8388129466491894e-07,
"max": 0.04083418982724349,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.mean": {
"value": 0.04173363422354062,
"min": 1.941327316975124e-07,
"max": 0.04173363422354062,
"count": 233
},
"SoccerTwos.Losses.BaselineLoss.sum": {
"value": 0.04173363422354062,
"min": 1.941327316975124e-07,
"max": 0.04173363422354062,
"count": 233
},
"SoccerTwos.Policy.LearningRate.mean": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.LearningRate.sum": {
"value": 0.0003,
"min": 0.0003,
"max": 0.0003,
"count": 233
},
"SoccerTwos.Policy.Epsilon.mean": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 233
},
"SoccerTwos.Policy.Epsilon.sum": {
"value": 0.20000000000000007,
"min": 0.20000000000000007,
"max": 0.20000000000000007,
"count": 233
},
"SoccerTwos.Policy.Beta.mean": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
},
"SoccerTwos.Policy.Beta.sum": {
"value": 0.005000000000000001,
"min": 0.005000000000000001,
"max": 0.005000000000000001,
"count": 233
}
},
"metadata": {
"timer_format_version": "0.1.0",
"start_time_seconds": "1678803189",
"python_version": "3.9.16 (main, Mar 8 2023, 04:29:44) \n[Clang 14.0.6 ]",
"command_line_arguments": "/Users/Ca/opt/anaconda3/envs/rl/bin/mlagents-learn ./config/poca/SoccerTwos.yaml --env=./training-envs-executables/SoccerTwos/SoccerTwos.app --run-id=SoccerTwos --no-graphics",
"mlagents_version": "0.31.0.dev0",
"mlagents_envs_version": "0.31.0.dev0",
"communication_protocol_version": "1.5.0",
"pytorch_version": "1.11.0",
"numpy_version": "1.21.2",
"end_time_seconds": "1678823876"
},
"total": 20686.559014875,
"count": 1,
"self": 0.673993167001754,
"children": {
"run_training.setup": {
"total": 0.329239458,
"count": 1,
"self": 0.329239458
},
"TrainerController.start_learning": {
"total": 20685.55578225,
"count": 1,
"self": 4.179235140993114,
"children": {
"TrainerController._reset_env": {
"total": 8.37700170799874,
"count": 25,
"self": 8.37700170799874
},
"TrainerController.advance": {
"total": 20672.89009298401,
"count": 325185,
"self": 4.0852370792672446,
"children": {
"env_step": {
"total": 4611.209081970977,
"count": 325185,
"self": 3748.059476784651,
"children": {
"SubprocessEnvManager._take_step": {
"total": 860.3115061578112,
"count": 325185,
"self": 22.73343601277213,
"children": {
"TorchPolicy.evaluate": {
"total": 837.5780701450391,
"count": 645002,
"self": 837.5780701450391
}
}
},
"workers": {
"total": 2.8380990285152663,
"count": 325185,
"self": 0.0,
"children": {
"worker_root": {
"total": 20671.292230147857,
"count": 325185,
"is_parallel": true,
"self": 17455.679545158306,
"children": {
"steps_from_proto": {
"total": 0.03426083300603011,
"count": 50,
"is_parallel": true,
"self": 0.0059753719907442715,
"children": {
"_process_rank_one_or_two_observation": {
"total": 0.02828546101528584,
"count": 200,
"is_parallel": true,
"self": 0.02828546101528584
}
}
},
"UnityEnvironment.step": {
"total": 3215.578424156547,
"count": 325185,
"is_parallel": true,
"self": 138.84121177521865,
"children": {
"UnityEnvironment._generate_step_input": {
"total": 92.73925554655374,
"count": 325185,
"is_parallel": true,
"self": 92.73925554655374
},
"communicator.exchange": {
"total": 2554.985181854378,
"count": 325185,
"is_parallel": true,
"self": 2554.985181854378
},
"steps_from_proto": {
"total": 429.0127749803968,
"count": 650370,
"is_parallel": true,
"self": 69.99788359482523,
"children": {
"_process_rank_one_or_two_observation": {
"total": 359.01489138557156,
"count": 2601480,
"is_parallel": true,
"self": 359.01489138557156
}
}
}
}
}
}
}
}
}
}
},
"trainer_advance": {
"total": 16057.595773933765,
"count": 325185,
"self": 34.39603538314623,
"children": {
"process_trajectory": {
"total": 1798.6496443966257,
"count": 325185,
"self": 1797.5627657706264,
"children": {
"RLTrainer._checkpoint": {
"total": 1.0868786259993612,
"count": 10,
"self": 1.0868786259993612
}
}
},
"_update_policy": {
"total": 14224.550094153992,
"count": 233,
"self": 610.1583948359221,
"children": {
"TorchPOCAOptimizer.update": {
"total": 13614.39169931807,
"count": 6990,
"self": 13614.39169931807
}
}
}
}
}
}
},
"trainer_threads": {
"total": 5.000001692678779e-07,
"count": 1,
"self": 5.000001692678779e-07
},
"TrainerController._save_models": {
"total": 0.10945191700011492,
"count": 1,
"self": 0.0009360839976579882,
"children": {
"RLTrainer._checkpoint": {
"total": 0.10851583300245693,
"count": 1,
"self": 0.10851583300245693
}
}
}
}
}
}
}