diff --git a/scenarionet/examples/combine_dataset_and_run.py b/scenarionet/examples/combine_dataset_and_run.py
index 61c69e8..bf8beb9 100644
--- a/scenarionet/examples/combine_dataset_and_run.py
+++ b/scenarionet/examples/combine_dataset_and_run.py
@@ -38,22 +38,23 @@ if __name__ == '__main__':
         }
     )
     success = []
-    env.reset(force_seed=2)
     while True:
-        env.reset(force_seed=2)
-        for t in range(10000):
-            o, r, d, info = env.step([0, 0])
-            assert env.observation_space.contains(o)
-            c_lane = env.vehicle.lane
-            long, lat, = c_lane.local_coordinates(env.vehicle.position)
-            # if env.config["use_render"]:
-            env.render(
-                text={
-                    "seed": env.engine.global_seed + env.config["start_scenario_index"],
-                }
-            )
+        for seed in [91]:
+            env.reset(force_seed=seed)
+            for t in range(10000):
+                o, r, d, info = env.step([0, 0])
+                assert env.observation_space.contains(o)
+                c_lane = env.vehicle.lane
+                long, lat, = c_lane.local_coordinates(env.vehicle.position)
+                # if env.config["use_render"]:
+                env.render(
+                    text={
+                        "seed": env.engine.global_seed + env.config["start_scenario_index"],
+                    }
+                )
 
-            if d:
-                if info["arrive_dest"]:
-                    print("seed:{}, success".format(env.engine.global_random_seed))
-                    break
+                if d:
+                    if info["arrive_dest"]:
+                        print("seed:{}, success".format(env.engine.global_random_seed))
+                        print(t)
+                        break
diff --git a/scenarionet/tests/local_test/_test_combine_dataset_local.py b/scenarionet/tests/local_test/_test_combine_dataset_local.py
index 9b5621f..000953e 100644
--- a/scenarionet/tests/local_test/_test_combine_dataset_local.py
+++ b/scenarionet/tests/local_test/_test_combine_dataset_local.py
@@ -14,7 +14,7 @@ def _test_combine_dataset():
     combine_path = os.path.join(SCENARIONET_DATASET_PATH, "combined_dataset")
     combine_multiple_dataset(combine_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
     os.makedirs("verify_results", exist_ok=True)
-    success, result = verify_loading_into_metadrive(combine_path, "verify_results", steps_to_run=250)
+    success, result = verify_loading_into_metadrive(combine_path, "verify_results")
     assert success
diff --git a/scenarionet/tests/test_combine_dataset.py b/scenarionet/tests/test_combine_dataset.py
index ffabed4..8f24603 100644
--- a/scenarionet/tests/test_combine_dataset.py
+++ b/scenarionet/tests/test_combine_dataset.py
@@ -21,9 +21,11 @@ def test_combine_multiple_dataset():
     summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
     for scenario_file in sorted_scenarios:
         read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
+    num_worker = 4 if len(summary) > 4 else 1
     success, result = verify_loading_into_metadrive(dataset_path,
                                                     result_save_dir="test_dataset",
-                                                    steps_to_run=300)
+                                                    steps_to_run=1000,
+                                                    num_workers=num_worker)
     assert success
diff --git a/scenarionet/verifier/utils.py b/scenarionet/verifier/utils.py
index 9aa4d9a..4e63f0b 100644
--- a/scenarionet/verifier/utils.py
+++ b/scenarionet/verifier/utils.py
@@ -11,7 +11,7 @@ from metadrive.scenario.utils import get_number_of_scenarios
 from functools import partial
 
 
-def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=300, num_workers=8):
+def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=1000, num_workers=8):
     if result_save_dir is not None:
         assert os.path.exists(result_save_dir) and os.path.isdir(
             result_save_dir), "Argument result_save_dir must be an existing dir"
@@ -45,7 +45,7 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=30
     if result:
         print("All scenarios can be loaded successfully!")
     else:
-        print("Fail to load all scenarios, see log for more details! Number of failed scenarios: {}".format(logs))
+        print("Fail to load all scenarios, see log for more details! Number of failed scenarios: {}".format(len(logs)))
     return result, logs
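
For reference, a minimal usage sketch of the verifier entry point after this change. It assumes verify_loading_into_metadrive is importable from scenarionet.verifier.utils (matching the file path in the diff); the dataset path below is a placeholder and num_workers=4 is an arbitrary choice, while the argument names and the (result, logs) return pair follow the calls shown above.

    import os

    from scenarionet.verifier.utils import verify_loading_into_metadrive

    dataset_path = "/path/to/combined_dataset"  # placeholder; point at a real dataset directory
    result_save_dir = "verify_results"          # must exist before the call
    os.makedirs(result_save_dir, exist_ok=True)

    # steps_to_run now defaults to 1000; num_workers controls parallel verification.
    success, logs = verify_loading_into_metadrive(
        dataset_path,
        result_save_dir,
        steps_to_run=1000,
        num_workers=4,
    )
    assert success, "Number of failed scenarios: {}".format(len(logs))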