diff --git a/scenarionet/tests/test_dataset/nuscenes_0/dataset_summary.pkl b/scenarionet/tests/test_dataset/nuscenes_0/dataset_summary.pkl
deleted file mode 100644
index 2321d88..0000000
Binary files a/scenarionet/tests/test_dataset/nuscenes_0/dataset_summary.pkl and /dev/null differ
diff --git a/scenarionet/tests/test_dataset/nuscenes_1/dataset_mapping.pkl b/scenarionet/tests/test_dataset/nuscenes_1/dataset_mapping.pkl
deleted file mode 100644
index 150c406..0000000
Binary files a/scenarionet/tests/test_dataset/nuscenes_1/dataset_mapping.pkl and /dev/null differ
diff --git a/scenarionet/tests/test_dataset/nuscenes_2/sd_nuscenes_v1.0-mini_scene-0757.pkl b/scenarionet/tests/test_dataset/nuscenes_2/0.pkl
similarity index 100%
rename from scenarionet/tests/test_dataset/nuscenes_2/sd_nuscenes_v1.0-mini_scene-0757.pkl
rename to scenarionet/tests/test_dataset/nuscenes_2/0.pkl
diff --git a/scenarionet/tests/test_dataset/nuscenes_2/sd_nuscenes_v1.0-mini_scene-0796.pkl b/scenarionet/tests/test_dataset/nuscenes_2/1.pkl
similarity index 100%
rename from scenarionet/tests/test_dataset/nuscenes_2/sd_nuscenes_v1.0-mini_scene-0796.pkl
rename to scenarionet/tests/test_dataset/nuscenes_2/1.pkl
diff --git a/scenarionet/tests/test_dataset/nuscenes_2/dataset_mapping.pkl b/scenarionet/tests/test_dataset/nuscenes_2/dataset_mapping.pkl
deleted file mode 100644
index 4866016..0000000
Binary files a/scenarionet/tests/test_dataset/nuscenes_2/dataset_mapping.pkl and /dev/null differ
diff --git a/scenarionet/tests/test_dataset/nuscenes_2/dataset_summary.pkl b/scenarionet/tests/test_dataset/nuscenes_2/dataset_summary.pkl
deleted file mode 100644
index 3a1218f..0000000
Binary files a/scenarionet/tests/test_dataset/nuscenes_2/dataset_summary.pkl and /dev/null differ
diff --git a/scenarionet/tests/test_dataset/nuscenes_3/dataset_mapping.pkl b/scenarionet/tests/test_dataset/nuscenes_3/dataset_mapping.pkl
deleted file mode 100644
index 912a311..0000000
Binary files a/scenarionet/tests/test_dataset/nuscenes_3/dataset_mapping.pkl and /dev/null differ
diff --git a/scenarionet/tests/test_dataset/nuscenes_3/dataset_summary.pkl b/scenarionet/tests/test_dataset/nuscenes_3/dataset_summary.pkl
deleted file mode 100644
index 316ef7b..0000000
Binary files a/scenarionet/tests/test_dataset/nuscenes_3/dataset_summary.pkl and /dev/null differ
diff --git a/scenarionet/tests/test_filter.py b/scenarionet/tests/test_filter.py
index 3cda91b..cc753d8 100644
--- a/scenarionet/tests/test_filter.py
+++ b/scenarionet/tests/test_filter.py
@@ -19,7 +19,7 @@ def test_filter_dataset():
     # nuscenes data has no light
     # light_condition = ScenarioFilter.make(ScenarioFilter.has_traffic_light)
     sdc_driving_condition = ScenarioFilter.make(ScenarioFilter.sdc_moving_dist, target_dist=30, condition="smaller")
-    answer = ['scene-0553', 'scene-0757', 'scene-1100']
+    answer = ['sd_nuscenes_v1.0-mini_scene-0553.pkl', '0.pkl', 'sd_nuscenes_v1.0-mini_scene-1100.pkl']
     summary, mapping = combine_multiple_dataset(
         output_path,
         *dataset_paths,
@@ -34,7 +34,7 @@
             if a in s:
                 in_ = True
                 break
-        assert in_
+        assert in_, summary.keys()
 
     sdc_driving_condition = ScenarioFilter.make(ScenarioFilter.sdc_moving_dist, target_dist=5, condition="greater")
     summary, mapping = combine_multiple_dataset(
diff --git a/scenarionet/tests/test_generate_from_error_file.py b/scenarionet/tests/test_generate_from_error_file.py
new file mode 100644
index 0000000..88200fc
--- /dev/null
+++ b/scenarionet/tests/test_generate_from_error_file.py
@@ -0,0 +1,29 @@
+import os
+import os.path
+
+from scenarionet import SCENARIONET_PACKAGE_PATH
+from scenarionet.builder.utils import combine_multiple_dataset, read_dataset_summary, read_scenario
+from scenarionet.verifier.utils import verify_loading_into_metadrive, set_random_drop
+
+
+def test_combine_multiple_dataset():
+    set_random_drop(True)
+    dataset_name = "nuscenes"
+    original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
+    dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]
+    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
+    combine_multiple_dataset(output_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
+
+    dataset_paths.append(output_path)
+    for dataset_path in dataset_paths:
+        summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
+        for scenario_file in sorted_scenarios:
+            read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
+        success, result = verify_loading_into_metadrive(
+            dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=4)
+        assert success
+    set_random_drop(False)
+
+
+if __name__ == '__main__':
+    test_combine_multiple_dataset()
diff --git a/scenarionet/verifier/utils.py b/scenarionet/verifier/utils.py
index e120de0..8a1b336 100644
--- a/scenarionet/verifier/utils.py
+++ b/scenarionet/verifier/utils.py
@@ -2,7 +2,7 @@ import logging
 import multiprocessing
 import os
 
-from metadrive.scenario.scenario_description import ScenarioDescription as SD
+import numpy as np
 
 from scenarionet.verifier.error import ErrorDescription as ED
 from scenarionet.verifier.error import ErrorFile as EF
@@ -14,6 +14,14 @@ from metadrive.policy.replay_policy import ReplayEgoCarPolicy
 from metadrive.scenario.utils import get_number_of_scenarios
 from functools import partial
 
+# this global variable is for generating broken scenarios for testing
+RANDOM_DROP = False
+
+
+def set_random_drop(drop):
+    global RANDOM_DROP
+    RANDOM_DROP = drop
+
 
 def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=1000, num_workers=8):
     assert os.path.exists(result_save_dir) and os.path.isdir(result_save_dir), "result_save_dir must be an existing dir"
@@ -56,6 +64,7 @@
 
 
 def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, steps_to_run, metadrive_config=None):
+    global RANDOM_DROP
     logger.info(
         "================ Begin Scenario Loading Verification for scenario {}-{} ================ \n".format(
             start_scenario_index, num_scenario + start_scenario_index))
@@ -77,6 +86,8 @@
         try:
             env.reset(force_seed=scenario_index)
             arrive = False
+            if RANDOM_DROP and np.random.rand() < 0.5:
+                raise ValueError("Random Drop")
             for _ in range(steps_to_run):
                 o, r, d, info = env.step([0, 0])
                 if d and info["arrive_dest"]: