From ed8fed4c1b872054d5b7ccf444c5858d36e6dfed Mon Sep 17 00:00:00 2001
From: QuanyiLi
Date: Mon, 8 May 2023 12:52:41 +0100
Subject: [PATCH] move all test-generated files to tmp, add generate-error-set
 test

---
 .gitignore                                 |  4 ++
 scenarionet/common_utils.py                | 12 +++++-
 scenarionet/tests/test_combine_dataset.py  |  4 +-
 scenarionet/tests/test_filter.py           |  2 +-
 .../tests/test_generate_from_error_file.py | 39 ++++++++++++++-----
 scenarionet/verifier/utils.py              |  2 +-
 6 files changed, 47 insertions(+), 16 deletions(-)

diff --git a/.gitignore b/.gitignore
index 9671ccf..759a188 100644
--- a/.gitignore
+++ b/.gitignore
@@ -12,3 +12,7 @@ dataset/*
 
 **/combine/
 **.json
+
+**/tmp/**
+**/failed_scenarios/
+**/passed_senarios/
diff --git a/scenarionet/common_utils.py b/scenarionet/common_utils.py
index 62cd3b5..993a5b3 100644
--- a/scenarionet/common_utils.py
+++ b/scenarionet/common_utils.py
@@ -1,3 +1,4 @@
+import os.path
 import pickle
 
 import numpy as np
@@ -77,5 +78,12 @@ def read_dataset_summary(dataset_path):
     return sd_utils.read_dataset_summary(dataset_path)
 
 
-def read_scenario(pkl_file_path):
-    return sd_utils.read_scenario_data(pkl_file_path)
+def read_scenario(dataset_path, mapping, scenario_file_name):
+    """
+    Read a single scenario from a dataset.
+    :param dataset_path: the directory containing dataset_summary.pkl
+    :param mapping: a dict mapping each scenario file name to its relative directory under dataset_path
+    :param scenario_file_name: the scenario file name
+    :return: ScenarioDescription
+    """
+    return sd_utils.read_scenario_data(os.path.join(dataset_path, mapping[scenario_file_name], scenario_file_name))
diff --git a/scenarionet/tests/test_combine_dataset.py b/scenarionet/tests/test_combine_dataset.py
index c677949..54ad460 100644
--- a/scenarionet/tests/test_combine_dataset.py
+++ b/scenarionet/tests/test_combine_dataset.py
@@ -12,13 +12,13 @@ def test_combine_multiple_dataset():
     original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
     dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]
 
-    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
+    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")
     combine_multiple_dataset(output_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
     dataset_paths.append(output_path)
     for dataset_path in dataset_paths:
         summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
         for scenario_file in sorted_scenarios:
-            read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
+            read_scenario(dataset_path, mapping, scenario_file)
         success, result = verify_loading_into_metadrive(
             dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=4
         )
diff --git a/scenarionet/tests/test_filter.py b/scenarionet/tests/test_filter.py
index cc753d8..073ac0e 100644
--- a/scenarionet/tests/test_filter.py
+++ b/scenarionet/tests/test_filter.py
@@ -13,7 +13,7 @@ def test_filter_dataset():
     original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
     dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]
 
-    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
+    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")
 
     # ========================= test 1 =========================
     # nuscenes data has no light
diff --git a/scenarionet/tests/test_generate_from_error_file.py b/scenarionet/tests/test_generate_from_error_file.py
index 0e7396d..66770fa 100644
--- a/scenarionet/tests/test_generate_from_error_file.py
+++ b/scenarionet/tests/test_generate_from_error_file.py
@@ -1,9 +1,11 @@
+import copy
 import os
 import os.path
-from metadrive.scenario.utils import assert_scenario_equal
+
 
 from scenarionet import SCENARIONET_PACKAGE_PATH
 from scenarionet.builder.utils import combine_multiple_dataset
 from scenarionet.common_utils import read_dataset_summary, read_scenario
+from scenarionet.common_utils import recursive_equal
 from scenarionet.verifier.error import ErrorFile
 from scenarionet.verifier.utils import verify_loading_into_metadrive, set_random_drop
@@ -13,27 +15,44 @@ def test_combine_multiple_dataset():
     dataset_name = "nuscenes"
     original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
     dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]
-    dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
+    dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")
     combine_multiple_dataset(dataset_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
     summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
     for scenario_file in sorted_scenarios:
-        read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
+        read_scenario(dataset_path, mapping, scenario_file)
     success, logs = verify_loading_into_metadrive(
-        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=4)
+        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=3)
     set_random_drop(False)
-    # regenerate
+    # get error file
     file_name = ErrorFile.get_error_file_name(dataset_path)
     error_file_path = os.path.join("test_dataset", file_name)
+    # regenerate
+    pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "passed_senarios")
+    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "failed_scenarios")
+    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, force_overwrite=True,
+                                                            broken_scenario=False)
+    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, force_overwrite=True,
+                                                            broken_scenario=True)
 
-    pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "passed_senarios")
-    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "failed_scenarios")
-    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, broken_scenario=False)
-    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, broken_scenario=True)
-
+    # assert
     read_pass_summary, _, read_pass_mapping = read_dataset_summary(pass_dataset)
+    assert recursive_equal(read_pass_summary, pass_summary)
+    assert recursive_equal(read_pass_mapping, pass_mapping)
     read_fail_summary, _, read_fail_mapping = read_dataset_summary(fail_dataset)
+    assert recursive_equal(read_fail_mapping, fail_mapping)
+    assert recursive_equal(read_fail_summary, fail_summary)
+
+    # assert pass + fail == origin
+    all_summaries = copy.deepcopy(read_pass_summary)
+    all_summaries.update(fail_summary)
+    assert recursive_equal(all_summaries, summary)
+
+    # test read
+    for scenario in read_pass_summary:
+        read_scenario(pass_dataset, read_pass_mapping, scenario)
+    for scenario in read_fail_summary:
+        read_scenario(fail_dataset, read_fail_mapping, scenario)
 
 
 if __name__ == '__main__':
diff --git a/scenarionet/verifier/utils.py b/scenarionet/verifier/utils.py
index c84e7ed..5a81644 100644
--- a/scenarionet/verifier/utils.py
+++ b/scenarionet/verifier/utils.py
@@ -85,7 +85,7 @@ def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, steps_to_run):
     try:
         env.reset(force_seed=scenario_index)
         arrive = False
-        if RANDOM_DROP and np.random.rand() < 0.8:
+        if RANDOM_DROP and np.random.rand() < 0.5:
             raise ValueError("Random Drop")
         for _ in range(steps_to_run):
             o, r, d, info = env.step([0, 0])
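
A minimal usage sketch (not part of the patch) of the new read_scenario
signature introduced in scenarionet/common_utils.py: instead of joining the
scenario path by hand, callers now pass the dataset directory, the mapping
returned by read_dataset_summary, and the file name. The tmp/combine path
below mirrors the tests in this patch and is purely illustrative.

    import os

    from scenarionet import SCENARIONET_PACKAGE_PATH
    from scenarionet.common_utils import read_dataset_summary, read_scenario

    # The directory that contains dataset_summary.pkl (illustrative path).
    dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")

    # summary: file name -> metadata; sorted_scenarios: sorted file names;
    # mapping: file name -> relative directory under dataset_path.
    summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)

    for scenario_file in sorted_scenarios:
        # Old API: read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
        # New API resolves the full path internally from dataset_path and mapping.
        scenario = read_scenario(dataset_path, mapping, scenario_file)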