From 5c3e516c0624c168d9284234d940091390e7904f Mon Sep 17 00:00:00 2001
From: QuanyiLi
Date: Mon, 8 May 2023 17:15:58 +0100
Subject: [PATCH] install md for github test

---
 .github/workflows/main.yml                 |  6 ++
 scenarionet/builder/utils.py               |  3 +-
 scenarionet/common_utils.py                |  6 +-
 scenarionet/converter/utils.py             | 94 +++++++++++--------
 scenarionet/scripts/combine_dataset.py     |  7 +-
 scenarionet/scripts/convert_nuplan.py      | 13 ++-
 scenarionet/scripts/convert_nuscenes.py    | 15 ++-
 scenarionet/scripts/convert_pg.py          | 12 ++-
 scenarionet/scripts/convert_waymo.py       | 12 ++-
 .../_test_generate_from_error_file.py      | 13 ++-
 .../tests/test_generate_from_error_file.py | 13 ++-
 scenarionet/verifier/error.py              | 11 +--
 scenarionet/verifier/utils.py              | 25 +++--
 13 files changed, 136 insertions(+), 94 deletions(-)

diff --git a/.github/workflows/main.yml b/.github/workflows/main.yml
index 6a59090..484f1a2 100644
--- a/.github/workflows/main.yml
+++ b/.github/workflows/main.yml
@@ -39,6 +39,12 @@ jobs:
         pip install pytest
         pip install pytest-cov
         pip install ray
+
+        git clone git@github.com:metadriverse/metadrive.git
+        cd metadrive
+        pip install -e .
+        cd ../
+
         cd scenarionet/
         pytest --cov=./ --cov-config=.coveragerc --cov-report=xml -sv tests
 
diff --git a/scenarionet/builder/utils.py b/scenarionet/builder/utils.py
index 35bc678..c9b3b43 100644
--- a/scenarionet/builder/utils.py
+++ b/scenarionet/builder/utils.py
@@ -26,8 +26,7 @@ def try_generating_summary(file_folder):
 
 
 def combine_multiple_dataset(
-    output_path, *dataset_paths, force_overwrite=False, try_generate_missing_file=True,
-    filters: List[Callable] = None
+    output_path, *dataset_paths, force_overwrite=False, try_generate_missing_file=True, filters: List[Callable] = None
 ):
     """
     Combine multiple datasets. Each dataset should have a dataset_summary.pkl
diff --git a/scenarionet/common_utils.py b/scenarionet/common_utils.py
index 14a7a00..664264c 100644
--- a/scenarionet/common_utils.py
+++ b/scenarionet/common_utils.py
@@ -71,8 +71,10 @@ def save_summary_anda_mapping(summary_file_path, mapping_file_path, summary, map
         pickle.dump(dict_recursive_remove_array_and_set(summary), file)
     with open(mapping_file_path, "wb") as file:
         pickle.dump(mapping, file)
-    print("\n ================ Dataset Summary and Mapping are saved at: {} "
-          "================ \n".format(summary_file_path))
+    print(
+        "\n ================ Dataset Summary and Mapping are saved at: {} "
+        "================ \n".format(summary_file_path)
+    )
 
 
 def read_dataset_summary(dataset_path):
diff --git a/scenarionet/converter/utils.py b/scenarionet/converter/utils.py
index a474253..4d96559 100644
--- a/scenarionet/converter/utils.py
+++ b/scenarionet/converter/utils.py
@@ -60,20 +60,24 @@ def contains_explicit_return(f):
     return any(isinstance(node, ast.Return) for node in ast.walk(ast.parse(inspect.getsource(f))))
 
 
-def write_to_directory(convert_func,
-                       scenarios,
-                       output_path,
-                       dataset_version,
-                       dataset_name,
-                       force_overwrite=False,
-                       num_workers=8,
-                       **kwargs):
+def write_to_directory(
+    convert_func,
+    scenarios,
+    output_path,
+    dataset_version,
+    dataset_name,
+    force_overwrite=False,
+    num_workers=8,
+    **kwargs
+):
     # make sure dir not exist
     save_path = copy.deepcopy(output_path)
     if os.path.exists(output_path):
         if not force_overwrite:
-            raise ValueError("Directory {} already exists! Abort. "
-                             "\n Try setting force_overwrite=True or adding --overwrite".format(output_path))
+            raise ValueError(
+                "Directory {} already exists! Abort. "
+                "\n Try setting force_overwrite=True or adding --overwrite".format(output_path)
+            )
 
     basename = os.path.basename(output_path)
     dir = os.path.dirname(output_path)
@@ -81,8 +85,10 @@
         output_path = os.path.join(dir, "{}_{}".format(basename, str(i)))
         if os.path.exists(output_path):
             if not force_overwrite:
-                raise ValueError("Directory {} already exists! Abort. "
-                                 "\n Try setting force_overwrite=True or adding --overwrite".format(output_path))
+                raise ValueError(
+                    "Directory {} already exists! Abort. "
+                    "\n Try setting force_overwrite=True or adding --overwrite".format(output_path)
+                )
     # get arguments for workers
     num_files = len(scenarios)
     if num_files < num_workers:
@@ -103,42 +109,46 @@
         argument_list.append([scenarios[i * num_files_each_worker:end_idx], kwargs, i, output_path])
 
     # prefill arguments
-    func = partial(writing_to_directory_wrapper,
-                   convert_func=convert_func,
-                   dataset_version=dataset_version,
-                   dataset_name=dataset_name,
-                   force_overwrite=force_overwrite)
+    func = partial(
+        writing_to_directory_wrapper,
+        convert_func=convert_func,
+        dataset_version=dataset_version,
+        dataset_name=dataset_name,
+        force_overwrite=force_overwrite
+    )
 
     # Run, workers and process result from worker
     with multiprocessing.Pool(num_workers) as p:
         all_result = list(p.imap(func, argument_list))
-    combine_multiple_dataset(save_path, *output_pathes, force_overwrite=force_overwrite,
-                             try_generate_missing_file=False)
+    combine_multiple_dataset(
+        save_path, *output_pathes, force_overwrite=force_overwrite, try_generate_missing_file=False
+    )
     return all_result
 
 
-def writing_to_directory_wrapper(args,
-                                 convert_func,
-                                 dataset_version,
-                                 dataset_name,
-                                 force_overwrite=False):
-    return write_to_directory_single_worker(convert_func=convert_func,
-                                            scenarios=args[0],
-                                            output_path=args[3],
-                                            dataset_version=dataset_version,
-                                            dataset_name=dataset_name,
-                                            force_overwrite=force_overwrite,
-                                            worker_index=args[2],
-                                            **args[1])
+def writing_to_directory_wrapper(args, convert_func, dataset_version, dataset_name, force_overwrite=False):
+    return write_to_directory_single_worker(
+        convert_func=convert_func,
+        scenarios=args[0],
+        output_path=args[3],
+        dataset_version=dataset_version,
+        dataset_name=dataset_name,
+        force_overwrite=force_overwrite,
+        worker_index=args[2],
+        **args[1]
+    )
 
 
-def write_to_directory_single_worker(convert_func,
-                                     scenarios,
-                                     output_path,
-                                     dataset_version,
-                                     dataset_name,
-                                     worker_index=0,
-                                     force_overwrite=False, **kwargs):
+def write_to_directory_single_worker(
+    convert_func,
+    scenarios,
+    output_path,
+    dataset_version,
+    dataset_name,
+    worker_index=0,
+    force_overwrite=False,
+    **kwargs
+):
     """
     Convert a batch of scenarios.
     """
@@ -162,8 +172,10 @@
         if force_overwrite:
             delay_remove = save_path
         else:
-            raise ValueError("Directory already exists! Abort."
-                             "\n Try setting force_overwrite=True or using --overwrite")
+            raise ValueError(
+                "Directory already exists! Abort."
+                "\n Try setting force_overwrite=True or using --overwrite"
+            )
 
     summary_file = SD.DATASET.SUMMARY_FILE
     mapping_file = SD.DATASET.MAPPING_FILE
diff --git a/scenarionet/scripts/combine_dataset.py b/scenarionet/scripts/combine_dataset.py
index 63def34..1f9cdac 100644
--- a/scenarionet/scripts/combine_dataset.py
+++ b/scenarionet/scripts/combine_dataset.py
@@ -9,7 +9,6 @@ if __name__ == '__main__':
     parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
     args = parser.parse_args()
     if len(args.from_datasets) != 0:
-        combine_multiple_dataset(args.to,
-                                 *args.from_datasets,
-                                 force_overwrite=args.overwrite,
-                                 try_generate_missing_file=True)
+        combine_multiple_dataset(
+            args.to, *args.from_datasets, force_overwrite=args.overwrite, try_generate_missing_file=True
+        )
diff --git a/scenarionet/scripts/convert_nuplan.py b/scenarionet/scripts/convert_nuplan.py
index 3b081fd..8184f75 100644
--- a/scenarionet/scripts/convert_nuplan.py
+++ b/scenarionet/scripts/convert_nuplan.py
@@ -7,10 +7,15 @@ from scenarionet.converter.utils import write_to_directory
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument("--dataset_name", "-n", default="nuplan",
-                        help="Dataset name, will be used to generate scenario files")
-    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "nuplan"),
-                        help="The path of the dataset")
+    parser.add_argument(
+        "--dataset_name", "-n", default="nuplan", help="Dataset name, will be used to generate scenario files"
+    )
+    parser.add_argument(
+        "--dataset_path",
+        "-d",
+        default=os.path.join(SCENARIONET_DATASET_PATH, "nuplan"),
+        help="The path of the dataset"
+    )
     parser.add_argument("--version", "-v", default='v1.1', help="version")
     parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
     parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
diff --git a/scenarionet/scripts/convert_nuscenes.py b/scenarionet/scripts/convert_nuscenes.py
index aa93772..1d9dc05 100644
--- a/scenarionet/scripts/convert_nuscenes.py
+++ b/scenarionet/scripts/convert_nuscenes.py
@@ -7,11 +7,16 @@ from scenarionet.converter.utils import write_to_directory
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument("--dataset_name", "-n", default="nuscenes",
-                        help="Dataset name, will be used to generate scenario files")
-    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "nuscenes"),
-                        help="The path of the dataset")
-    parser.add_argument("--version", "-v", default='v1.0-mini', help="version")
+    parser.add_argument(
+        "--dataset_name", "-n", default="nuscenes", help="Dataset name, will be used to generate scenario files"
+    )
+    parser.add_argument(
+        "--dataset_path",
+        "-d",
+        default=os.path.join(SCENARIONET_DATASET_PATH, "nuscenes"),
+        help="The path of the dataset"
+    )
+    parser.add_argument("--version", "-v", default='v1.0-mini', help="version")
     parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
     parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
     args = parser.parse_args()
diff --git a/scenarionet/scripts/convert_pg.py b/scenarionet/scripts/convert_pg.py
index 31a779b..3df1782 100644
--- a/scenarionet/scripts/convert_pg.py
+++ b/scenarionet/scripts/convert_pg.py
@@ -10,11 +10,13 @@ from scenarionet.converter.utils import write_to_directory
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument("--dataset_name", "-n", default="pg",
-                        help="Dataset name, will be used to generate scenario files")
-    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "pg"),
-                        help="The path of the dataset")
-    parser.add_argument("--version", "-v", default=metadrive.constants.DATA_VERSION, help="version")
+    parser.add_argument(
+        "--dataset_name", "-n", default="pg", help="Dataset name, will be used to generate scenario files"
+    )
+    parser.add_argument(
+        "--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "pg"), help="The path of the dataset"
+    )
+    parser.add_argument("--version", "-v", default=metadrive.constants.DATA_VERSION, help="version")
     parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
     parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
     args = parser.parse_args()
diff --git a/scenarionet/scripts/convert_waymo.py b/scenarionet/scripts/convert_waymo.py
index 7e6a6ce..ae4782d 100644
--- a/scenarionet/scripts/convert_waymo.py
+++ b/scenarionet/scripts/convert_waymo.py
@@ -10,11 +10,13 @@ logger = logging.getLogger(__name__)
 
 if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument("--dataset_name", "-n", default="waymo",
-                        help="Dataset name, will be used to generate scenario files")
-    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "waymo"),
-                        help="The path of the dataset")
-    parser.add_argument("--version", "-v", default='v1.2', help="version")
+    parser.add_argument(
+        "--dataset_name", "-n", default="waymo", help="Dataset name, will be used to generate scenario files"
+    )
+    parser.add_argument(
+        "--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "waymo"), help="The path of the dataset"
+    )
+    parser.add_argument("--version", "-v", default='v1.2', help="version")
     parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
     parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
     args = parser.parse_args()
diff --git a/scenarionet/tests/local_test/_test_generate_from_error_file.py b/scenarionet/tests/local_test/_test_generate_from_error_file.py
index 9f14ed2..021868d 100644
--- a/scenarionet/tests/local_test/_test_generate_from_error_file.py
+++ b/scenarionet/tests/local_test/_test_generate_from_error_file.py
@@ -31,7 +31,8 @@ def test_generate_from_error():
     for scenario_file in sorted_scenarios:
         read_scenario(dataset_path, mapping, scenario_file)
     success, logs = verify_loading_into_metadrive(
-        dataset_path, result_save_dir="../test_dataset", steps_to_run=1000, num_workers=16)
+        dataset_path, result_save_dir="../test_dataset", steps_to_run=1000, num_workers=16
+    )
     set_random_drop(False)
     # get error file
     file_name = ErrorFile.get_error_file_name(dataset_path)
@@ -39,10 +40,12 @@ def test_generate_from_error():
     # regenerate
     pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "passed_scenarios")
     fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "failed_scenarios")
-    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, force_overwrite=True,
-                                                            broken_scenario=False)
-    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, force_overwrite=True,
-                                                            broken_scenario=True)
+    pass_summary, pass_mapping = ErrorFile.generate_dataset(
+        error_file_path, pass_dataset, force_overwrite=True, broken_scenario=False
+    )
+    fail_summary, fail_mapping = ErrorFile.generate_dataset(
+        error_file_path, fail_dataset, force_overwrite=True, broken_scenario=True
+    )
 
     # assert
     read_pass_summary, _, read_pass_mapping = read_dataset_summary(pass_dataset)
diff --git a/scenarionet/tests/test_generate_from_error_file.py b/scenarionet/tests/test_generate_from_error_file.py
index ff2f569..66805b3 100644
--- a/scenarionet/tests/test_generate_from_error_file.py
+++ b/scenarionet/tests/test_generate_from_error_file.py
@@ -24,7 +24,8 @@ def test_generate_from_error():
     for scenario_file in sorted_scenarios:
         read_scenario(dataset_path, mapping, scenario_file)
     success, logs = verify_loading_into_metadrive(
-        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=3)
+        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=3
+    )
     set_random_drop(False)
     # get error file
     file_name = ErrorFile.get_error_file_name(dataset_path)
@@ -32,10 +33,12 @@ def test_generate_from_error():
     # regenerate
     pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "passed_senarios")
    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "failed_scenarios")
-    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, force_overwrite=True,
-                                                            broken_scenario=False)
-    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, force_overwrite=True,
-                                                            broken_scenario=True)
+    pass_summary, pass_mapping = ErrorFile.generate_dataset(
+        error_file_path, pass_dataset, force_overwrite=True, broken_scenario=False
+    )
+    fail_summary, fail_mapping = ErrorFile.generate_dataset(
+        error_file_path, fail_dataset, force_overwrite=True, broken_scenario=True
+    )
 
     # assert
     read_pass_summary, _, read_pass_mapping = read_dataset_summary(pass_dataset)
diff --git a/scenarionet/verifier/error.py b/scenarionet/verifier/error.py
index d436f85..597bcc4 100644
--- a/scenarionet/verifier/error.py
+++ b/scenarionet/verifier/error.py
@@ -24,10 +24,7 @@
             "\n Scenario Error, "
             "scenario_index: {}, file_path: {}.\n Error message: {}".format(scenario_index, file_path, str(error))
         )
-        return {cls.INDEX: scenario_index,
-                cls.PATH: file_path,
-                cls.FILE_NAME: file_name,
-                cls.ERROR: str(error)}
+        return {cls.INDEX: scenario_index, cls.PATH: file_path, cls.FILE_NAME: file_name, cls.ERROR: str(error)}
 
 
 class ErrorFile:
@@ -69,8 +66,10 @@
             if force_overwrite:
                 shutil.rmtree(new_dataset_path)
             else:
-                raise ValueError("Directory: {} already exists! "
-                                 "Set force_overwrite=True to overwrite".format(new_dataset_path))
+                raise ValueError(
+                    "Directory: {} already exists! "
+                    "Set force_overwrite=True to overwrite".format(new_dataset_path)
+                )
         os.makedirs(new_dataset_path, exist_ok=False)
 
         with open(error_file_path, "r+") as f:
diff --git a/scenarionet/verifier/utils.py b/scenarionet/verifier/utils.py
index d7bad6b..9bd89af 100644
--- a/scenarionet/verifier/utils.py
+++ b/scenarionet/verifier/utils.py
@@ -59,7 +59,8 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=10
         path = EF.dump(result_save_dir, errors, dataset_path)
         logger.info(
             "Fail to load all scenarios. Number of failed scenarios: {}. "
-            "See: {} more details! ".format(len(errors), path))
+            "See: {} more details! ".format(len(errors), path)
+        )
     return success, errors
 
 
@@ -67,17 +68,21 @@ def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, ste
     global RANDOM_DROP
     logger.info(
         "================ Begin Scenario Loading Verification for scenario {}-{} ================ \n".format(
-            start_scenario_index, num_scenario + start_scenario_index))
+            start_scenario_index, num_scenario + start_scenario_index
+        )
+    )
     success = True
     metadrive_config = metadrive_config or {}
-    metadrive_config.update({
-        "agent_policy": ReplayEgoCarPolicy,
-        "num_scenarios": num_scenario,
-        "horizon": 1000,
-        "start_scenario_index": start_scenario_index,
-        "no_static_vehicles": False,
-        "data_directory": dataset_path,
-    })
+    metadrive_config.update(
+        {
+            "agent_policy": ReplayEgoCarPolicy,
+            "num_scenarios": num_scenario,
+            "horizon": 1000,
+            "start_scenario_index": start_scenario_index,
+            "no_static_vehicles": False,
+            "data_directory": dataset_path,
+        }
+    )
     env = ScenarioEnv(metadrive_config)
     logging.disable(logging.INFO)
     error_msgs = []