install md for github test
.github/workflows/main.yml
@@ -39,6 +39,12 @@ jobs:
pip install pytest
pip install pytest-cov
pip install ray

git clone git@github.com:metadriverse/metadrive.git
cd metadrive
pip install -e .
cd ../

cd scenarionet/
pytest --cov=./ --cov-config=.coveragerc --cov-report=xml -sv tests
@@ -26,8 +26,7 @@ def try_generating_summary(file_folder):


def combine_multiple_dataset(
    output_path, *dataset_paths, force_overwrite=False, try_generate_missing_file=True,
    filters: List[Callable] = None
    output_path, *dataset_paths, force_overwrite=False, try_generate_missing_file=True, filters: List[Callable] = None
):
    """
    Combine multiple datasets. Each dataset should have a dataset_summary.pkl
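For context, a minimal usage sketch of the signature reformatted above (not part of this commit). The import path and the filter callable are assumptions; this hunk only shows that filters accepts a list of callables.

    # Sketch only, not from this commit. Import path and filter interface are assumed.
    from scenarionet.builder.utils import combine_multiple_dataset

    def keep_all(*args, **kwargs):
        # hypothetical no-op filter: accepts every scenario it is asked about
        return True

    combine_multiple_dataset(
        "/tmp/combined_dataset",      # output_path
        "/tmp/waymo_dataset",         # *dataset_paths
        "/tmp/nuscenes_dataset",
        force_overwrite=True,
        try_generate_missing_file=True,
        filters=[keep_all]
    )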
@@ -71,8 +71,10 @@ def save_summary_anda_mapping(summary_file_path, mapping_file_path, summary, map
        pickle.dump(dict_recursive_remove_array_and_set(summary), file)
    with open(mapping_file_path, "wb") as file:
        pickle.dump(mapping, file)
    print("\n ================ Dataset Summary and Mapping are saved at: {} "
          "================ \n".format(summary_file_path))
    print(
        "\n ================ Dataset Summary and Mapping are saved at: {} "
        "================ \n".format(summary_file_path)
    )


def read_dataset_summary(dataset_path):
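The two pickle files written by save_summary_anda_mapping can be read back directly; a minimal sketch, assuming placeholder file names (dataset_summary.pkl comes from the docstring above, the mapping file name is a guess).

    # Sketch only: the inverse of the pickle.dump calls shown in this hunk.
    import pickle

    summary_file_path = "dataset_summary.pkl"    # name taken from the docstring above
    mapping_file_path = "dataset_mapping.pkl"    # placeholder; the real name comes from SD.DATASET.MAPPING_FILE

    with open(summary_file_path, "rb") as file:
        summary = pickle.load(file)
    with open(mapping_file_path, "rb") as file:
        mapping = pickle.load(file)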
@@ -60,20 +60,24 @@ def contains_explicit_return(f):
    return any(isinstance(node, ast.Return) for node in ast.walk(ast.parse(inspect.getsource(f))))


def write_to_directory(convert_func,
                       scenarios,
                       output_path,
                       dataset_version,
                       dataset_name,
                       force_overwrite=False,
                       num_workers=8,
                       **kwargs):
def write_to_directory(
    convert_func,
    scenarios,
    output_path,
    dataset_version,
    dataset_name,
    force_overwrite=False,
    num_workers=8,
    **kwargs
):
    # make sure dir not exist
    save_path = copy.deepcopy(output_path)
    if os.path.exists(output_path):
        if not force_overwrite:
            raise ValueError("Directory {} already exists! Abort. "
                             "\n Try setting force_overwrite=True or adding --overwrite".format(output_path))
            raise ValueError(
                "Directory {} already exists! Abort. "
                "\n Try setting force_overwrite=True or adding --overwrite".format(output_path)
            )

    basename = os.path.basename(output_path)
    dir = os.path.dirname(output_path)
@@ -81,8 +85,10 @@ def write_to_directory(convert_func,
        output_path = os.path.join(dir, "{}_{}".format(basename, str(i)))
        if os.path.exists(output_path):
            if not force_overwrite:
                raise ValueError("Directory {} already exists! Abort. "
                                 "\n Try setting force_overwrite=True or adding --overwrite".format(output_path))
                raise ValueError(
                    "Directory {} already exists! Abort. "
                    "\n Try setting force_overwrite=True or adding --overwrite".format(output_path)
                )
    # get arguments for workers
    num_files = len(scenarios)
    if num_files < num_workers:
@@ -103,42 +109,46 @@ def write_to_directory(convert_func,
        argument_list.append([scenarios[i * num_files_each_worker:end_idx], kwargs, i, output_path])

    # prefill arguments
    func = partial(writing_to_directory_wrapper,
                   convert_func=convert_func,
                   dataset_version=dataset_version,
                   dataset_name=dataset_name,
                   force_overwrite=force_overwrite)
    func = partial(
        writing_to_directory_wrapper,
        convert_func=convert_func,
        dataset_version=dataset_version,
        dataset_name=dataset_name,
        force_overwrite=force_overwrite
    )

    # Run, workers and process result from worker
    with multiprocessing.Pool(num_workers) as p:
        all_result = list(p.imap(func, argument_list))
    combine_multiple_dataset(save_path, *output_pathes, force_overwrite=force_overwrite,
                             try_generate_missing_file=False)
    combine_multiple_dataset(
        save_path, *output_pathes, force_overwrite=force_overwrite, try_generate_missing_file=False
    )
    return all_result


def writing_to_directory_wrapper(args,
                                 convert_func,
                                 dataset_version,
                                 dataset_name,
                                 force_overwrite=False):
    return write_to_directory_single_worker(convert_func=convert_func,
                                            scenarios=args[0],
                                            output_path=args[3],
                                            dataset_version=dataset_version,
                                            dataset_name=dataset_name,
                                            force_overwrite=force_overwrite,
                                            worker_index=args[2],
                                            **args[1])
def writing_to_directory_wrapper(args, convert_func, dataset_version, dataset_name, force_overwrite=False):
    return write_to_directory_single_worker(
        convert_func=convert_func,
        scenarios=args[0],
        output_path=args[3],
        dataset_version=dataset_version,
        dataset_name=dataset_name,
        force_overwrite=force_overwrite,
        worker_index=args[2],
        **args[1]
    )


def write_to_directory_single_worker(convert_func,
                                     scenarios,
                                     output_path,
                                     dataset_version,
                                     dataset_name,
                                     worker_index=0,
                                     force_overwrite=False, **kwargs):
def write_to_directory_single_worker(
    convert_func,
    scenarios,
    output_path,
    dataset_version,
    dataset_name,
    worker_index=0,
    force_overwrite=False,
    **kwargs
):
    """
    Convert a batch of scenarios.
    """
@@ -162,8 +172,10 @@ def write_to_directory_single_worker(convert_func,
        if force_overwrite:
            delay_remove = save_path
        else:
            raise ValueError("Directory already exists! Abort."
                             "\n Try setting force_overwrite=True or using --overwrite")
            raise ValueError(
                "Directory already exists! Abort."
                "\n Try setting force_overwrite=True or using --overwrite"
            )

    summary_file = SD.DATASET.SUMMARY_FILE
    mapping_file = SD.DATASET.MAPPING_FILE
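For reference, a hedged sketch of calling the reformatted write_to_directory. The import is the one used by the convert scripts below; the converter callable and the scenario list are placeholders, not part of this commit.

    # Sketch only. convert_one_scenario and the empty scenario list are placeholders;
    # the keyword arguments mirror the signature shown in this hunk.
    from scenarionet.converter.utils import write_to_directory

    def convert_one_scenario(*args, **kwargs):   # hypothetical dataset-specific converter
        ...

    write_to_directory(
        convert_func=convert_one_scenario,
        scenarios=[],                  # placeholder; normally the raw scenario records
        output_path="/tmp/my_dataset",
        dataset_version="v0",
        dataset_name="demo",
        force_overwrite=True,
        num_workers=8
    )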
@@ -9,7 +9,6 @@ if __name__ == '__main__':
    parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
    args = parser.parse_args()
    if len(args.from_datasets) != 0:
        combine_multiple_dataset(args.to,
                                 *args.from_datasets,
                                 force_overwrite=args.overwrite,
                                 try_generate_missing_file=True)
        combine_multiple_dataset(
            args.to, *args.from_datasets, force_overwrite=args.overwrite, try_generate_missing_file=True
        )
@@ -7,10 +7,15 @@ from scenarionet.converter.utils import write_to_directory

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_name", "-n", default="nuplan",
                        help="Dataset name, will be used to generate scenario files")
    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "nuplan"),
                        help="The path of the dataset")
    parser.add_argument(
        "--dataset_name", "-n", default="nuplan", help="Dataset name, will be used to generate scenario files"
    )
    parser.add_argument(
        "--dataset_path",
        "-d",
        default=os.path.join(SCENARIONET_DATASET_PATH, "nuplan"),
        help="The path of the dataset"
    )
    parser.add_argument("--version", "-v", default='v1.1', help="version")
    parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
    parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
@@ -7,11 +7,16 @@ from scenarionet.converter.utils import write_to_directory

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_name", "-n", default="nuscenes",
                        help="Dataset name, will be used to generate scenario files")
    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "nuscenes"),
                        help="The path of the dataset")
    parser.add_argument("--version", "-v", default='v1.0-mini', help="version")
    parser.add_argument(
        "--dataset_name", "-n", default="nuscenes", help="Dataset name, will be used to generate scenario files"
    )
    parser.add_argument(
        "--dataset_path",
        "-d",
        default=os.path.join(SCENARIONET_DATASET_PATH, "nuscenes"),
        help="The path of the dataset"
    )
    parser.add_argument("--version", "-v", default='v1.0-mini', help="version")
    parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
    parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
    args = parser.parse_args()
@@ -10,11 +10,13 @@ from scenarionet.converter.utils import write_to_directory

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_name", "-n", default="pg",
                        help="Dataset name, will be used to generate scenario files")
    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "pg"),
                        help="The path of the dataset")
    parser.add_argument("--version", "-v", default=metadrive.constants.DATA_VERSION, help="version")
    parser.add_argument(
        "--dataset_name", "-n", default="pg", help="Dataset name, will be used to generate scenario files"
    )
    parser.add_argument(
        "--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "pg"), help="The path of the dataset"
    )
    parser.add_argument("--version", "-v", default=metadrive.constants.DATA_VERSION, help="version")
    parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
    parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
    args = parser.parse_args()
@@ -10,11 +10,13 @@ logger = logging.getLogger(__name__)

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset_name", "-n", default="waymo",
                        help="Dataset name, will be used to generate scenario files")
    parser.add_argument("--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "waymo"),
                        help="The path of the dataset")
    parser.add_argument("--version", "-v", default='v1.2', help="version")
    parser.add_argument(
        "--dataset_name", "-n", default="waymo", help="Dataset name, will be used to generate scenario files"
    )
    parser.add_argument(
        "--dataset_path", "-d", default=os.path.join(SCENARIONET_DATASET_PATH, "waymo"), help="The path of the dataset"
    )
    parser.add_argument("--version", "-v", default='v1.2', help="version")
    parser.add_argument("--overwrite", action="store_true", help="If the dataset_path exists, overwrite it")
    parser.add_argument("--num_workers", type=int, default=8, help="number of workers to use")
    args = parser.parse_args()
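The four convert scripts above share the same argparse options; a hedged sketch of how the parsed arguments are typically forwarded to write_to_directory. The scenario-collection helper, the converter name, and the input/output wiring are assumptions, since these hunks only show the parser definitions.

    # Sketch only; get_scenarios and convert_one_scenario are hypothetical names,
    # and which path serves as input vs. output is not shown in these hunks.
    scenarios = get_scenarios(args.dataset_path, version=args.version)
    write_to_directory(
        convert_func=convert_one_scenario,
        scenarios=scenarios,
        output_path=args.dataset_path,
        dataset_version=args.version,
        dataset_name=args.dataset_name,
        force_overwrite=args.overwrite,
        num_workers=args.num_workers
    )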
@@ -31,7 +31,8 @@ def test_generate_from_error():
    for scenario_file in sorted_scenarios:
        read_scenario(dataset_path, mapping, scenario_file)
    success, logs = verify_loading_into_metadrive(
        dataset_path, result_save_dir="../test_dataset", steps_to_run=1000, num_workers=16)
        dataset_path, result_save_dir="../test_dataset", steps_to_run=1000, num_workers=16
    )
    set_random_drop(False)
    # get error file
    file_name = ErrorFile.get_error_file_name(dataset_path)
@@ -39,10 +40,12 @@ def test_generate_from_error():
    # regenerate
    pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "passed_scenarios")
    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "failed_scenarios")
    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, force_overwrite=True,
                                                            broken_scenario=False)
    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, force_overwrite=True,
                                                            broken_scenario=True)
    pass_summary, pass_mapping = ErrorFile.generate_dataset(
        error_file_path, pass_dataset, force_overwrite=True, broken_scenario=False
    )
    fail_summary, fail_mapping = ErrorFile.generate_dataset(
        error_file_path, fail_dataset, force_overwrite=True, broken_scenario=True
    )

    # assert
    read_pass_summary, _, read_pass_mapping = read_dataset_summary(pass_dataset)
@@ -24,7 +24,8 @@ def test_generate_from_error():
    for scenario_file in sorted_scenarios:
        read_scenario(dataset_path, mapping, scenario_file)
    success, logs = verify_loading_into_metadrive(
        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=3)
        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=3
    )
    set_random_drop(False)
    # get error file
    file_name = ErrorFile.get_error_file_name(dataset_path)
@@ -32,10 +33,12 @@ def test_generate_from_error():
    # regenerate
    pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "passed_senarios")
    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "failed_scenarios")
    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, force_overwrite=True,
                                                            broken_scenario=False)
    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, force_overwrite=True,
                                                            broken_scenario=True)
    pass_summary, pass_mapping = ErrorFile.generate_dataset(
        error_file_path, pass_dataset, force_overwrite=True, broken_scenario=False
    )
    fail_summary, fail_mapping = ErrorFile.generate_dataset(
        error_file_path, fail_dataset, force_overwrite=True, broken_scenario=True
    )

    # assert
    read_pass_summary, _, read_pass_mapping = read_dataset_summary(pass_dataset)
@@ -24,10 +24,7 @@ class ErrorDescription:
            "\n Scenario Error, "
            "scenario_index: {}, file_path: {}.\n Error message: {}".format(scenario_index, file_path, str(error))
        )
        return {cls.INDEX: scenario_index,
                cls.PATH: file_path,
                cls.FILE_NAME: file_name,
                cls.ERROR: str(error)}
        return {cls.INDEX: scenario_index, cls.PATH: file_path, cls.FILE_NAME: file_name, cls.ERROR: str(error)}


class ErrorFile:
@@ -69,8 +66,10 @@ class ErrorFile:
            if force_overwrite:
                shutil.rmtree(new_dataset_path)
            else:
                raise ValueError("Directory: {} already exists! "
                                 "Set force_overwrite=True to overwrite".format(new_dataset_path))
                raise ValueError(
                    "Directory: {} already exists! "
                    "Set force_overwrite=True to overwrite".format(new_dataset_path)
                )
        os.makedirs(new_dataset_path, exist_ok=False)

        with open(error_file_path, "r+") as f:
@@ -59,7 +59,8 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=10
        path = EF.dump(result_save_dir, errors, dataset_path)
        logger.info(
            "Fail to load all scenarios. Number of failed scenarios: {}. "
            "See: {} more details! ".format(len(errors), path))
            "See: {} more details! ".format(len(errors), path)
        )
    return success, errors
@@ -67,17 +68,21 @@ def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, ste
    global RANDOM_DROP
    logger.info(
        "================ Begin Scenario Loading Verification for scenario {}-{} ================ \n".format(
            start_scenario_index, num_scenario + start_scenario_index))
            start_scenario_index, num_scenario + start_scenario_index
        )
    )
    success = True
    metadrive_config = metadrive_config or {}
    metadrive_config.update({
        "agent_policy": ReplayEgoCarPolicy,
        "num_scenarios": num_scenario,
        "horizon": 1000,
        "start_scenario_index": start_scenario_index,
        "no_static_vehicles": False,
        "data_directory": dataset_path,
    })
    metadrive_config.update(
        {
            "agent_policy": ReplayEgoCarPolicy,
            "num_scenarios": num_scenario,
            "horizon": 1000,
            "start_scenario_index": start_scenario_index,
            "no_static_vehicles": False,
            "data_directory": dataset_path,
        }
    )
    env = ScenarioEnv(metadrive_config)
    logging.disable(logging.INFO)
    error_msgs = []
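For context, a minimal sketch of the replay loop that this config drives in MetaDrive. Import paths and the reset/step calls are assumptions and may differ across MetaDrive versions; the dataset path is a placeholder.

    # Sketch only, not from this commit: replay converted scenarios in MetaDrive.
    from metadrive.envs.scenario_env import ScenarioEnv             # import path assumed
    from metadrive.policy.replay_policy import ReplayEgoCarPolicy   # import path assumed

    env = ScenarioEnv({
        "agent_policy": ReplayEgoCarPolicy,
        "num_scenarios": 1,
        "start_scenario_index": 0,
        "data_directory": "/tmp/my_dataset",   # placeholder path to a converted dataset
    })
    try:
        env.reset()
        for _ in range(1000):     # mirrors steps_to_run / horizon used above
            env.step([0, 0])      # actions are ignored under the replay policy
    finally:
        env.close()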