multi_processing test

QuanyiLi
2023-05-07 22:54:37 +01:00
parent 5ba769c201
commit 25bb9764c8
2 changed files with 46 additions and 30 deletions

View File

@@ -13,9 +13,9 @@ def _test_combine_dataset():
     combine_path = os.path.join(SCENARIONET_DATASET_PATH, "combined_dataset")
     combine_multiple_dataset(combine_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
-    success, result = verify_loading_into_metadrive(combine_path, "./", steps_to_run=250)
+    os.makedirs("verify_results", exist_ok=True)
+    success, result = verify_loading_into_metadrive(combine_path, "verify_results", steps_to_run=250)
     assert success


 if __name__ == '__main__':
     _test_combine_dataset()
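For readers of the test above: the new lines create the report directory before verification, since verify_loading_into_metadrive now writes a JSON error log into result_save_dir. A minimal usage sketch under that assumption (the paths here are illustrative, not from the repo):

import os

# Illustrative paths only; the real test builds combine_path from
# SCENARIONET_DATASET_PATH and combine_multiple_dataset().
combine_path = "/tmp/combined_dataset"
result_dir = "verify_results"

# Ensure the directory exists so the verifier can write its
# error_scenarios_for_<dataset>.json report into it.
os.makedirs(result_dir, exist_ok=True)

# success: bool, logs: list of per-scenario error records (see the verifier below).
# success, logs = verify_loading_into_metadrive(combine_path, result_dir, steps_to_run=250)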

View File

@@ -18,29 +18,40 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=30
     num_scenario = get_number_of_scenarios(dataset_path)
     argument_list = []
-    func = partial(_loading_into_metadrive,
+    func = partial(loading_wrapper,
                    dataset_path=dataset_path,
-                   result_save_dir=result_save_dir,
                    steps_to_run=steps_to_run)
-    num_scenario_each_worker = int(num_scenario / num_workers)
+    num_scenario_each_worker = int(num_scenario // num_workers)
     for i in range(num_workers):
         if i == num_workers - 1:
-            num_scenario_each_worker = num_scenario - num_scenario_each_worker * (num_workers - 1)
-        argument_list.append([i * num_scenario_each_worker, num_scenario_each_worker])
+            scenario_num = num_scenario - num_scenario_each_worker * (num_workers - 1)
+        else:
+            scenario_num = num_scenario_each_worker
+        argument_list.append([i * num_scenario_each_worker, scenario_num])
     with multiprocessing.Pool(num_workers) as p:
-        result = list(p.imap(func, argument_list))
-    if all([i[0] for i in result]):
+        all_result = list(p.imap(func, argument_list))
+    result = all([i[0] for i in all_result])
+    logs = []
+    for _, log in all_result:
+        logs += log
+    if result_save_dir is not None:
+        file_name = "error_scenarios_for_{}.json".format(os.path.basename(dataset_path))
+        with open(os.path.join(result_save_dir, file_name), "w+") as f:
+            json.dump(logs, f, indent=4)
+    if result:
         print("All scenarios can be loaded successfully!")
     else:
-        print("Fail to load all scenarios, see log for more details! Number of logs: {}".format(num_workers))
+        print("Failed to load all scenarios, see log for details! Number of failed scenarios: {}".format(len(logs)))
+    return result, logs


-def _loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, result_save_dir, steps_to_run):
-    print("================ Begin Scenario Loading Verification for {} ================ \n".format(result_save_dir))
-    assert os.path.exists(result_save_dir) and os.path.isdir(
-        result_save_dir), "Argument result_save_dir must be an existing dir"
+def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, steps_to_run):
+    print("================ Begin Scenario Loading Verification for scenarios {}-{} ================ \n".format(
+        start_scenario_index, num_scenario + start_scenario_index))
     success = True
     env = ScenarioEnv(
         {
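The chunking above assigns each worker num_scenario // num_workers scenarios and gives the remainder to the last worker. A self-contained sketch of just that arithmetic (split_work is a hypothetical helper, not part of the commit):

def split_work(num_scenario, num_workers):
    # Mirrors the loop in verify_loading_into_metadrive: equal chunks,
    # with the last worker absorbing the remainder.
    per_worker = num_scenario // num_workers
    argument_list = []
    for i in range(num_workers):
        if i == num_workers - 1:
            count = num_scenario - per_worker * (num_workers - 1)
        else:
            count = per_worker
        argument_list.append([i * per_worker, count])
    return argument_list

# 10 scenarios over 3 workers -> [[0, 3], [3, 3], [6, 4]]
print(split_work(10, 3))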
@@ -52,29 +63,34 @@ def _loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, result_save_dir, steps_to_run):
             "data_directory": dataset_path,
         }
     )
-    logging.disable(logging.WARNING)
+    logging.disable(logging.INFO)
     error_files = []
     try:
-        for i in tqdm.tqdm(range(num_scenario),
-                           desc="Scenarios: {}-{}".format(start_scenario_index, start_scenario_index + num_scenario)):
-            env.reset(force_seed=i)
-            for i in range(steps_to_run):
-                o, r, d, i = env.step([0, 0])
+        for scenario_index in tqdm.tqdm(range(start_scenario_index, start_scenario_index + num_scenario),
+                                        desc="Scenarios: {}-{}".format(start_scenario_index,
+                                                                       start_scenario_index + num_scenario)):
+            env.reset(force_seed=scenario_index)
+            for _ in range(steps_to_run):
+                o, r, d, info = env.step([0, 0])
                 if d:
-                    assert i["arrive_dest"], "Can not arrive destination"
+                    assert info["arrive_dest"], "Cannot arrive at destination"
     except Exception as e:
-        file_name = env.engine.data_manager.summary_lookup[i]
+        file_name = env.engine.data_manager.summary_lookup[scenario_index]
         file_path = os.path.join(dataset_path, env.engine.data_manager.mapping[file_name], file_name)
-        error_file = {"seed": i, "file_path": file_path, "error": e}
+        error_file = {"scenario_index": scenario_index, "file_path": file_path, "error": str(e)}
         error_files.append(error_file)
-        logger.warning("\n Scenario Error, seed: {}, file_path: {}.\n Error message: {}".format(i, file_path, e))
+        logger.warning("\n Scenario Error, "
+                       "scenario_index: {}, file_path: {}.\n Error message: {}".format(scenario_index, file_path,
+                                                                                       str(e)))
         success = False
     finally:
         env.close()
-    if result_save_dir is not None:
-        file_name = "error_scenarios_{}_{}_{}.json".format(os.path.basename(dataset_path),
-                                                           start_scenario_index,
-                                                           start_scenario_index + num_scenario)
-        with open(os.path.join(result_save_dir, file_name), "w+") as f:
-            json.dump(error_files, f)
     return success, error_files
+
+
+def loading_wrapper(arglist, dataset_path, steps_to_run):
+    assert len(arglist) == 2, "Too many arguments!"
+    return loading_into_metadrive(arglist[0],
+                                  arglist[1],
+                                  dataset_path=dataset_path,
+                                  steps_to_run=steps_to_run)
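loading_wrapper exists because Pool.imap hands the mapped function exactly one item from the iterable, while functools.partial pre-binds the settings shared by every worker. A minimal, runnable sketch of the same pattern (run_chunk is a stand-in for the MetaDrive loader, not the real worker):

import multiprocessing
from functools import partial


def run_chunk(arglist, dataset_path, steps_to_run):
    # imap delivers one [start_index, count] pair per call; the shared
    # keyword arguments arrive pre-bound via partial, as in the commit.
    start, count = arglist
    return True, ["{}: scenarios {}-{}".format(dataset_path, start, start + count)]


if __name__ == "__main__":
    func = partial(run_chunk, dataset_path="demo_dataset", steps_to_run=250)
    chunks = [[0, 3], [3, 3], [6, 4]]
    with multiprocessing.Pool(2) as p:
        results = list(p.imap(func, chunks))
    print(results)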