multiprocess verify
@@ -13,7 +13,7 @@ def _test_combine_dataset():
     combine_path = os.path.join(SCENARIONET_DATASET_PATH, "combined_dataset")
     combine_multiple_dataset(combine_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
-    success, result = verify_loading_into_metadrive(combine_path, steps_to_run=250)
+    success, result = verify_loading_into_metadrive(combine_path, "./", steps_to_run=250)
     assert success


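The test hunk above tracks the new signature introduced by the hunks that follow, which rewrite the verifier module: result_save_dir becomes a required positional argument (the test passes the working directory "./"), the default steps_to_run rises from 0 to 300, and verification fans out over worker processes. A minimal invocation under the new API could look like this (the import path and all paths below are assumptions for illustration, not part of the commit):

import os
from scenarionet.verifier.utils import verify_loading_into_metadrive  # import path assumed

dataset_path = "/data/combined_dataset"  # hypothetical dataset location
result_dir = "./verify_logs"             # the function asserts this dir exists
os.makedirs(result_dir, exist_ok=True)

# Replay every scenario for up to 250 physics steps across 4 worker processes;
# each worker dumps its error list as error_scenarios_*.json into result_dir.
verify_loading_into_metadrive(dataset_path, result_dir, steps_to_run=250, num_workers=4)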
@@ -1,5 +1,6 @@
 import json
 import logging
+import multiprocessing
 import os

 logger = logging.getLogger(__name__)
@@ -7,20 +8,46 @@ import tqdm
 from metadrive.envs.scenario_env import ScenarioEnv
 from metadrive.policy.replay_policy import ReplayEgoCarPolicy
 from metadrive.scenario.utils import get_number_of_scenarios
+from functools import partial


-def verify_loading_into_metadrive(dataset_path, result_save_dir=None, steps_to_run=0):
-    print("================ Begin Scenario Loading Verification for {} ================ \n".format(result_save_dir))
-    scenario_num = get_number_of_scenarios(dataset_path)
+def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=300, num_workers=8):
     if result_save_dir is not None:
         assert os.path.exists(result_save_dir) and os.path.isdir(
             result_save_dir), "Argument result_save_dir must be an existing dir"
+    num_scenario = get_number_of_scenarios(dataset_path)
+    argument_list = []
+
+    func = partial(_loading_into_metadrive,
+                   dataset_path=dataset_path,
+                   result_save_dir=result_save_dir,
+                   steps_to_run=steps_to_run)
+
+    num_scenario_each_worker = int(num_scenario / num_workers)
+    for i in range(num_workers):
+        if i == num_workers - 1:
+            num_scenario_each_worker = num_scenario - num_scenario_each_worker * (num_workers - 1)
+        argument_list.append([i * num_scenario_each_worker, num_scenario_each_worker])
+
+    with multiprocessing.Pool(num_workers) as p:
+        result = list(p.imap(func, argument_list))
+    if all([i[0] for i in result]):
+        print("All scenarios can be loaded successfully!")
+    else:
+        print("Fail to load all scenarios, see log for more details! Number of logs: {}".format(num_workers))
+
+
+def _loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, result_save_dir, steps_to_run):
+    print("================ Begin Scenario Loading Verification for {} ================ \n".format(result_save_dir))
+    assert os.path.exists(result_save_dir) and os.path.isdir(
+        result_save_dir), "Argument result_save_dir must be an existing dir"
     success = True
     env = ScenarioEnv(
         {
             "agent_policy": ReplayEgoCarPolicy,
-            "num_scenarios": scenario_num,
+            "num_scenarios": num_scenario,
             "horizon": 1000,
+            "start_scenario_index": start_scenario_index,
             "no_static_vehicles": False,
             "data_directory": dataset_path,
         }
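The fan-out above splits the scenario indices into contiguous per-worker blocks: every worker receives floor(num_scenario / num_workers) scenarios and the last worker also absorbs the remainder, while functools.partial pins the shared arguments so the pool only has to hand each worker its [start, count] block. A standalone sketch of that partitioning (pure Python; the helper name is mine, and the start index is always derived from the original chunk size so the blocks stay contiguous):

def split_scenarios(num_scenario, num_workers):
    # Each worker gets floor(num_scenario / num_workers) scenarios;
    # the last worker additionally takes the remainder.
    chunk = num_scenario // num_workers
    blocks = []
    for i in range(num_workers):
        count = num_scenario - chunk * (num_workers - 1) if i == num_workers - 1 else chunk
        blocks.append((i * chunk, count))
    return blocks

print(split_scenarios(10, 3))  # [(0, 3), (3, 3), (6, 4)]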
@@ -28,10 +55,13 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir=None, steps_to_run=0):
     logging.disable(logging.WARNING)
     error_files = []
     try:
-        for i in tqdm.tqdm(range(scenario_num)):
+        for i in tqdm.tqdm(range(num_scenario),
+                           desc="Scenarios: {}-{}".format(start_scenario_index, start_scenario_index + num_scenario)):
             env.reset(force_seed=i)
             for i in range(steps_to_run):
-                env.step([0, 0])
+                o, r, d, i = env.step([0, 0])
+                if d:
+                    assert i["arrive_dest"], "Can not arrive destination"
     except Exception as e:
         file_name = env.engine.data_manager.summary_lookup[i]
         file_path = os.path.join(dataset_path, env.engine.data_manager.mapping[file_name], file_name)
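env.step returns the classic four-tuple here, and the hunk reuses the name i for the scenario index, the step index, and finally the info dict, which makes the loop harder to read than it is. The same inner loop with distinct names (the names are mine; the behavior is unchanged):

for step in range(steps_to_run):
    # The ego car is driven by ReplayEgoCarPolicy, so the [0, 0] action has no effect.
    obs, reward, done, info = env.step([0, 0])
    if done:
        # A successfully replayed scenario must end by reaching its logged destination.
        assert info["arrive_dest"], "Can not arrive destination"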
@@ -42,7 +72,9 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir=None, steps_to_run=0):
     finally:
         env.close()
         if result_save_dir is not None:
-            with open(os.path.join(result_save_dir, "error_scenarios_{}.json".format(os.path.basename(dataset_path))),
-                      "w+") as f:
+            file_name = "error_scenarios_{}_{}_{}.json".format(os.path.basename(dataset_path),
+                                                               start_scenario_index,
+                                                               start_scenario_index + num_scenario)
+            with open(os.path.join(result_save_dir, file_name), "w+") as f:
                 json.dump(error_files, f)
     return success, error_files
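Each worker now writes its own report, so the file name encodes the scenario range it covered: error_scenarios_<dataset>_<start>_<end>.json. Merging the per-worker reports afterwards might look like this (a sketch; result_dir and the merge logic are my additions, not part of the commit):

import glob
import json
import os

result_dir = "./verify_logs"  # hypothetical, same dir passed as result_save_dir
all_errors = []
# One error_scenarios_<dataset>_<start>_<end>.json file per worker.
for path in sorted(glob.glob(os.path.join(result_dir, "error_scenarios_*.json"))):
    with open(path) as f:
        all_errors.extend(json.load(f))
print("{} scenarios failed to replay".format(len(all_errors)))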