multi-processing test

QuanyiLi
2023-05-07 23:14:01 +01:00
parent 5c1b2e053b
commit 97a4e61f38
4 changed files with 24 additions and 21 deletions


@@ -38,22 +38,23 @@ if __name__ == '__main__':
         }
     )
     success = []
+    env.reset(force_seed=2)
     while True:
-        env.reset(force_seed=2)
-        for t in range(10000):
-            o, r, d, info = env.step([0, 0])
-            assert env.observation_space.contains(o)
-            c_lane = env.vehicle.lane
-            long, lat, = c_lane.local_coordinates(env.vehicle.position)
-            # if env.config["use_render"]:
-            env.render(
-                text={
-                    "seed": env.engine.global_seed + env.config["start_scenario_index"],
-                }
-            )
-            if d:
-                if info["arrive_dest"]:
-                    print("seed:{}, success".format(env.engine.global_random_seed))
-                break
+        for seed in [91]:
+            env.reset(force_seed=seed)
+            for t in range(10000):
+                o, r, d, info = env.step([0, 0])
+                assert env.observation_space.contains(o)
+                c_lane = env.vehicle.lane
+                long, lat, = c_lane.local_coordinates(env.vehicle.position)
+                # if env.config["use_render"]:
+                env.render(
+                    text={
+                        "seed": env.engine.global_seed + env.config["start_scenario_index"],
+                    }
+                )
+                if d:
+                    if info["arrive_dest"]:
+                        print("seed:{}, success".format(env.engine.global_random_seed))
+                    print(t)
+                    break


@@ -14,7 +14,7 @@ def _test_combine_dataset():
     combine_path = os.path.join(SCENARIONET_DATASET_PATH, "combined_dataset")
     combine_multiple_dataset(combine_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
     os.makedirs("verify_results", exist_ok=True)
-    success, result = verify_loading_into_metadrive(combine_path, "verify_results", steps_to_run=250)
+    success, result = verify_loading_into_metadrive(combine_path, "verify_results")
     assert success


@@ -21,9 +21,11 @@ def test_combine_multiple_dataset():
     summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
     for scenario_file in sorted_scenarios:
         read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
+    num_worker = 4 if len(summary) > 4 else 1
     success, result = verify_loading_into_metadrive(dataset_path,
                                                     result_save_dir="test_dataset",
-                                                    steps_to_run=300)
+                                                    steps_to_run=1000,
+                                                    num_workers=num_worker)
     assert success

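The num_worker = 4 if len(summary) > 4 else 1 line added above caps parallelism by dataset size, so a tiny test dataset is not spread across mostly idle workers. The same heuristic as a standalone helper, for illustration only (the name pick_worker_count is hypothetical and not part of this commit):

def pick_worker_count(num_scenarios, preferred=4):
    # Hypothetical helper mirroring the test's heuristic: use the preferred
    # worker count only when there are more scenarios than workers,
    # otherwise fall back to a single process.
    return preferred if num_scenarios > preferred else 1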

@@ -11,7 +11,7 @@ from metadrive.scenario.utils import get_number_of_scenarios
 from functools import partial
-def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=300, num_workers=8):
+def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=1000, num_workers=8):
     if result_save_dir is not None:
         assert os.path.exists(result_save_dir) and os.path.isdir(
             result_save_dir), "Argument result_save_dir must be an existing dir"
@@ -45,7 +45,7 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=30
     if result:
         print("All scenarios can be loaded successfully!")
     else:
-        print("Fail to load all scenarios, see log for more details! Number of failed scenarios: {}".format(logs))
+        print("Fail to load all scenarios, see log for more details! Number of failed scenarios: {}".format(len(logs)))
     return result, logs
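
For context, verify_loading_into_metadrive fans the dataset out over num_workers processes and aggregates per-worker failure logs, which is why the message above now reports len(logs) rather than the raw log object. A minimal sketch of that fan-out pattern, assuming a hypothetical _load_worker; this illustrates the technique and is not the actual scenarionet implementation:

import multiprocessing
from functools import partial


def _load_worker(scenario_indices, dataset_path, steps_to_run):
    # Hypothetical worker: load each assigned scenario from dataset_path,
    # step it for up to steps_to_run steps, and collect indices that fail.
    failed = []
    # ... real loading/stepping would happen here ...
    return failed


def verify_in_parallel(dataset_path, num_scenarios, steps_to_run=1000, num_workers=8):
    # Deal scenario indices round-robin, one chunk per worker.
    chunks = [list(range(i, num_scenarios, num_workers)) for i in range(num_workers)]
    worker = partial(_load_worker, dataset_path=dataset_path, steps_to_run=steps_to_run)
    with multiprocessing.Pool(num_workers) as pool:
        results = pool.map(worker, chunks)
    logs = [idx for chunk in results for idx in chunk]
    return len(logs) == 0, logs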