import os
import os.path
from scenarionet import SCENARIONET_PACKAGE_PATH
from scenarionet.builder.utils import combine_multiple_dataset, read_dataset_summary, read_scenario
from scenarionet.verifier.utils import verify_loading_into_metadrive


def test_combine_multiple_dataset():
|
2023-05-07 13:52:43 +01:00
|
|
|
dataset_name = "nuscenes"
|
2023-05-07 19:13:51 +01:00
|
|
|
original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
|
2023-05-07 13:52:43 +01:00
|
|
|
dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]
|
|
|
|
|
|
2023-05-07 14:42:50 +01:00
|
|
|
output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
|
2023-05-07 23:18:45 +01:00
|
|
|
combine_multiple_dataset(output_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
|
2023-05-07 13:52:43 +01:00
|
|
|
dataset_paths.append(output_path)
|
|
|
|
|
for dataset_path in dataset_paths:
|
|
|
|
|
summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
|
|
|
|
|
for scenario_file in sorted_scenarios:
|
|
|
|
|
read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
|
2023-05-07 23:18:45 +01:00
|
|
|
success, result = verify_loading_into_metadrive(
|
2023-05-08 10:02:13 +01:00
|
|
|
dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=4
|
2023-05-07 23:18:45 +01:00
|
|
|
)
|
2023-05-07 15:59:38 +01:00
|
|
|
assert success


if __name__ == '__main__':
|
|
|
|
|
test_combine_multiple_dataset()