move all test-generated files to tmp, add error-set generation test
.gitignore (vendored)
@@ -12,3 +12,7 @@
 dataset/*
 **/combine/
 **.json
+
+**/tmp/**
+**/failed_scenarios/
+**/passed_senarios/
@@ -1,3 +1,4 @@
+import os.path
 import pickle

 import numpy as np
@@ -77,5 +78,12 @@ def read_dataset_summary(dataset_path):
     return sd_utils.read_dataset_summary(dataset_path)


-def read_scenario(pkl_file_path):
-    return sd_utils.read_scenario_data(pkl_file_path)
+def read_scenario(dataset_path, mapping, scenario_file_name):
+    """
+    Read a scenario.
+    :param dataset_path: the directory containing dataset_summary.pkl
+    :param mapping: a dict recording the relative path from dataset_path to each real scenario file
+    :param scenario_file_name: the scenario file name
+    :return: ScenarioDescription
+    """
+    return sd_utils.read_scenario_data(os.path.join(dataset_path, mapping[scenario_file_name], scenario_file_name))
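For clarity, here is what this signature change means at a call site. This is an illustrative sketch, not part of the commit; it assumes only the two helpers defined in this file, with dataset_path standing in for any dataset directory that contains dataset_summary.pkl:

    import os
    from scenarionet.common_utils import read_dataset_summary, read_scenario

    dataset_path = "path/to/any/dataset"  # placeholder; must contain dataset_summary.pkl
    summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)

    for scenario_file in sorted_scenarios:
        # Old API: each caller assembled the pickle path by hand.
        # sd = read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
        # New API: the mapping lookup and os.path.join happen inside read_scenario.
        sd = read_scenario(dataset_path, mapping, scenario_file)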
@@ -12,13 +12,13 @@ def test_combine_multiple_dataset():
     original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
     dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]

-    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
+    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")
     combine_multiple_dataset(output_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)
     dataset_paths.append(output_path)
     for dataset_path in dataset_paths:
         summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
         for scenario_file in sorted_scenarios:
-            read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
+            read_scenario(dataset_path, mapping, scenario_file)
         success, result = verify_loading_into_metadrive(
             dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=4
         )
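As a reading aid, the triple returned by read_dataset_summary can be summarized as below; the exact value types are an assumption inferred from how the tests use them:

    summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
    # summary:          dict keyed by scenario file name (values: per-scenario metadata, assumed)
    # sorted_scenarios: list of scenario file names in a stable order
    # mapping:          dict from scenario file name to its directory relative to dataset_path,
    #                   i.e. the file lives at os.path.join(dataset_path, mapping[name], name)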
@@ -13,7 +13,7 @@ def test_filter_dataset():
     original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
     dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]

-    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
+    output_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")

     # ========================= test 1 =========================
     # nuscenes data has no light
@@ -1,9 +1,11 @@
+import copy
 import os
 import os.path
-from metadrive.scenario.utils import assert_scenario_equal
+
 from scenarionet import SCENARIONET_PACKAGE_PATH
 from scenarionet.builder.utils import combine_multiple_dataset
 from scenarionet.common_utils import read_dataset_summary, read_scenario
+from scenarionet.common_utils import recursive_equal
 from scenarionet.verifier.error import ErrorFile
 from scenarionet.verifier.utils import verify_loading_into_metadrive, set_random_drop

@@ -13,27 +15,44 @@ def test_combine_multiple_dataset():
     dataset_name = "nuscenes"
     original_dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "test_dataset", dataset_name)
     dataset_paths = [original_dataset_path + "_{}".format(i) for i in range(5)]
-    dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "combine")
+    dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")
     combine_multiple_dataset(dataset_path, *dataset_paths, force_overwrite=True, try_generate_missing_file=True)

     summary, sorted_scenarios, mapping = read_dataset_summary(dataset_path)
     for scenario_file in sorted_scenarios:
-        read_scenario(os.path.join(dataset_path, mapping[scenario_file], scenario_file))
+        read_scenario(dataset_path, mapping, scenario_file)
     success, logs = verify_loading_into_metadrive(
-        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=4)
+        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=3)
     set_random_drop(False)
-    # regenerate
+    # get error file
     file_name = ErrorFile.get_error_file_name(dataset_path)
     error_file_path = os.path.join("test_dataset", file_name)

-    pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "passed_senarios")
-    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "failed_scenarios")
-    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, broken_scenario=False)
-    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, broken_scenario=True)
+    # regenerate pass/fail datasets from the error file
+    pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "passed_senarios")
+    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "failed_scenarios")
+    pass_summary, pass_mapping = ErrorFile.generate_dataset(error_file_path, pass_dataset, force_overwrite=True,
+                                                            broken_scenario=False)
+    fail_summary, fail_mapping = ErrorFile.generate_dataset(error_file_path, fail_dataset, force_overwrite=True,
+                                                            broken_scenario=True)

+    # assert the regenerated datasets round-trip through read_dataset_summary
     read_pass_summary, _, read_pass_mapping = read_dataset_summary(pass_dataset)
+    assert recursive_equal(read_pass_summary, pass_summary)
+    assert recursive_equal(read_pass_mapping, pass_mapping)
     read_fail_summary, _, read_fail_mapping, = read_dataset_summary(fail_dataset)
+    assert recursive_equal(read_fail_mapping, fail_mapping)
+    assert recursive_equal(read_fail_summary, fail_summary)
+
+    # assert pass + fail == origin
+    all_summaries = copy.deepcopy(read_pass_summary)
+    all_summaries.update(fail_summary)
+    assert recursive_equal(all_summaries, summary)
+
+    # test read
+    for scenario in read_pass_summary:
+        read_scenario(pass_dataset, read_pass_mapping, scenario)
+    for scenario in read_fail_summary:
+        read_scenario(fail_dataset, read_fail_mapping, scenario)


 if __name__ == '__main__':
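In outline, the new test drives a full error-set round trip: verify the combined dataset with fault injection enabled, split it into passed and failed sets using the recorded error file, and check that the two splits exactly partition the original summary. A condensed sketch follows; the set_random_drop(True) call before verification is an assumption, since only the matching set_random_drop(False) is visible in this hunk:

    import copy
    import os

    from scenarionet import SCENARIONET_PACKAGE_PATH
    from scenarionet.common_utils import read_dataset_summary, recursive_equal
    from scenarionet.verifier.error import ErrorFile
    from scenarionet.verifier.utils import verify_loading_into_metadrive, set_random_drop

    dataset_path = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "combine")
    summary, _, _ = read_dataset_summary(dataset_path)

    # 1. Verify with fault injection on, so the error file records real failures.
    set_random_drop(True)  # assumed counterpart of set_random_drop(False) above
    success, logs = verify_loading_into_metadrive(
        dataset_path, result_save_dir="test_dataset", steps_to_run=1000, num_workers=3)
    set_random_drop(False)

    # 2. Split the dataset along the recorded errors.
    error_file = os.path.join("test_dataset", ErrorFile.get_error_file_name(dataset_path))
    pass_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "passed_senarios")
    fail_dataset = os.path.join(SCENARIONET_PACKAGE_PATH, "tests", "tmp", "failed_scenarios")
    pass_summary, _ = ErrorFile.generate_dataset(error_file, pass_dataset, force_overwrite=True, broken_scenario=False)
    fail_summary, _ = ErrorFile.generate_dataset(error_file, fail_dataset, force_overwrite=True, broken_scenario=True)

    # 3. The two splits must exactly partition the original summary.
    merged = copy.deepcopy(pass_summary)
    merged.update(fail_summary)
    assert recursive_equal(merged, summary)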
@@ -85,7 +85,7 @@ def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, ste
         try:
             env.reset(force_seed=scenario_index)
             arrive = False
-            if RANDOM_DROP and np.random.rand() < 0.8:
+            if RANDOM_DROP and np.random.rand() < 0.5:
                 raise ValueError("Random Drop")
             for _ in range(steps_to_run):
                 o, r, d, info = env.step([0, 0])
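The RANDOM_DROP flag is the fault-injection switch that set_random_drop toggles from the tests; lowering the threshold from 0.8 to 0.5 makes roughly half of the scenarios fail while the switch is on. A minimal sketch of the mechanism, assuming set_random_drop simply flips this module-level flag:

    import numpy as np

    RANDOM_DROP = False

    def set_random_drop(drop):
        # Assumed implementation: flip the module-level fault-injection flag.
        global RANDOM_DROP
        RANDOM_DROP = drop

    # Inside loading_into_metadrive, each scenario may then be failed on purpose:
    def maybe_drop():
        if RANDOM_DROP and np.random.rand() < 0.5:
            raise ValueError("Random Drop")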