commit 817e7c173c
parent 297f0d59f0
Author: QuanyiLi
Date:   2023-05-08 11:40:47 +01:00

    build new dataset from error logs

10 changed files with 166 additions and 67 deletions

scenarionet/builder/utils.py

@@ -7,6 +7,7 @@ import shutil
 from typing import Callable, List
 import metadrive.scenario.utils as sd_utils
+import numpy as np
 from metadrive.scenario.scenario_description import ScenarioDescription

 logger = logging.getLogger(__name__)
@@ -33,7 +34,8 @@ def try_generating_summary(file_folder):
 def combine_multiple_dataset(
-    output_path, *dataset_paths, force_overwrite=False, try_generate_missing_file=True, filters: List[Callable] = None
+    output_path, *dataset_paths, force_overwrite=False, try_generate_missing_file=True,
+    filters: List[Callable] = None
 ):
     """
     Combine multiple datasets. Each dataset should have a dataset_summary.pkl
@@ -99,10 +101,27 @@ def combine_multiple_dataset(
         summaries.pop(file)
         mappings.pop(file)
-    with open(osp.join(output_abs_path, ScenarioDescription.DATASET.SUMMARY_FILE), "wb+") as f:
-        pickle.dump(summaries, f)
-    with open(osp.join(output_abs_path, ScenarioDescription.DATASET.MAPPING_FILE), "wb+") as f:
-        pickle.dump(mappings, f)
+    summary_file = osp.join(output_abs_path, ScenarioDescription.DATASET.SUMMARY_FILE)
+    mapping_file = osp.join(output_abs_path, ScenarioDescription.DATASET.MAPPING_FILE)
+    save_summary_anda_mapping(summary_file, mapping_file, summaries, mappings)
     return summaries, mappings
+
+
+def dict_recursive_remove_array_and_set(d):
+    if isinstance(d, np.ndarray):
+        return d.tolist()
+    if isinstance(d, set):
+        return tuple(d)
+    if isinstance(d, dict):
+        for k in d.keys():
+            d[k] = dict_recursive_remove_array_and_set(d[k])
+    return d
+
+
+def save_summary_anda_mapping(summary_file_path, mapping_file_path, summary, mapping):
+    with open(summary_file_path, "wb") as file:
+        pickle.dump(dict_recursive_remove_array_and_set(summary), file)
+    with open(mapping_file_path, "wb") as file:
+        pickle.dump(mapping, file)
+    print("Dataset Summary and Mapping are saved at: {}".format(summary_file_path))

scenarionet/converter/utils.py

@@ -6,7 +6,7 @@ import math
 import os
 import pickle
 import shutil
+from scenarionet.builder.utils import save_summary_anda_mapping
 import numpy as np
 import tqdm
 from metadrive.scenario import ScenarioDescription as SD
@@ -46,17 +46,6 @@ def compute_angular_velocity(initial_heading, final_heading, dt):
     return angular_vel
-
-
-def dict_recursive_remove_array_and_set(d):
-    if isinstance(d, np.ndarray):
-        return d.tolist()
-    if isinstance(d, set):
-        return tuple(d)
-    if isinstance(d, dict):
-        for k in d.keys():
-            d[k] = dict_recursive_remove_array_and_set(d[k])
-    return d


 def mph_to_kmh(speed_in_mph: float):
     speed_in_kmh = speed_in_mph * 1.609344
     return speed_in_kmh
@@ -67,7 +56,7 @@ def contains_explicit_return(f):
 def write_to_directory(
     convert_func, scenarios, output_path, dataset_version, dataset_name, force_overwrite=False, **kwargs
 ):
     """
     Convert a batch of scenarios.
@@ -134,12 +123,8 @@ def write_to_directory(
         with open(p, "wb") as f:
             pickle.dump(sd_scenario, f)

-    # store summary file, which is human-readable
-    with open(summary_file_path, "wb") as file:
-        pickle.dump(dict_recursive_remove_array_and_set(summary), file)
-    with open(mapping_file_path, "wb") as file:
-        pickle.dump(mapping, file)
-    print("Dataset Summary and Mapping are saved at: {}".format(summary_file_path))
+    # store summary file
+    save_summary_anda_mapping(summary_file_path, mapping_file_path, summary, mapping)

     # rename and save
     if delay_remove is not None:

scenarionet/verifier/error.py

@@ -0,0 +1,95 @@
+import json
+import logging
+import os
+from typing import List
+
+from metadrive.scenario.scenario_description import ScenarioDescription as SD
+
+from scenarionet.builder.utils import read_dataset_summary
+from scenarionet.builder.utils import save_summary_anda_mapping
+
+logger = logging.getLogger(__name__)
+
+
+class ErrorDescription:
+    INDEX = "scenario_index"
+    PATH = "file_path"
+    FILE_NAME = "file_name"
+    ERROR = "error_message"
+    METADATA = "metadata"
+
+    @classmethod
+    def make(cls, scenario_index, file_path, file_name, error):
+        logger.warning(
+            "\n Scenario Error, "
+            "scenario_index: {}, file_path: {}.\n Error message: {}".format(scenario_index, file_path, str(error))
+        )
+        return {cls.INDEX: scenario_index,
+                cls.PATH: file_path,
+                cls.FILE_NAME: file_name,
+                cls.ERROR: str(error)}
+
+
+class ErrorFile:
+    PREFIX = "error_scenarios_for"
+    DATASET = "dataset_path"
+    ERRORS = "errors"
+
+    @classmethod
+    def dump(cls, save_dir, errors: List, dataset_path):
+        """
+        Save the verification result.
+        :param save_dir: the directory in which to save this file
+        :param errors: a list of dicts produced by ErrorDescription.make()
+        :param dataset_path: the directory containing dataset_summary.pkl
+        """
+        file_name = "{}_{}.json".format(cls.PREFIX, os.path.basename(dataset_path))
+        with open(os.path.join(save_dir, file_name), "w+") as f:
+            json.dump({cls.DATASET: dataset_path, cls.ERRORS: errors}, f, indent=4)
+
+    @classmethod
+    def generate_dataset(cls, error_file_path, new_dataset_path, force_overwrite=False, broken_scenario=False):
+        """
+        Generate a new dataset containing all broken scenarios or all good scenarios.
+        :param error_file_path: path of the error file
+        :param new_dataset_path: the directory where the new dataset will be stored
+        :param force_overwrite: whether to overwrite new_dataset_path if it already exists
+        :param broken_scenario: if True, collect the broken scenarios instead of the good ones, useful for debugging
+        :return: dataset summary, dataset mapping
+        """
+        # TODO Add test!
+        new_dataset_path = os.path.abspath(new_dataset_path)
+        if os.path.exists(new_dataset_path) and not force_overwrite:
+            raise ValueError("Directory: {} already exists! "
+                             "Set force_overwrite=True to overwrite".format(new_dataset_path))
+        os.makedirs(new_dataset_path, exist_ok=True)
+
+        with open(error_file_path, "r+") as f:
+            error_file = json.load(f)
+        origin_dataset_path = error_file[cls.DATASET]
+        origin_summary, origin_list, origin_mapping = read_dataset_summary(origin_dataset_path)
+        errors = error_file[cls.ERRORS]
+
+        # make new summary
+        new_summary = {}
+        new_mapping = {}
+        new_summary_file_path = os.path.join(new_dataset_path, SD.DATASET.SUMMARY_FILE)
+        new_mapping_file_path = os.path.join(new_dataset_path, SD.DATASET.MAPPING_FILE)
+
+        if broken_scenario:
+            for error in errors:
+                file_name = error[ErrorDescription.FILE_NAME]
+                new_summary[file_name] = origin_summary[file_name]
+                scenario_dir = os.path.join(origin_dataset_path, origin_mapping[file_name])
+                new_mapping[file_name] = os.path.relpath(scenario_dir, new_dataset_path)
+        else:
+            error_scenario = [error[ErrorDescription.FILE_NAME] for error in errors]
+            for scenario in origin_summary:
+                if scenario in error_scenario:
+                    continue
+                new_summary[scenario] = origin_summary[scenario]
+                scenario_dir = os.path.join(origin_dataset_path, origin_mapping[scenario])
+                new_mapping[scenario] = os.path.relpath(scenario_dir, new_dataset_path)
+
+        save_summary_anda_mapping(new_summary_file_path, new_mapping_file_path, new_summary, new_mapping)
+        return new_summary, new_mapping
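
Because the error file stores the original dataset path next to the per-scenario records, rebuilding a filtered dataset needs only the JSON path. A minimal usage sketch, assuming an error file produced by a previous verification run; all paths are hypothetical:

from scenarionet.verifier.error import ErrorFile

# Keep only the scenarios that loaded cleanly; broken_scenario=True would
# instead collect the failing ones for debugging.
summary, mapping = ErrorFile.generate_dataset(
    error_file_path="./results/error_scenarios_for_waymo.json",
    new_dataset_path="./waymo_clean",
    force_overwrite=True,
    broken_scenario=False,
)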

scenarionet/verifier/utils.py

@@ -1,8 +1,12 @@
-import json
 import logging
 import multiprocessing
 import os
+
+from metadrive.scenario.scenario_description import ScenarioDescription as SD
+from scenarionet.verifier.error import ErrorDescription as ED
+from scenarionet.verifier.error import ErrorFile as EF
+
 logger = logging.getLogger(__name__)
 import tqdm
 from metadrive.envs.scenario_env import ScenarioEnv
@@ -12,15 +16,14 @@ from functools import partial
 def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=1000, num_workers=8):
-    if result_save_dir is not None:
-        assert os.path.exists(result_save_dir
-                              ) and os.path.isdir(result_save_dir), "Argument result_save_dir must be an existing dir"
+    assert os.path.exists(result_save_dir) and os.path.isdir(result_save_dir), "result_save_dir must be an existing dir"
     num_scenario = get_number_of_scenarios(dataset_path)
     if num_scenario < num_workers:
         # single process
         logger.info("Use one worker, as num_scenario < num_workers:")
         num_workers = 1
+    # prepare arguments
     argument_list = []
     func = partial(loading_wrapper, dataset_path=dataset_path, steps_to_run=steps_to_run)
@@ -32,47 +35,46 @@ def verify_loading_into_metadrive(dataset_path, result_save_dir, steps_to_run=10
             scenario_num = num_scenario_each_worker
         argument_list.append([i * num_scenario_each_worker, scenario_num])

+    # Run, workers and process result from worker
     with multiprocessing.Pool(num_workers) as p:
         all_result = list(p.imap(func, argument_list))
     result = all([i[0] for i in all_result])
-    logs = []
-    for _, log in all_result:
-        logs += log
-    if result_save_dir is not None:
-        file_name = "error_scenarios_for_{}.json".format(os.path.basename(dataset_path))
-        with open(os.path.join(result_save_dir, file_name), "w+") as f:
-            json.dump(logs, f, indent=4)
-    # logging
+    errors = []
+    for _, error in all_result:
+        errors += error
+
+    # save result
+    EF.dump(result_save_dir, errors, dataset_path)
+
     if result:
         logger.info("All scenarios can be loaded successfully!")
     else:
         logger.info(
-            "Fail to load all scenarios, see log for more details! Number of failed scenarios: {}".format(len(logs)))
-    return result, logs
+            "Fail to load all scenarios, see log for more details! Number of failed scenarios: {}".format(len(errors)))
+    return result, errors


-def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, steps_to_run):
+def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, steps_to_run, metadrive_config=None):
     logger.info(
         "================ Begin Scenario Loading Verification for scenario {}-{} ================ \n".format(
             start_scenario_index, num_scenario + start_scenario_index))
     success = True
-    env = ScenarioEnv(
-        {
+    metadrive_config = metadrive_config or {}
+    metadrive_config.update({
         "agent_policy": ReplayEgoCarPolicy,
         "num_scenarios": num_scenario,
        "horizon": 1000,
         "start_scenario_index": start_scenario_index,
         "no_static_vehicles": False,
         "data_directory": dataset_path,
-        }
-    )
+    })
+    env = ScenarioEnv(metadrive_config)
     logging.disable(logging.INFO)
-    error_files = []
-    try:
-        for scenario_index in tqdm.tqdm(range(start_scenario_index, start_scenario_index + num_scenario),
-                                        desc="Scenarios: {}-{}".format(start_scenario_index,
-                                                                       start_scenario_index + num_scenario)):
+    error_msgs = []
+    desc = "Scenarios: {}-{}".format(start_scenario_index, start_scenario_index + num_scenario)
+    for scenario_index in tqdm.tqdm(range(start_scenario_index, start_scenario_index + num_scenario), desc=desc):
+        try:
             env.reset(force_seed=scenario_index)
             arrive = False
             for _ in range(steps_to_run):
@@ -80,19 +82,17 @@ def loading_into_metadrive(start_scenario_index, num_scenario, dataset_path, ste
                 if d and info["arrive_dest"]:
                     arrive = True
             assert arrive, "Can not arrive destination"
         except Exception as e:
             file_name = env.engine.data_manager.summary_lookup[scenario_index]
             file_path = os.path.join(dataset_path, env.engine.data_manager.mapping[file_name], file_name)
-            error_file = {"scenario_index": scenario_index, "file_path": file_path, "error": str(e)}
-            error_files.append(error_file)
-            logger.warning(
-                "\n Scenario Error, "
-                "scenario_index: {}, file_path: {}.\n Error message: {}".format(scenario_index, file_path, str(e))
-            )
-            success = False
-    finally:
-        env.close()
-    return success, error_files
+            error_msg = ED.make(scenario_index, file_path, file_name, str(e))
+            error_msgs.append(error_msg)
+            success = False
+            # proceed to next scenario
+            continue
+    env.close()
+    return success, error_msgs
 def loading_wrapper(arglist, dataset_path, steps_to_run):
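
Taken together, these changes implement the commit title: verify a dataset, let ErrorFile.dump write the error JSON, then build a new dataset from those logs. An end-to-end sketch; the verifier module path and all file paths are assumptions for illustration:

from scenarionet.verifier.utils import verify_loading_into_metadrive
from scenarionet.verifier.error import ErrorFile

dataset_path = "./waymo"  # dir containing dataset_summary.pkl
result_save_dir = "./results"  # must already exist, per the assert above

success, errors = verify_loading_into_metadrive(dataset_path, result_save_dir, num_workers=8)
if not success:
    # The error file name follows ErrorFile.PREFIX plus the dataset dir's basename.
    error_file = "./results/error_scenarios_for_waymo.json"
    ErrorFile.generate_dataset(error_file, "./waymo_good", force_overwrite=True, broken_scenario=False)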