commit 8eb2e07d50
parent 2b9be248c4
Author: QuanyiLi
Date: 2023-05-06 16:59:17 +01:00

7 changed files with 49 additions and 22 deletions

.gitignore (vendored)

@@ -9,3 +9,4 @@
 /build/
 /dist/
 /documentation/build/
+dataset/*


@@ -1,3 +1,3 @@
-from scenarionet.converter.nuscenes.utils import convert_one_nuscenes_scenario
-from scenarionet.converter.nuplan.utils import convert_one_nuplan_scenario
+from scenarionet.converter.nuscenes.utils import convert_nuscenes_scenario
+from scenarionet.converter.nuplan.utils import convert_nuplan_scenario
 from scenarionet.converter.utils import write_to_directory


@@ -388,7 +388,7 @@ def extract_traffic(scenario: NuPlanScenario, center):
     return tracks


-def convert_one_nuplan_scenario(scenario: NuPlanScenario):
+def convert_nuplan_scenario(scenario: NuPlanScenario):
     """
     Data will be interpolated to 0.1s time interval, while the time interval of original key frames are 0.5s.
     """


@@ -345,7 +345,7 @@ def get_map_features(scene_info, nuscenes: NuScenes, map_center, radius=250, poi
     return ret


-def convert_one_nuscenes_scenario(scene, nuscenes: NuScenes):
+def convert_nuscenes_scenario(scene, nuscenes: NuScenes):
     """
     Data will be interpolated to 0.1s time interval, while the time interval of original key frames are 0.5s.
     """
@@ -386,4 +386,4 @@ def convert_one_nuscenes_scenario(scene, nuscenes: NuScenes):
     map_center = result[SD.TRACKS]["ego"]["state"]["position"][0]
     result[SD.MAP_FEATURES] = get_map_features(scene_info, nuscenes, map_center, 250)
-    return result
+    return result, scene_token
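With this change the nuscenes converter hands back the scene token alongside the scenario description, and write_to_directory (see the last file in this commit) unpacks the pair to build a unique export name. A self-contained illustration of the new convention, with a stand-in converter and a made-up token:

def fake_convert(scene):
    # Stand-in for convert_nuscenes_scenario(scene, nuscenes=...): it now
    # returns (scenario_description, scenario_id) instead of the description alone.
    return {"metadata": {}}, scene["token"]

sd_scenario, scenario_id = fake_convert({"token": "0123456789abcdef"})  # hypothetical token
export_file_name = "sd_{}_{}.pkl".format("nuscenes" + "_" + "v1.0-mini", scenario_id)
print(export_file_name)  # sd_nuscenes_v1.0-mini_0123456789abcdef.pkl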


@@ -10,7 +10,7 @@ import shutil
 import tqdm
 from metadrive.scenario.scenario_description import ScenarioDescription

-from scenarionet.converter.nuplan.utils import get_nuplan_scenarios, convert_one_nuplan_scenario
+from scenarionet.converter.nuplan.utils import get_nuplan_scenarios, convert_nuplan_scenario
 from scenarionet.converter.utils import dict_recursive_remove_array
@@ -41,7 +41,7 @@ def convert_nuplan(dataset_params, output_path, worker_index=None, force_overwri
     # Init.
     scenarios = get_nuplan_scenarios(dataset_params)
     for scenario in tqdm.tqdm(scenarios):
-        sd_scenario = convert_one_nuplan_scenario(scenario)
+        sd_scenario = convert_nuplan_scenario(scenario)
         sd_scenario = sd_scenario.to_dict()
         ScenarioDescription.sanity_check(sd_scenario, check_self_type=True)
         export_file_name = "sd_{}_{}.pkl".format("nuplan", scenario.scenario_name)


@@ -2,28 +2,26 @@
 This script aims to convert nuscenes scenarios to ScenarioDescription, so that we can load any nuscenes scenarios into
 MetaDrive.
 """
+import os.path
+
+from nuscenes import NuScenes
+
 from scenarionet import SCENARIONET_DATASET_PATH
-from scenarionet.converter.nuscenes.utils import convert_one_nuscenes_scenario
+from scenarionet.converter.nuscenes.utils import convert_nuscenes_scenario
 from scenarionet.converter.utils import write_to_directory

-try:
-    from nuscenes import NuScenes
-except ImportError:
-    print("Can not find nuscenes-devkit")
-
 #
 if __name__ == "__main__":
-    output_path = SCENARIONET_DATASET_PATH
+    output_path = os.path.join(SCENARIONET_DATASET_PATH, "nuscenes")
     version = 'v1.0-mini'
     dataroot = '/home/shady/data/nuscenes'
     force_overwrite = True
     nusc = NuScenes(version=version, dataroot=dataroot)
     scenarios = nusc.scene
-    write_to_directory(convert_func=convert_one_nuscenes_scenario,
+    write_to_directory(convert_func=convert_nuscenes_scenario,
                        scenarios=scenarios,
                        output_path=output_path,
-                       version=version,
+                       dataset_version=version,
                        dataset_name="nuscenes",
-                       force_overwrite=True,
+                       force_overwrite=force_overwrite,
                        nuscenes=nusc)
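With output_path now nested under a dataset-specific subdirectory, exports land in a layout like the one sketched below (assuming SCENARIONET_DATASET_PATH resolves to the repository's dataset/ directory, which the new .gitignore rule dataset/* keeps untracked; both the path value and the token are illustrative):

import os

SCENARIONET_DATASET_PATH = "./dataset"  # assumed value, for illustration
output_path = os.path.join(SCENARIONET_DATASET_PATH, "nuscenes")
export_file_name = "sd_{}_{}.pkl".format("nuscenes" + "_" + "v1.0-mini", "scene_token")
print(os.path.join(output_path, export_file_name))
# ./dataset/nuscenes/sd_nuscenes_v1.0-mini_scene_token.pkl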


@@ -1,7 +1,7 @@
-import copy
-import math
 import ast
+import copy
 import inspect
+import math
 import os
 import pickle
 import shutil
@@ -112,7 +112,16 @@ def contains_explicit_return(f):
     return any(isinstance(node, ast.Return) for node in ast.walk(ast.parse(inspect.getsource(f))))


-def write_to_directory(convert_func, scenarios, output_path, version, dataset_name, force_overwrite=False, **kwargs):
+def write_to_directory(convert_func,
+                       scenarios,
+                       output_path,
+                       dataset_version,
+                       dataset_name,
+                       force_overwrite=False,
+                       **kwargs):
+    """
+    Convert a batch of scenarios.
+    """
     if not contains_explicit_return(convert_func):
         raise RuntimeError("The convert function should return a metadata dict")
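The guard above relies on contains_explicit_return, whose body appears in the hunk context: it parses the converter's source with ast and rejects any function that contains no return statement. A self-contained demo of that check:

import ast
import inspect

def contains_explicit_return(f):
    # True if any `return` node appears anywhere in f's parsed source.
    return any(isinstance(node, ast.Return) for node in ast.walk(ast.parse(inspect.getsource(f))))

def good(x):
    return x

def bad(x):
    print(x)

assert contains_explicit_return(good)
assert not contains_explicit_return(bad)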
@@ -135,10 +144,29 @@ def write_to_directory(convert_func, scenarios, output_path, version, dataset_na
     metadata_recorder = {}
     for scenario in tqdm.tqdm(scenarios):
-        sd_scenario = convert_func(scenario, **kwargs)
+        # convert scenario
+        sd_scenario, scenario_id = convert_func(scenario, **kwargs)
+        export_file_name = "sd_{}_{}.pkl".format(dataset_name + "_" + dataset_version, scenario_id)
+
+        # add agents summary
+        summary_dict = {}
+        ego_car_id = sd_scenario[SD.METADATA][SD.SDC_ID]
+        summary_dict[ego_car_id] = get_agent_summary(
+            state_dict=sd_scenario.get_sdc_track()["state"], id=ego_car_id, type=sd_scenario.get_sdc_track()["type"]
+        )
+        for track_id, track in sd_scenario[SD.TRACKS].items():
+            summary_dict[track_id] = get_agent_summary(state_dict=track["state"], id=track_id, type=track["type"])
+        sd_scenario[SD.METADATA]["object_summary"] = summary_dict
+
+        # count some objects occurrence
+        sd_scenario[SD.METADATA]["number_summary"] = get_number_summary(sd_scenario)
+
+        metadata_recorder[export_file_name] = copy.deepcopy(sd_scenario[SD.METADATA])
+
+        # sanity check
         sd_scenario = sd_scenario.to_dict()
         ScenarioDescription.sanity_check(sd_scenario, check_self_type=True)
-        export_file_name = "sd_{}_{}.pkl".format(dataset_name+"_" + version, scenario["token"])
+
+        # dump
         p = os.path.join(output_path, export_file_name)
         with open(p, "wb") as f:
             pickle.dump(sd_scenario, f)
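get_agent_summary and get_number_summary are not shown in this diff, so their exact behavior is unknown here. A hypothetical sketch of what a per-agent summary of this shape could compute (field names and logic are illustrative, not the repository's):

import numpy as np

def get_agent_summary(state_dict, id, type):
    # Hypothetical: valid track length and distance travelled over valid steps.
    position = np.asarray(state_dict["position"], dtype=float)
    valid = np.asarray(state_dict["valid"], dtype=bool)
    steps = np.diff(position[valid][:, :2], axis=0)
    return {
        "object_id": id,
        "type": type,
        "valid_length": int(valid.sum()),
        "moving_distance": float(np.linalg.norm(steps, axis=1).sum()),
    }

summary = get_agent_summary(
    state_dict={"position": [[0, 0], [1, 0], [2, 1]], "valid": [1, 1, 1]}, id="ego", type="VEHICLE"
)
print(summary["moving_distance"])  # 1.0 + sqrt(2) ≈ 2.414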