Make video (#5)
* generate accident scene
* construction PG
* no object
* accident prob
* capture script
* update nuscenes tools
* make video
* format
* fix test
* update readme
* update readme
* format
* format
README.md (19 lines changed)
@@ -1,6 +1,23 @@
# ScenarioNet

ScenarioNet: Scalable Traffic Scenario Management System for Autonomous Driving

**Open-Source Platform for Large-Scale Traffic Scenario Simulation and Modeling**

[**Webpage**](https://github.com/metadriverse/scenarionet) |
[**Code**](https://github.com/metadriverse/scenarionet) |
[**Video**](https://github.com/metadriverse/scenarionet) |
[**Paper**](https://github.com/metadriverse/scenarionet)

ScenarioNet allows users to load scenarios from real-world datasets such as Waymo, nuPlan, nuScenes, and L5, as well as from synthetic datasets such as procedurally generated scenarios and safety-critical scenarios generated by adversarial attacks. The built database provides tools for building training and test sets for ML applications.

![ScenarioNet scenarios](docs/asset/scenarios.png)

Powered by [MetaDrive Simulator](https://github.com/metadriverse/metadrive), the scenarios can be reconstructed for various applications such as AD stack testing, reinforcement learning, imitation learning, and scenario generation.

![Sensor simulation](docs/asset/sensor.png)
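For reference, a minimal sketch (not part of this commit) of replaying a built database in MetaDrive; the database path is hypothetical, and the APIs follow the scripts changed further below:

```python
# A minimal sketch of replaying a ScenarioNet database in MetaDrive.
# The database path is hypothetical; APIs mirror the test scripts in this commit.
from metadrive.envs.scenario_env import ScenarioEnv
from metadrive.policy.replay_policy import ReplayEgoCarPolicy

env = ScenarioEnv(
    {
        "agent_policy": ReplayEgoCarPolicy,       # replay recorded ego trajectories
        "data_directory": "/data/converted_db",   # hypothetical database path
        "num_scenarios": 2,
    }
)
try:
    for seed in range(2):
        env.reset(force_seed=seed)  # force_seed as used in the scripts below
        done = False
        while not done:
            o, r, done, info = env.step([0.0, 0.0])  # actions ignored during replay
finally:
    env.close()
```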
## Installation
docs/asset/scenarios.png (new binary file, 358 KiB)
docs/asset/sensor.png (new binary file, 391 KiB)

scenarionet/builder/utils.py
@@ -29,13 +29,13 @@ def try_generating_summary(file_folder):
def merge_database(
-        output_path,
-        *dataset_paths,
-        exist_ok=False,
-        overwrite=False,
-        try_generate_missing_file=True,
-        filters: List[Callable] = None,
-        save=True,
+    output_path,
+    *dataset_paths,
+    exist_ok=False,
+    overwrite=False,
+    try_generate_missing_file=True,
+    filters: List[Callable] = None,
+    save=True,
):
    """
    Combine multiple datasets. Each database should have a dataset_summary.pkl
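For context, a minimal usage sketch of this signature (the database paths are hypothetical):

```python
# A minimal usage sketch for merge_database; all paths are hypothetical.
from scenarionet.builder.utils import merge_database

summaries, mappings = merge_database(
    "/data/combined_db",   # output_path for the combined database
    "/data/waymo_db",      # any number of source databases follow
    "/data/nuscenes_db",
    exist_ok=True,         # allow writing into an existing directory
    overwrite=True,        # overwrite existing summary/mapping .pkl files
    try_generate_missing_file=True,
)
```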
@@ -116,12 +116,7 @@ def merge_database(
def copy_database(
-        from_path,
-        to_path,
-        exist_ok=False,
-        overwrite=False,
-        copy_raw_data=False,
-        remove_source=False
+    from_path, to_path, exist_ok=False, overwrite=False, copy_raw_data=False, remove_source=False, force_move=False
):
    if not os.path.exists(from_path):
        raise FileNotFoundError("Can not find database: {}".format(from_path))

@@ -129,19 +124,16 @@ def copy_database(
    assert exist_ok, "to_directory already exists. Set exists_ok to allow turning it into a database"
    assert not os.path.samefile(from_path, to_path), "to_directory is the same as from_directory. Abort!"
    files = os.listdir(from_path)
-    if ScenarioDescription.DATASET.MAPPING_FILE in files and ScenarioDescription.DATASET.SUMMARY_FILE in files and len(
-            files) > 2:
-        raise RuntimeError("The source database is not allowed to move! "
-                           "This will break the relationship between this database and other database built on it."
-                           "If it is ok for you, use 'mv' to move it manually ")
+    if not force_move and (ScenarioDescription.DATASET.MAPPING_FILE in files
+                           and ScenarioDescription.DATASET.SUMMARY_FILE in files and len(files) > 2):
+        raise RuntimeError(
+            "The source database is not allowed to move! "
+            "This will break the relationship between this database and other database built on it."
+            "If it is ok for you, use 'mv' to move it manually "
+        )

    summaries, mappings = merge_database(
-        to_path,
-        from_path,
-        exist_ok=exist_ok,
-        overwrite=overwrite,
-        try_generate_missing_file=True,
-        save=False
+        to_path, from_path, exist_ok=exist_ok, overwrite=overwrite, try_generate_missing_file=True, save=False
    )
    summary_file = osp.join(to_path, ScenarioDescription.DATASET.SUMMARY_FILE)
    mapping_file = osp.join(to_path, ScenarioDescription.DATASET.MAPPING_FILE)
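A minimal sketch of the new `force_move` escape hatch, mirroring the updated test further below (paths are hypothetical):

```python
# A minimal usage sketch for copy_database with the new force_move flag;
# paths are hypothetical. force_move bypasses the guard above that protects
# source databases other databases were built on.
from scenarionet.builder.utils import copy_database

copy_database(
    "/data/waymo_db",       # from_path
    "/data/waymo_db_copy",  # to_path
    exist_ok=True,
    overwrite=True,
    force_move=True,
)
```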
@@ -160,13 +152,13 @@ def copy_database(
def split_database(
-        from_path,
-        to_path,
-        start_index,
-        num_scenarios,
-        exist_ok=False,
-        overwrite=False,
-        random=False,
+    from_path,
+    to_path,
+    start_index,
+    num_scenarios,
+    exist_ok=False,
+    overwrite=False,
+    random=False,
):
    if not os.path.exists(from_path):
        raise FileNotFoundError("Can not find database: {}".format(from_path))
@@ -191,13 +183,14 @@ def split_database(
    assert osp.exists(abs_dir_path), "Wrong database path. Can not find database at: {}".format(abs_dir_path)
    summaries, lookup, mappings = read_dataset_summary(from_path)
    assert start_index >= 0 and start_index + num_scenarios <= len(
-        lookup), "No enough scenarios in source dataset: total {}, start_index: {}, need: {}".format(len(lookup),
-                                                                                                     start_index,
-                                                                                                     num_scenarios)
+        lookup
+    ), "No enough scenarios in source dataset: total {}, start_index: {}, need: {}".format(
+        len(lookup), start_index, num_scenarios
+    )
    if random:
        selected = sample(lookup[start_index:], k=num_scenarios)
    else:
-        selected = lookup[start_index: start_index + num_scenarios]
+        selected = lookup[start_index:start_index + num_scenarios]
    selected_summary = {}
    selected_mapping = {}
    for scenario in selected:
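A minimal usage sketch of this function, e.g. carving a test set out of a larger database (paths are hypothetical):

```python
# A minimal usage sketch for split_database; paths are hypothetical.
from scenarionet.builder.utils import split_database

split_database(
    "/data/waymo_db",        # from_path
    "/data/waymo_test_set",  # to_path
    start_index=0,
    num_scenarios=64,
    exist_ok=True,
    overwrite=True,
    random=False,  # select sequentially; True samples from lookup[start_index:]
)
```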
@@ -9,6 +9,7 @@ from scenarionet.converter.pg.utils import get_pg_scenarios, convert_pg_scenario
from scenarionet.converter.utils import write_to_directory

if __name__ == '__main__':
+    # For the PG environment config, see: scenarionet/converter/pg/utils.py:6
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--database_path",
@@ -34,14 +34,14 @@ if __name__ == '__main__':
        default=0,
        type=int,
        help="Control how many files to use. We will list all files in the raw data folder "
-             "and select files[start_file_index: start_file_index+num_files]"
+        "and select files[start_file_index: start_file_index+num_files]"
    )
    parser.add_argument(
        "--num_files",
        default=1000,
        type=int,
        help="Control how many files to use. We will list all files in the raw data folder "
-             "and select files[start_file_index: start_file_index+num_files]"
+        "and select files[start_file_index: start_file_index+num_files]"
    )
    args = parser.parse_args()
@@ -61,8 +61,9 @@ if __name__ == '__main__':
        shutil.rmtree(output_path)

    waymo_data_directory = os.path.join(SCENARIONET_DATASET_PATH, args.raw_data_path)
-    scenarios = get_waymo_scenarios(waymo_data_directory, args.start_file_index, args.num_files,
-                                    num_workers=8)  # do not use too much worker to read data
+    scenarios = get_waymo_scenarios(
+        waymo_data_directory, args.start_file_index, args.num_files, num_workers=8
+    )  # do not use too much worker to read data

    write_to_directory(
        convert_func=convert_waymo_scenario,
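A condensed sketch of this conversion pipeline; directory names are hypothetical, and both the waymo utils import path and the `write_to_directory` keywords beyond `convert_func` are assumptions based on the truncated call shown above:

```python
# A condensed sketch of the Waymo conversion pipeline; paths and keyword names
# beyond convert_func are assumptions, not confirmed by this diff.
from scenarionet.converter.utils import write_to_directory
from scenarionet.converter.waymo.utils import convert_waymo_scenario, get_waymo_scenarios

scenarios = get_waymo_scenarios("/data/waymo_raw", 0, 10, num_workers=8)
write_to_directory(
    convert_func=convert_waymo_scenario,
    scenarios=scenarios,
    output_path="/data/waymo_db",  # hypothetical output database
    dataset_version="v1.2",        # hypothetical version label
    dataset_name="waymo",
)
```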
@@ -357,9 +357,9 @@ def extract_traffic(scenario: NuPlanScenario, center):
            type=MetaDriveType.UNSET,
            state=dict(
                position=np.zeros(shape=(episode_len, 3)),
-                heading=np.zeros(shape=(episode_len,)),
+                heading=np.zeros(shape=(episode_len, )),
                velocity=np.zeros(shape=(episode_len, 2)),
-                valid=np.zeros(shape=(episode_len,)),
+                valid=np.zeros(shape=(episode_len, )),
                length=np.zeros(shape=(episode_len, 1)),
                width=np.zeros(shape=(episode_len, 1)),
                height=np.zeros(shape=(episode_len, 1))
@@ -244,7 +244,16 @@ def get_tracks_from_frames(nuscenes: NuScenes, scene_info, frames, num_to_interp
        # if id == "ego":
        # ego is valid all time, so we can calculate the velocity in this way

-    return interpolate_tracks
+    # Normalize place all object to (0,0)
+    map_center = np.array(interpolate_tracks["ego"]["state"]["position"][0])
+    map_center[-1] = 0
+    normalized_ret = {}
+    for id, track, in interpolate_tracks.items():
+        pos = track["state"]["position"] - map_center
+        track["state"]["position"] = np.asarray(pos)
+        normalized_ret[id] = track
+
+    return normalized_ret, map_center
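A tiny numeric sketch (not part of the commit) of the recentering introduced above: the ego's first position defines `map_center` (with z zeroed), and every track is shifted so the scene is centered at (0, 0):

```python
# A tiny sketch of the recentering logic, with made-up coordinates.
import numpy as np

map_center = np.array([411.3, 1180.8, 0.5])  # hypothetical ego start in world coords
map_center[-1] = 0  # keep z out of the shift

positions = np.array([[411.3, 1180.8, 0.5], [420.0, 1190.0, 0.5]])
print(positions - map_center)
# [[0.  0.  0.5]
#  [8.7 9.2 0.5]]
```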
def get_map_features(scene_info, nuscenes: NuScenes, map_center, radius=500, points_distance=1):
@@ -299,32 +308,36 @@ def get_map_features(scene_info, nuscenes: NuScenes, map_center, radius=500, poi
    for idx, boundary in enumerate(boundaries[0]):
        block_points = np.array(list(i for i in zip(boundary.coords.xy[0], boundary.coords.xy[1])))
        id = "boundary_{}".format(idx)
-        ret[id] = {SD.TYPE: MetaDriveType.LINE_SOLID_SINGLE_WHITE, SD.POLYLINE: block_points}
+        ret[id] = {
+            SD.TYPE: MetaDriveType.LINE_SOLID_SINGLE_WHITE,
+            SD.POLYLINE: block_points - np.asarray(map_center)[:2]
+        }

    for id in map_objs["lane_divider"]:
        line_info = map_api.get("lane_divider", id)
        assert line_info["token"] == id
        line = map_api.extract_line(line_info["line_token"]).coords.xy
-        line = [[line[0][i], line[1][i]] for i in range(len(line[0]))]
-        ret[id] = {SD.TYPE: MetaDriveType.LINE_BROKEN_SINGLE_WHITE, SD.POLYLINE: line}
+        line = np.asarray([[line[0][i], line[1][i]] for i in range(len(line[0]))])
+        ret[id] = {SD.TYPE: MetaDriveType.LINE_BROKEN_SINGLE_WHITE, SD.POLYLINE: line - np.asarray(map_center)[:2]}

    for id in map_objs["road_divider"]:
        line_info = map_api.get("road_divider", id)
        assert line_info["token"] == id
        line = map_api.extract_line(line_info["line_token"]).coords.xy
-        line = [[line[0][i], line[1][i]] for i in range(len(line[0]))]
-        ret[id] = {SD.TYPE: MetaDriveType.LINE_SOLID_SINGLE_YELLOW, SD.POLYLINE: line}
+        line = np.asarray([[line[0][i], line[1][i]] for i in range(len(line[0]))])
+        ret[id] = {SD.TYPE: MetaDriveType.LINE_SOLID_SINGLE_YELLOW, SD.POLYLINE: line - np.asarray(map_center)[:2]}

    for id in map_objs["lane"]:
        lane_info = map_api.get("lane", id)
        assert lane_info["token"] == id
        boundary = map_api.extract_polygon(lane_info["polygon_token"]).boundary.xy
-        boundary_polygon = [[boundary[0][i], boundary[1][i]] for i in range(len(boundary[0]))]
+        boundary_polygon = np.asarray([[boundary[0][i], boundary[1][i]] for i in range(len(boundary[0]))])
        # boundary_polygon += [[boundary[0][i], boundary[1][i]] for i in range(len(boundary[0]))]
        ret[id] = {
            SD.TYPE: MetaDriveType.LANE_SURFACE_STREET,
-            SD.POLYLINE: discretize_lane(map_api.arcline_path_3[id], resolution_meters=points_distance),
-            SD.POLYGON: boundary_polygon,
+            SD.POLYLINE: np.asarray(discretize_lane(map_api.arcline_path_3[id], resolution_meters=points_distance)) -
+            np.asarray(map_center),
+            SD.POLYGON: boundary_polygon - np.asarray(map_center)[:2],
        }

    for id in map_objs["lane_connector"]:
@@ -335,7 +348,8 @@ def get_map_features(scene_info, nuscenes: NuScenes, map_center, radius=500, poi
        # boundary_polygon += [[boundary[0][i], boundary[1][i], 0.] for i in range(len(boundary[0]))]
        ret[id] = {
            SD.TYPE: MetaDriveType.LANE_SURFACE_STREET,
-            SD.POLYLINE: discretize_lane(map_api.arcline_path_3[id], resolution_meters=points_distance),
+            SD.POLYLINE: np.asarray(discretize_lane(map_api.arcline_path_3[id], resolution_meters=points_distance)) -
+            np.asarray(map_center),
            # SD.POLYGON: boundary_polygon,
            "speed_limit_kmh": 100
        }
@@ -375,14 +389,13 @@ def convert_nuscenes_scenario(scene, version, nuscenes: NuScenes):
    result[SD.METADATA]["sample_rate"] = scenario_log_interval
    result[SD.METADATA][SD.TIMESTEP] = np.arange(0., (len(frames) - 1) * 0.5 + 0.1, 0.1)
    # interpolating to 0.1s interval
-    result[SD.TRACKS] = get_tracks_from_frames(nuscenes, scene_info, frames, num_to_interpolate=5)
+    result[SD.TRACKS], map_center = get_tracks_from_frames(nuscenes, scene_info, frames, num_to_interpolate=5)
    result[SD.METADATA][SD.SDC_ID] = "ego"

    # No traffic light in nuscenes at this stage
    result[SD.DYNAMIC_MAP_STATES] = {}

    # map
-    map_center = result[SD.TRACKS]["ego"]["state"]["position"][0]
    result[SD.MAP_FEATURES] = get_map_features(scene_info, nuscenes, map_center, 500)

    return result
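A minimal sketch of driving this converter; it assumes the nuscenes-devkit is installed and a `v1.0-mini` split sits at a hypothetical path:

```python
# A minimal sketch of calling convert_nuscenes_scenario over all scenes;
# the dataroot is hypothetical.
from nuscenes import NuScenes

nusc = NuScenes(version="v1.0-mini", dataroot="/data/nuscenes")
scenario_descriptions = [
    convert_nuscenes_scenario(scene, version="v1.0-mini", nuscenes=nusc)
    for scene in nusc.scene
]
```

scenarionet/converter/pg/utils.py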
@@ -1,8 +1,28 @@
import logging

from metadrive.envs.metadrive_env import MetaDriveEnv
from metadrive.policy.idm_policy import IDMPolicy
from metadrive.scenario.scenario_description import ScenarioDescription as SD


+def make_env(start_index, num_scenarios, extra_config=None):
+    config = dict(
+        start_seed=start_index,
+        num_scenarios=num_scenarios,
+        traffic_density=0.15,
+        agent_policy=IDMPolicy,
+        accident_prob=0.5,
+        crash_vehicle_done=False,
+        crash_object_done=False,
+        store_map=False,
+        map=2
+    )
+    extra_config = extra_config or {}
+    config.update(extra_config)
+    env = MetaDriveEnv(config)
+    return env
+
+
def convert_pg_scenario(scenario_index, version, env):
    """
    Simulate to collect PG Scenarios
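A minimal sketch of collecting a single PG scenario with the new helper, mirroring how `write_to_directory_single_worker` uses it below; `"v0"` is a placeholder version string:

```python
# A minimal usage sketch for make_env + convert_pg_scenario.
from scenarionet.converter.pg.utils import convert_pg_scenario, make_env

env = make_env(start_index=0, num_scenarios=1)
scenario_description = convert_pg_scenario(scenario_index=0, version="v0", env=env)
env.close()
```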
@@ -12,13 +12,11 @@ from functools import partial
import numpy as np
import psutil
import tqdm
-from metadrive.envs.metadrive_env import MetaDriveEnv
-from metadrive.policy.idm_policy import IDMPolicy
from metadrive.scenario import ScenarioDescription as SD

from scenarionet.builder.utils import merge_database
from scenarionet.common_utils import save_summary_anda_mapping
-from scenarionet.converter.pg.utils import convert_pg_scenario
+from scenarionet.converter.pg.utils import convert_pg_scenario, make_env

logger = logging.getLogger(__file__)
@@ -189,18 +187,7 @@ def write_to_directory_single_worker(
    # for pg scenario only
    if convert_func is convert_pg_scenario:
-        env = MetaDriveEnv(
-            dict(
-                start_seed=scenarios[0],
-                num_scenarios=len(scenarios),
-                traffic_density=0.15,
-                agent_policy=IDMPolicy,
-                crash_vehicle_done=False,
-                store_map=False,
-                map=2
-            )
-        )
-        kwargs["env"] = env
+        kwargs["env"] = make_env(start_index=scenarios[0], num_scenarios=len(scenarios))

    count = 0
    for scenario in tqdm.tqdm(scenarios, desc="Worker Index: {}".format(worker_index)):
@@ -433,7 +433,7 @@ def get_waymo_scenarios(waymo_data_directory, start_index, num, num_workers=8):
    file_list = os.listdir(waymo_data_directory)
    assert len(file_list) >= start_index + num and start_index >= 0, \
        "No sufficient files ({}) in raw_data_directory. need: {}, start: {}".format(len(file_list), num, start_index)
-    file_list = file_list[start_index: start_index + num]
+    file_list = file_list[start_index:start_index + num]
    num_files = len(file_list)
    if num_files < num_workers:
        # single process
@@ -9,15 +9,11 @@ if __name__ == '__main__':
        "--to",
        required=True,
        help="The name of the new database. "
-             "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
-             "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
-             "that directory into a database."
+        "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
+        "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
+        "that directory into a database."
    )
-    parser.add_argument(
-        "--remove_source",
-        action="store_true",
-        help="Remove the `from_database` if set this flag"
-    )
+    parser.add_argument("--remove_source", action="store_true", help="Remove the `from_database` if set this flag")
    parser.add_argument(
        "--copy_raw_data",
        action="store_true",
@@ -27,13 +23,13 @@ if __name__ == '__main__':
        "--exist_ok",
        action="store_true",
        help="Still allow to write, if the to_folder exists already. "
-             "This write will only create two .pkl files and this directory will become a database."
+        "This write will only create two .pkl files and this directory will become a database."
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="When exists ok is set but summary.pkl and map.pkl exists in existing dir, "
-             "whether to overwrite both files"
+        "whether to overwrite both files"
    )
    args = parser.parse_args()
    from_path = args.__getattribute__("from")
@@ -10,27 +10,24 @@ if __name__ == '__main__':
        "-d",
        required=True,
        help="The name of the new database. "
-             "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
-             "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
-             "that directory into a database."
+        "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
+        "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
+        "that directory into a database."
    )
    parser.add_argument(
-        '--from',
-        required=True,
-        type=str,
-        help="Which dataset to filter. It takes one directory path as input"
+        '--from', required=True, type=str, help="Which dataset to filter. It takes one directory path as input"
    )
    parser.add_argument(
        "--exist_ok",
        action="store_true",
        help="Still allow to write, if the dir exists already. "
-             "This write will only create two .pkl files and this directory will become a database."
+        "This write will only create two .pkl files and this directory will become a database."
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="When exists ok is set but summary.pkl and map.pkl exists in existing dir, "
-             "whether to overwrite both files"
+        "whether to overwrite both files"
    )
    parser.add_argument(
        "--moving_dist",
@@ -38,47 +35,26 @@ if __name__ == '__main__':
        help="add this flag to select cases with SDC moving dist > sdc_moving_dist_min"
    )
    parser.add_argument(
-        "--sdc_moving_dist_min",
-        default=10,
-        type=float,
-        help="Selecting case with sdc_moving_dist > this value. "
+        "--sdc_moving_dist_min", default=10, type=float, help="Selecting case with sdc_moving_dist > this value. "
    )

    parser.add_argument(
-        "--num_object",
-        action="store_true",
-        help="add this flag to select cases with object_num < max_num_object"
+        "--num_object", action="store_true", help="add this flag to select cases with object_num < max_num_object"
    )
    parser.add_argument(
-        "--max_num_object",
-        default=30,
-        type=float,
-        help="case will be selected if num_obj < this argument"
+        "--max_num_object", default=30, type=float, help="case will be selected if num_obj < this argument"
    )

-    parser.add_argument(
-        "--no_overpass",
-        action="store_true",
-        help="Scenarios with overpass WON'T be selected"
-    )
+    parser.add_argument("--no_overpass", action="store_true", help="Scenarios with overpass WON'T be selected")

    parser.add_argument(
-        "--no_traffic_light",
-        action="store_true",
-        help="Scenarios with traffic light WON'T be selected"
+        "--no_traffic_light", action="store_true", help="Scenarios with traffic light WON'T be selected"
    )

-    parser.add_argument(
-        "--id_filter",
-        action="store_true",
-        help="Scenarios with indicated name will NOT be selected"
-    )
+    parser.add_argument("--id_filter", action="store_true", help="Scenarios with indicated name will NOT be selected")

    parser.add_argument(
-        "--exclude_ids",
-        nargs='+',
-        default=[],
-        help="Scenarios with indicated name will NOT be selected"
+        "--exclude_ids", nargs='+', default=[], help="Scenarios with indicated name will NOT be selected"
    )

    args = parser.parse_args()
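A minimal sketch of applying one of these filters programmatically, mirroring this CLI; `ScenarioFilter` usage follows the updated test further below, while the filters import path and database paths are assumptions:

```python
# A minimal sketch of filtering during a merge; the ScenarioFilter import path
# and the database paths are assumptions.
from scenarionet.builder.filters import ScenarioFilter
from scenarionet.builder.utils import merge_database

filters = [ScenarioFilter.make(ScenarioFilter.no_overpass)]
summaries, _ = merge_database(
    "/data/filtered_db",
    "/data/source_db",
    exist_ok=True,
    overwrite=True,
    try_generate_missing_file=True,
    filters=filters,
)
```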
@@ -10,9 +10,9 @@ if __name__ == '__main__':
        "-d",
        required=True,
        help="The name of the new combined database. "
-             "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
-             "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
-             "that directory into a database."
+        "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
+        "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
+        "that directory into a database."
    )
    parser.add_argument(
        '--from',
@@ -25,13 +25,13 @@ if __name__ == '__main__':
        "--exist_ok",
        action="store_true",
        help="Still allow to write, if the dir exists already. "
-             "This write will only create two .pkl files and this directory will become a database."
+        "This write will only create two .pkl files and this directory will become a database."
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="When exists ok is set but summary.pkl and map.pkl exists in existing dir, "
-             "whether to overwrite both files"
+        "whether to overwrite both files"
    )
    parser.add_argument(
        "--filter_moving_dist",
@@ -43,7 +43,7 @@ if __name__ == '__main__':
        default=5,
        type=float,
        help="Selecting case with sdc_moving_dist > this value. "
-             "We will add more filter conditions in the future."
+        "We will add more filter conditions in the future."
    )
    args = parser.parse_args()
    target = args.sdc_moving_dist_min
@@ -7,12 +7,7 @@ logger = logging.getLogger(__file__)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
-    parser.add_argument(
-        "--database_path",
-        "-d",
-        required=True,
-        help="Database to check number of scenarios"
-    )
+    parser.add_argument("--database_path", "-d", required=True, help="Database to check number of scenarios")
    args = parser.parse_args()
    summary, _, _, = read_dataset_summary(args.database_path)
    logger.info("Number of scenarios: {}".format(len(summary)))
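A minimal sketch of the same check done programmatically; the database path is hypothetical and the `read_dataset_summary` import path is an assumption based on this diff's other imports:

```python
# A minimal sketch of checking a database's size; the path is hypothetical.
from scenarionet.common_utils import read_dataset_summary

summary, lookup, mapping = read_dataset_summary("/data/waymo_db")
print("Number of scenarios:", len(summary))
```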
@@ -13,26 +13,30 @@ if __name__ == '__main__':
        "--to",
        required=True,
        help="The name of the new database. "
-             "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
-             "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
-             "that directory into a database."
+        "It will create a new directory to store dataset_summary.pkl and dataset_mapping.pkl. "
+        "If exists_ok=True, those two .pkl files will be stored in an existing directory and turn "
+        "that directory into a database."
    )
    parser.add_argument("--num_scenarios", type=int, default=64, help="how many scenarios to extract (default: 30)")
    parser.add_argument("--start_index", type=int, default=0, help="which index to start")
-    parser.add_argument("--random", action="store_true", help="If set to true, it will choose scenarios randomly "
-                                                              "from all_scenarios[start_index:]. "
-                                                              "Otherwise, the scenarios will be selected sequentially")
+    parser.add_argument(
+        "--random",
+        action="store_true",
+        help="If set to true, it will choose scenarios randomly "
+        "from all_scenarios[start_index:]. "
+        "Otherwise, the scenarios will be selected sequentially"
+    )
    parser.add_argument(
        "--exist_ok",
        action="store_true",
        help="Still allow to write, if the to_folder exists already. "
-             "This write will only create two .pkl files and this directory will become a database."
+        "This write will only create two .pkl files and this directory will become a database."
    )
    parser.add_argument(
        "--overwrite",
        action="store_true",
        help="When exists ok is set but summary.pkl and map.pkl exists in existing dir, "
-             "whether to overwrite both files"
+        "whether to overwrite both files"
    )
    args = parser.parse_args()
    from_path = args.__getattribute__("from")
scenarionet/tests/script/capture_database.py (new file, 59 lines)
@@ -0,0 +1,59 @@
import pygame
from metadrive.envs.scenario_env import ScenarioEnv
from metadrive.policy.replay_policy import ReplayEgoCarPolicy

if __name__ == "__main__":
    env = ScenarioEnv(
        {
            "use_render": True,
            "agent_policy": ReplayEgoCarPolicy,
            "show_interface": False,
            "image_observation": False,
            "show_logo": False,
            "no_traffic": False,
            "drivable_region_extension": 15,
            "sequential_seed": True,
            "reactive_traffic": False,
            "show_fps": False,
            "render_pipeline": True,
            "daytime": "07:10",
            "window_size": (1600, 900),
            "camera_dist": 9,
            "start_scenario_index": 1000,
            "num_scenarios": 4000,
            "horizon": 1000,
            "store_map": False,
            "vehicle_config": dict(
                show_navi_mark=False,
                no_wheel_friction=True,
                use_special_color=False,
                image_source="depth_camera",
                lidar=dict(num_lasers=120, distance=50),
                lane_line_detector=dict(num_lasers=0, distance=50),
                side_detector=dict(num_lasers=12, distance=50)
            ),
            "data_directory": "D:\\scenarionet_testset\\nuplan_test\\nuplan_test_w_raw"
        }
    )

    # env.reset()
    #
    #
    def capture():
        env.capture("rgb_deluxe_{}_{}.jpg".format(env.current_seed, t))
        ret = env.render(
            mode="topdown", screen_size=(1600, 900), film_size=(10000, 10000), target_vehicle_heading_up=True
        )
        pygame.image.save(ret, "top_down_{}_{}.png".format(env.current_seed, env.episode_step))

    #
    #
    # env.engine.accept("c", capture)

    # for seed in [1001, 1002, 1005, 1011]:
    env.reset(force_seed=1020)
    for t in range(10000):
        capture()
        o, r, d, info = env.step([1, 0.88])
        if env.episode_step >= env.engine.data_manager.current_scenario_length:
            break
@@ -1,119 +1,39 @@
import pygame
-from metadrive.envs.metadrive_env import MetaDriveEnv
from metadrive.utils import setup_logger

+from scenarionet.converter.pg.utils import make_env
+
+
+def capture():
+    env.capture("rgb_deluxe_{}_{}.jpg".format(env.current_seed, t))
+    ret = env.render(mode="topdown", screen_size=(1600, 900), film_size=(6000, 6000), target_vehicle_heading_up=True)
+    pygame.image.save(ret, "top_down_{}_{}.png".format(env.current_seed, env.episode_step))
+

if __name__ == "__main__":
    setup_logger(True)
-    env = MetaDriveEnv(
-        {
-            "num_scenarios": 1,
-            "traffic_density": 0.15,
-            "traffic_mode": "hybrid",
-            "start_seed": 74,
-            # "_disable_detector_mask":True,
-            # "debug_physics_world": True,
-            # "debug": True,
-            # "global_light": False,
-            # "debug_static_world": True,
-            "show_interface": False,
-            "cull_scene": False,
-            "random_spawn_lane_index": False,
-            "random_lane_width": False,
-            # "image_observation": True,
-            # "controller": "joystick",
-            # "show_coordinates": True,
-            "random_agent_model": False,
-            "manual_control": True,
-            "use_render": True,
-            "accident_prob": 1,
-            "decision_repeat": 5,
-            "interface_panel": [],
-            "need_inverse_traffic": False,
-            "rgb_clip": True,
-            "map": 2,
-            # "agent_policy": ExpertPolicy,
-            "random_traffic": False,
-            # "random_lane_width": True,
-            "driving_reward": 1.0,
-            # "pstats": True,
-            "force_destroy": False,
-            # "show_skybox": False,
-            "show_fps": False,
-            "render_pipeline": True,
-            # "camera_dist": 8,
-            "window_size": (1600, 900),
-            "camera_dist": 9,
-            # "camera_pitch": 30,
-            # "camera_height": 1,
-            # "camera_smooth": False,
-            # "camera_height": -1,
-            "vehicle_config": {
-                "enable_reverse": False,
-                # "vehicle_model": "xl",
-                # "rgb_camera": (1024, 1024),
-                # "spawn_velocity": [8.728615581032535, -0.24411703918728195],
-                "spawn_velocity_car_frame": True,
-                # "image_source": "depth_camera",
-                # "random_color": True
-                # "show_lidar": True,
-                "spawn_lane_index": None,
-                # "destination":"2R1_3_",
-                # "show_side_detector": True,
-                # "show_lane_line_detector": True,
-                # "side_detector": dict(num_lasers=2, distance=50),
-                # "lane_line_detector": dict(num_lasers=2, distance=50),
-                # "show_line_to_navi_mark": True,
-                "show_navi_mark": False,
-                # "show_dest_mark": True
-            },
-        }
-    )
+    env = make_env(
+        0,
+        50000,
+        extra_config=dict(
+            use_render=True,
+            show_logo=False,
+            show_fps=False,
+            show_interface=False,
+            drivable_region_extension=15,
+            window_size=(1600, 900),
+            render_pipeline=True,
+            camera_dist=9,
+            random_spawn_lane_index=False,
+            vehicle_config=dict(show_navi_mark=False),
+            daytime="07:10"
+        )
+    )

-    o = env.reset()
-
-    def capture():
-        env.capture()
-        ret = env.render(mode="topdown", screen_size=(1600, 900), film_size=(2000, 2000), track_target_vehicle=True)
-        pygame.image.save(ret, "top_down_{}.png".format(env.current_seed))
-
-    env.engine.accept("c", capture)
-    # env.main_camera.set_follow_lane(True)
-    # env.vehicle.get_camera("rgb_camera").save_image(env.vehicle)
-    # for line in env.engine.coordinate_line:
-    #     line.reparentTo(env.vehicle.origin)
-    # env.vehicle.set_velocity([5, 0], in_local_frame=True)
-    for s in range(1, 100000):
-        # env.vehicle.set_velocity([1, 0], in_local_frame=True)
-        o, r, d, info = env.step([0, 0])
-
-        # env.vehicle.set_pitch(-np.pi/4)
-        # [0.09231533, 0.491018, 0.47076905, 0.7691619, 0.5, 0.5, 1.0, 0.0, 0.48037243, 0.8904728, 0.81229943, 0.7317231, 1.0, 0.85320455, 0.9747932, 0.65675277, 0.0, 0.5, 0.5]
-        # else:
-        #     if s % 100 == 0:
-        #         env.close()
-        #         env.reset()
-        # info["fuel"] = env.vehicle.energy_consumption
-        # env.render(
-        #     text={
-        #         # "heading_diff": env.vehicle.heading_diff(env.vehicle.lane),
-        #         # "lane_width": env.vehicle.lane.width,
-        #         # "lane_index": env.vehicle.lane_index,
-        #         # "lateral": env.vehicle.lane.local_coordinates(env.vehicle.position),
-        #         "current_seed": env.current_seed
-        #     }
-        # )
-        # if d:
-        #     env.reset()
-        # # assert env.observation_space.contains(o)
-        # if (s + 1) % 100 == 0:
-        #     # print(
-        #     "Finish {}/10000 simulation steps. Time elapse: {:.4f}. Average FPS: {:.4f}".format(
-        #         s + 1,f
-        #         time.time() - start, (s + 1) / (time.time() - start)
-        #     )
-        #     )
-        # if d:
-        #     # # env.close()
-        #     # # print(len(env.engine._spawned_objects))
-        #     env.reset()
-    # o = env.reset(force_seed=0)
-    # env.engine.accept("c", capture)
+    for s in range(6, 1000):
+        env.reset(force_seed=16)
+        for t in range(10000):
+            capture()
+            o, r, d, info = env.step([0, 0])
+            if info["arrive_dest"]:
+                break
@@ -12,6 +12,6 @@ if __name__ == '__main__':
    scenarios_2 = {}

    for i in range(9):
-        scenarios_1[str(i)] = read_scenario(data_1, mapping_1, lookup_1[-9+i])
+        scenarios_1[str(i)] = read_scenario(data_1, mapping_1, lookup_1[-9 + i])
        scenarios_2[str(i)] = read_scenario(data_2, mapping_2, lookup_2[i])
    # assert_scenario_equal(scenarios_1, scenarios_2, check_self_type=False, only_compare_sdc=True)
@@ -1,4 +1,3 @@
-import time
import pygame
from metadrive.engine.asset_loader import AssetLoader
from metadrive.envs.scenario_env import ScenarioEnv
@@ -15,12 +14,13 @@ if __name__ == "__main__":
            # "need_lane_localization": False,
            "show_logo": False,
            "no_traffic": False,
            "drivable_region_extension": 15,
            "sequential_seed": True,
            "reactive_traffic": False,
            "show_fps": False,
            # "debug": True,
-            # "render_pipeline": True,
-            "daytime": "11:01",
+            "render_pipeline": True,
+            "daytime": "08:10",
            "window_size": (1600, 900),
            "camera_dist": 0.8,
            "camera_height": 1.5,
@@ -31,7 +31,7 @@ if __name__ == "__main__":
            # "force_reuse_object_name": True,
            # "data_directory": "/home/shady/Downloads/test_processed",
            "horizon": 1000,
-            # "no_static_vehicles": True,
+            "no_static_vehicles": False,
            # "show_policy_mark": True,
            # "show_coordinates": True,
            # "force_destroy": True,
@@ -50,48 +50,23 @@ if __name__ == "__main__":
                side_detector=dict(num_lasers=12, distance=50)
            ),
            "data_directory": AssetLoader.file_path("nuscenes", return_raw_style=False),
-            "image_observation": True,
+            # "image_observation": True,
        }
    )

    # 0,1,3,4,5,6

-    success = []
-    reset_num = 0
-    start = time.time()
-    reset_used_time = 0
-    s = 0
-    while True:
-        # for i in range(10):
-        start_reset = time.time()
-        env.reset(force_seed=0)
-
-        reset_used_time += time.time() - start_reset
-        reset_num += 1
+    for seed in range(10):
+        env.reset(force_seed=seed)
        for t in range(10000):
+            if t==30:
+                # env.capture("camera_deluxe.jpg")
+                # ret = env.render(mode="topdown", screen_size=(1600, 900), film_size=(5000, 5000), track_target_vehicle=True)
+                # pygame.image.save(ret, "top_down.png")
+                env.vehicle.get_camera("depth_camera").save_image(env.vehicle, "camera.jpg")
+            env.capture("rgb_deluxe_{}_{}.jpg".format(env.current_seed, t))
+            ret = env.render(
+                mode="topdown", screen_size=(1600, 900), film_size=(9000, 9000), target_vehicle_heading_up=True
+            )
+            pygame.image.save(ret, "top_down_{}_{}.png".format(env.current_seed, t))
+            # env.vehicle.get_camera("depth_camera").save_image(env.vehicle, "depth_{}.jpg".format(t))
+            # env.vehicle.get_camera("rgb_camera").save_image(env.vehicle, "rgb_{}.jpg".format(t))
            o, r, d, info = env.step([1, 0.88])
            assert env.observation_space.contains(o)
-            s += 1
-            # if env.config["use_render"]:
-            #     env.render(text={"seed": env.current_seed,
-            #                      # "num_map": info["num_stored_maps"],
-            #                      "data_coverage": info["data_coverage"],
-            #                      "reward": r,
-            #                      "heading_r": info["step_reward_heading"],
-            #                      "lateral_r": info["step_reward_lateral"],
-            #                      "smooth_action_r": info["step_reward_action_smooth"]})
-            if d:
-                print(
-                    "Time elapse: {:.4f}. Average FPS: {:.4f}, AVG_Reset_time: {:.4f}".format(
-                        time.time() - start, s / (time.time() - start - reset_used_time),
-                        reset_used_time / reset_num
-                    )
-                )
-                print("seed:{}, success".format(env.engine.global_random_seed))
-                print(list(env.engine.curriculum_manager.recent_success.dict.values()))
+            # if d:
+            if env.episode_step >= env.engine.data_manager.current_scenario_length:
                break
@@ -1,104 +0,0 @@
import time

import pygame
from metadrive.envs.scenario_env import ScenarioEnv
from metadrive.policy.replay_policy import ReplayEgoCarPolicy

NuScenesEnv = ScenarioEnv

if __name__ == "__main__":
    env = NuScenesEnv(
        {
            "use_render": True,
            "agent_policy": ReplayEgoCarPolicy,
            "show_interface": False,
            "image_observation": False,
            "show_logo": False,
            "no_traffic": False,
            "drivable_region_extension": 15,
            "sequential_seed": True,
            "reactive_traffic": False,
            "show_fps": False,
            # "debug": True,
            "render_pipeline": True,
            "daytime": "19:30",
            "window_size": (1600, 900),
            "camera_dist": 9,
            # "camera_height": 1.5,
            # "camera_pitch": None,
            # "camera_fov": 60,
            "start_scenario_index": 0,
            "num_scenarios": 4,
            # "force_reuse_object_name": True,
            # "data_directory": "/home/shady/Downloads/test_processed",
            "horizon": 1000,
            # "no_static_vehicles": True,
            # "show_policy_mark": True,
            # "show_coordinates": True,
            # "force_destroy": True,
            # "default_vehicle_in_traffic": True,
            "vehicle_config": dict(
                # light=True,
                # random_color=True,
                show_navi_mark=False,
                use_special_color=False,
                image_source="depth_camera",
                # rgb_camera=(1600, 900),
                # depth_camera=(1600, 900, True),
                # no_wheel_friction=True,
                lidar=dict(num_lasers=120, distance=50),
                lane_line_detector=dict(num_lasers=0, distance=50),
                side_detector=dict(num_lasers=12, distance=50)
            ),
            "data_directory": "D:\\code\\scenarionet\\scenarionet\\tests\\script\\waymo_scenes_adv"
        }
    )

    # 0,1,3,4,5,6

    success = []
    reset_num = 0
    start = time.time()
    reset_used_time = 0
    s = 0

    env.reset()

    def capture():
        env.capture()
        ret = env.render(mode="topdown", screen_size=(1600, 900), film_size=(7000, 7000), track_target_vehicle=True)
        pygame.image.save(ret, "top_down_{}.png".format(env.current_seed))

    env.engine.accept("c", capture)

    while True:
        # for i in range(10):
        start_reset = time.time()
        env.reset()

        reset_used_time += time.time() - start_reset
        reset_num += 1
        for t in range(10000):
            o, r, d, info = env.step([1, 0.88])
            assert env.observation_space.contains(o)
            s += 1
            # if env.config["use_render"]:
            #     env.render(text={"seed": env.current_seed,
            #                      # "num_map": info["num_stored_maps"],
            #                      "data_coverage": info["data_coverage"],
            #                      "reward": r,
            #                      "heading_r": info["step_reward_heading"],
            #                      "lateral_r": info["step_reward_lateral"],
            #                      "smooth_action_r": info["step_reward_action_smooth"]})
            if d:
                print(
                    "Time elapse: {:.4f}. Average FPS: {:.4f}, AVG_Reset_time: {:.4f}".format(
                        time.time() - start, s / (time.time() - start - reset_used_time),
                        reset_used_time / reset_num
                    )
                )
                print("seed:{}, success".format(env.engine.global_random_seed))
                print(list(env.engine.curriculum_manager.recent_success.dict.values()))
                break
@@ -21,12 +21,7 @@ def test_filter_overpass():
    filters.append(ScenarioFilter.make(ScenarioFilter.no_overpass))

    summaries, _ = merge_database(
-        output_path,
-        *dataset_paths,
-        exist_ok=True,
-        overwrite=True,
-        try_generate_missing_file=True,
-        filters=filters
+        output_path, *dataset_paths, exist_ok=True, overwrite=True, try_generate_missing_file=True, filters=filters
    )
    assert len(summaries) == 3
    for scenario in summaries:
@@ -19,7 +19,7 @@ def test_copy_database():
    # move
    for k, from_path in enumerate(dataset_paths):
        to = os.path.join(TMP_PATH, str(k))
-        copy_database(from_path, to)
+        copy_database(from_path, to, force_move=True, exist_ok=True, overwrite=True)
        moved_path.append(to)
    assert os.path.exists(from_path)
    merge_database(output_path, *moved_path, exist_ok=True, overwrite=True, try_generate_missing_file=True)
@@ -37,7 +37,7 @@ def test_copy_database():
    for k, from_path in enumerate(moved_path):
        new_p = os.path.join(TMP_PATH, str(k) + str(k))
        new_move_pathes.append(new_p)
-        copy_database(from_path, new_p, exist_ok=True, overwrite=True)
+        copy_database(from_path, new_p, exist_ok=True, overwrite=True, remove_source=True)
        assert not os.path.exists(from_path)
    merge_database(output_path, *new_move_pathes, exist_ok=True, overwrite=True, try_generate_missing_file=True)
    # verify