diff --git a/.dockerignore b/.dockerignore new file mode 100644 index 00000000..82f0c3ac --- /dev/null +++ b/.dockerignore @@ -0,0 +1 @@ +/data/ diff --git a/.github/pull_request_template.md b/.github/pull_request_template.md new file mode 100644 index 00000000..67132344 --- /dev/null +++ b/.github/pull_request_template.md @@ -0,0 +1,29 @@ +## Description + + + +## How to review + + + +## How to test + +### test data + + + +### test command + + + +```bash + +``` + +## Reference + + + +## Notes for reviewer + + diff --git a/.github/workflows/build-and-test.yaml b/.github/workflows/build-and-test.yaml new file mode 100644 index 00000000..f4a58c42 --- /dev/null +++ b/.github/workflows/build-and-test.yaml @@ -0,0 +1,103 @@ +name: build-and-test + +on: + workflow_dispatch: + push: + branches: + - main + pull_request: + +jobs: + # Upload and download actions are used since + # the gh command isn't available in the docker container and + # it's hard to set it up in the container. + download-test-data: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Download test data + run: | + gh release download test-data -D tests/data + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + - name: Upload data for build-and-test + uses: actions/upload-artifact@v3 + with: + name: test-data + path: tests/data + + build-and-test: + runs-on: ubuntu-latest + container: + image: ros:${{ matrix.rosdistro }} + strategy: + matrix: + rosdistro: + - humble + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Install dependencies + run: | + apt update -yqq + apt install -yqq curl python3-venv python-is-python3 \ + libgl1-mesa-dev ros-${ROS_DISTRO}-sensor-msgs-py ros-${ROS_DISTRO}-rosbag2-storage-mcap \ + unzip tree + + - name: Download test data + uses: actions/download-artifact@v3 + with: + name: test-data + path: tests/data + + - name: Unzip test data + run: | + unzip 'tests/data/*.zip' -d tests/data/ + + - name: Check test data + run: | + tree tests/data + + - name: Build autoware perception messages + uses: autowarefoundation/autoware-github-actions/colcon-build@v1 + with: + rosdistro: ${{ matrix.rosdistro }} + target-packages: autoware_auto_perception_msgs autoware_perception_msgs + build-depends-repos: build_depends.repos + + - name: Install dependencies + run: | + apt update -yqq + apt install -yqq curl python3-venv python-is-python3 \ + libgl1-mesa-dev ros-${ROS_DISTRO}-sensor-msgs-py + + - name: Check python version + run: | + python -V + + - name: Install poetry + run: | + curl -sSL https://install.python-poetry.org | python3 - --version 1.4.2 + echo "$HOME/.local/bin" >> $GITHUB_PATH + + - name: Check poetry version + run: | + poetry --version + + - name: Install python packages + run: | + poetry install + + - name: Test with pytest + run: | + . /opt/ros/$ROS_DISTRO/setup.sh + .
./install/setup.sh + poetry run pytest diff --git a/.github/workflows/cancel-privious-workflows.yaml b/.github/workflows/cancel-privious-workflows.yaml new file mode 100644 index 00000000..d65abbb1 --- /dev/null +++ b/.github/workflows/cancel-privious-workflows.yaml @@ -0,0 +1,14 @@ +name: cancel-previous-workflows + +on: + pull_request: + +jobs: + cancel-previous-workflows: + runs-on: ubuntu-latest + steps: + - name: Cancel previous runs + uses: styfle/cancel-workflow-action@0.10.0 + with: + workflow_id: all + all_but_latest: true diff --git a/.github/workflows/github-release.yaml b/.github/workflows/github-release.yaml new file mode 100644 index 00000000..52475f24 --- /dev/null +++ b/.github/workflows/github-release.yaml @@ -0,0 +1,29 @@ +name: github-release + +on: + push: + tags: + - v* + +jobs: + github-release: + runs-on: ubuntu-latest + steps: + - name: Check out repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Run generate-changelog + id: generate-changelog + uses: autowarefoundation/autoware-github-actions/generate-changelog@v1 + + - name: Release to GitHub + run: | + gh release create "${{ github.ref_name }}" \ + --draft \ + --title "Release ${{ github.ref_name }}" \ + --notes "$NOTES" + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + NOTES: ${{ steps.generate-changelog.outputs.changelog }} diff --git a/.github/workflows/pre-commit.yml b/.github/workflows/pre-commit.yml new file mode 100644 index 00000000..635f3cbc --- /dev/null +++ b/.github/workflows/pre-commit.yml @@ -0,0 +1,21 @@ +name: pre-commit + +on: + pull_request: + workflow_dispatch: + +jobs: + pre-commit: + runs-on: ubuntu-latest + + steps: + - name: Checkout repository + uses: actions/checkout@v3 + with: + fetch-depth: 0 + + - name: Setup Python + uses: actions/setup-python@v3 + + - name: Run pre-commit + uses: pre-commit/action@v3.0.0 diff --git a/.github/workflows/semantic-pull-request.yaml b/.github/workflows/semantic-pull-request.yaml new file mode 100644 index 00000000..71224c22 --- /dev/null +++ b/.github/workflows/semantic-pull-request.yaml @@ -0,0 +1,12 @@ +name: semantic-pull-request + +on: + pull_request_target: + types: + - opened + - edited + - synchronize + +jobs: + semantic-pull-request: + uses: autowarefoundation/autoware-github-actions/.github/workflows/semantic-pull-request.yaml@v1 diff --git a/.gitignore b/.gitignore new file mode 100644 index 00000000..be0db939 --- /dev/null +++ b/.gitignore @@ -0,0 +1,165 @@ +# data +tests/scale_to_t4/data_test_scale_to_t4_converter/output_base +/data/ +/data*/ +/notebooks/data/ +/tests/data + +# ros +/src +/build +/install +/log + +### tmp file ### +node_modules +npm-debug.log +.DS_Store +.vscode + +### train file ### +*.pth +*.weights +*.onnx +*.trt + +### python ### + +# Byte-compiled / optimized / DLL files +__pycache__/ +*.py[cod] +*$py.class + +# C extensions +*.so + +# Distribution / packaging +.Python +build/ +develop-eggs/ +dist/ +downloads/ +eggs/ +.eggs/ +lib/ +lib64/ +parts/ +sdist/ +var/ +wheels/ +share/python-wheels/ +*.egg-info/ +.installed.cfg +*.egg +MANIFEST + +# PyInstaller +# Usually these files are written by a python script from a template +# before PyInstaller builds the exe, so as to inject date/other infos into it. 
+*.manifest +*.spec + +# Installer logs +pip-log.txt +pip-delete-this-directory.txt + +# Unit test / coverage reports +htmlcov/ +.tox/ +.nox/ +.coverage +.coverage.* +.cache +nosetests.xml +coverage.xml +*.cover +*.py,cover +.hypothesis/ +.pytest_cache/ +cover/ + +# Translations +*.mo +*.pot + +# Django stuff: +*.log +local_settings.py +db.sqlite3 +db.sqlite3-journal + +# Flask stuff: +instance/ +.webassets-cache + +# Scrapy stuff: +.scrapy + +# Sphinx documentation +docs/_build/ + +# PyBuilder +.pybuilder/ +target/ + +# Jupyter Notebook +.ipynb_checkpoints + +# IPython +profile_default/ +ipython_config.py + +# pyenv +# For a library or package, you might want to ignore these files since the code is +# intended to run in multiple environments; otherwise, check them in: +# .python-version + +# pipenv +# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control. +# However, in case of collaboration, if having platform-specific dependencies or dependencies +# having no cross-platform support, pipenv may install dependencies that don't work, or not +# install all needed dependencies. +#Pipfile.lock + +# PEP 582; used by e.g. github.com/David-OConnor/pyflow +__pypackages__/ + +# Celery stuff +celerybeat-schedule +celerybeat.pid + +# SageMath parsed files +*.sage.py + +# Environments +.env +.venv +env/ +venv/ +ENV/ +env.bak/ +venv.bak/ + +# Spyder project settings +.spyderproject +.spyproject + +# Rope project settings +.ropeproject + +# mkdocs documentation +/site + +# mypy +.mypy_cache/ +.dmypy.json +dmypy.json + +# Pyre type checker +.pyre/ + +# pytype static type analyzer +.pytype/ + +# Cython debug symbols +cython_debug/ diff --git a/.markdownlint.yaml b/.markdownlint.yaml new file mode 100644 index 00000000..605ac41c --- /dev/null +++ b/.markdownlint.yaml @@ -0,0 +1,6 @@ +default: true +MD013: false +MD024: + siblings_only: true +MD033: false +MD041: false diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 00000000..e5da6d79 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,77 @@ +# To install: +# +# pip install pre-commit +# +# To use: +# +# pre-commit run -a +# +# Or: +# +# pre-commit install # (runs every time you commit in git) +# +# To update this file: +# +# pre-commit autoupdate +# +# See https://github.com/pre-commit/pre-commit + +repos: + # Standard hooks + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v4.4.0 + hooks: + - id: check-json + - id: check-merge-conflict + - id: check-toml + - id: check-xml + - id: check-yaml + - id: detect-private-key + - id: end-of-file-fixer + - id: mixed-line-ending + - id: trailing-whitespace + args: [--markdown-linebreak-ext=md] + + - repo: https://github.com/igorshubovych/markdownlint-cli + rev: v0.33.0 + hooks: + - id: markdownlint + args: ["-c", ".markdownlint.yaml", "--fix"] + + - repo: https://github.com/pre-commit/mirrors-prettier + rev: v3.0.0-alpha.4 + hooks: + - id: prettier + + - repo: https://github.com/gruntwork-io/pre-commit + rev: v0.1.18 + hooks: + - id: shellcheck + + - repo: https://github.com/pycqa/isort + rev: 5.12.0 + hooks: + - id: isort + + - repo: https://github.com/psf/black + rev: 23.1.0 + hooks: + - id: black + + - repo: https://github.com/PyCQA/flake8 + rev: 6.0.0 + hooks: + - id: flake8 + additional_dependencies: + [ + "flake8-blind-except", + "flake8-builtins", + "flake8-class-newline", + "flake8-comprehensions", + "flake8-deprecated", + "flake8-docstrings", + "flake8-import-order", + "flake8-quotes", + ] + +exclude: 
".svg|.ipynb|.venv|./data|./tests" diff --git a/.prettierignore b/.prettierignore new file mode 100644 index 00000000..6ef21498 --- /dev/null +++ b/.prettierignore @@ -0,0 +1,16 @@ +# Ignore artifacts: +build +coverage + +# Ignore all HTML files: +*.html + +# Ignore yaml +*.yaml +*.yml + +# Ignore json +*.json + +# Ignore venv +.venv/* diff --git a/.prettierrc.yaml b/.prettierrc.yaml new file mode 100644 index 00000000..ef9b1b85 --- /dev/null +++ b/.prettierrc.yaml @@ -0,0 +1,2 @@ +printWidth: 120 +tabWidth: 2 diff --git a/README.md b/README.md new file mode 100644 index 00000000..324b418a --- /dev/null +++ b/README.md @@ -0,0 +1,63 @@ +# tier4_perception_dataset + +This is the data conversion tools around T4 dataset. + +## Tools Overview + +See [tools_overview](docs/tools_overview.md) about the converters. + +## Setup + +```bash +git clone git@github.com:tier4/tier4_perception_dataset.git perception_dataset +cd perception_dataset +``` + +install and build ros dependencies (this step must be outside of poetry virtualenv) + +```bash +source /opt/ros/${ROS_DISTRO}/setup.sh +sudo apt install -y ros-${ROS_DISTRO}-sensor-msgs-py ros-${ROS_DISTRO}-rosbag2-storage-mcap + +mkdir src -p && vcs import src < build_depends.repos +colcon build --symlink-install --cmake-args -DCMAKE_BUILD_TYPE=Release --packages-up-to autoware_auto_perception_msgs autoware_perception_msgs +source ./install/setup.bash +``` + +install python dependencies + +```bash +pip3 install poetry +poetry install +``` + +## Test + +### Download test data + +- [GitHub CLI](https://github.com/cli/cli#installation) + +```shell +gh release download test-data -D tests/data +unzip 'tests/data/*.zip' -d tests/data/ +``` + +or manually download zipped data from [the release page](https://github.com/tier4/tier4_perception_dataset/releases/tag/test-data) to a `test/data` directory + +### Run tests + +```bash +source /opt/ros/${ROS_DISTRO}/setup.sh +source ./install/setup.bash +poetry run pytest +``` + +## Pre commit + +```bash +# to install hooks of formatter and linter files +pre-commit install + +# to apply hooks +pre-commit run -a +``` diff --git a/build_depends.repos b/build_depends.repos new file mode 100644 index 00000000..e0de53df --- /dev/null +++ b/build_depends.repos @@ -0,0 +1,9 @@ +repositories: + autoware_auto_msgs: + type: git + url: https://github.com/tier4/autoware_auto_msgs.git + version: tier4/main + tier4_autoware_msgs: + type: git + url: https://github.com/tier4/tier4_autoware_msgs.git + version: main diff --git a/config/convert_deepen_to_t4_sample.yaml b/config/convert_deepen_to_t4_sample.yaml new file mode 100644 index 00000000..7e154064 --- /dev/null +++ b/config/convert_deepen_to_t4_sample.yaml @@ -0,0 +1,24 @@ +task: convert_deepen_to_t4 +description: + visibility: + full: "No occlusion of the object." + most: "Object is occluded, but by less than 50%." + partial: "The object is occluded by more than 50% (but not completely)." + none: "The object is 90-100% occluded and no points/pixels are visible in the label." 
+ camera_index: + CAM_FRONT: 0 + CAM_FRONT_RIGHT: 1 + CAM_BACK_RIGHT: 2 + CAM_BACK: 3 + CAM_BACK_LEFT: 4 + CAM_FRONT_LEFT: 5 + +conversion: + input_base: ./data/non_annotated_t4_format + input_anno_file: ./data/deepen_format/lidar_annotations_accepted_deepen.json + input_bag_base: ./data/rosbag2 + output_base: ./data/t4_format + topic_list: ./config/topic_list_sample.yaml + ignore_interpolate_label: True + dataset_corresponding: + Dataset_name: dataset_id_in_Deepen_AI diff --git a/config/convert_rosbag2_to_non_annotated_t4_sample.yaml b/config/convert_rosbag2_to_non_annotated_t4_sample.yaml new file mode 100644 index 00000000..10660a0d --- /dev/null +++ b/config/convert_rosbag2_to_non_annotated_t4_sample.yaml @@ -0,0 +1,25 @@ +task: convert_rosbag2_to_non_annotated_t4 +conversion: + input_base: ./data/rosbag2 + output_base: ./data/non_annotated_t4_format + start_timestamp_sec: 0 # Start timestamp (in seconds) of the conversion. If not used, set to 0. + skip_timestamp: 2.0 # Skip the first skip_timestamp seconds of data after the first point cloud timestamp. + num_load_frames: 0 # Maximum number of frames to save as T4 data. Set to 0 to determine it automatically based on the number of lidar topics. + # The following configuration is generally not modified unless there are changes to the vehicle sensor configuration. + lidar_sensor: + topic: /sensing/lidar/concatenated/pointcloud + channel: LIDAR_CONCAT + camera_latency_sec: 0.02 # camera latency in seconds between the header.stamp and the exposure trigger + camera_sensors: # Keep the same order as each camera exposure timing + - topic: /sensing/camera/camera3/image_rect_color/compressed + channel: CAM_BACK_LEFT + - topic: /sensing/camera/camera2/image_rect_color/compressed + channel: CAM_FRONT_LEFT + - topic: /sensing/camera/camera0/image_rect_color/compressed + channel: CAM_FRONT + - topic: /sensing/camera/camera4/image_rect_color/compressed + channel: CAM_FRONT_RIGHT + - topic: /sensing/camera/camera5/image_rect_color/compressed + channel: CAM_BACK_RIGHT + - topic: /sensing/camera/camera1/image_rect_color/compressed + channel: CAM_BACK diff --git a/config/convert_t4_to_deepen_sample.yaml b/config/convert_t4_to_deepen_sample.yaml new file mode 100644 index 00000000..47c5e758 --- /dev/null +++ b/config/convert_t4_to_deepen_sample.yaml @@ -0,0 +1,13 @@ +task: convert_t4_to_deepen +conversion: + input_base: ./data/non_annotated_t4_format + output_base: ./data/deepen_format + annotation_hz: 10 + workers_number: 12 + camera_sensors: + - channel: CAM_FRONT + - channel: CAM_FRONT_RIGHT + - channel: CAM_BACK_RIGHT + - channel: CAM_BACK + - channel: CAM_BACK_LEFT + - channel: CAM_FRONT_LEFT diff --git a/config/rosbag2_to_t4/convert_synthetic_data.yaml b/config/rosbag2_to_t4/convert_synthetic_data.yaml new file mode 100644 index 00000000..88ee116c --- /dev/null +++ b/config/rosbag2_to_t4/convert_synthetic_data.yaml @@ -0,0 +1,16 @@ +task: convert_rosbag2_to_t4 +conversion: + # path to rosbag dir output by simulator + input_base: ./data/rosbag2_synthetic + output_base: ./data/synthetic_to_t4_format + workers_number: 1 + skip_timestamp: 2.0 + num_load_frames: 200 + crop_frames_unit: 50 # Crop frames from the end so that the number of frames is divisible by crop_frames_unit. Set to 0 or 1 so as not to crop any frames.
+ object_topic_name: /ground_truth/objects + object_msg_type: DynamicObjectArray + world_frame_id: world + lidar_sensor: + topic: /lidar/concatenated/pointcloud + channel: LIDAR_CONCAT + camera_sensors: [] # synthetic data has no images diff --git a/config/topic_list_sample.yaml b/config/topic_list_sample.yaml new file mode 100644 index 00000000..d10c5df6 --- /dev/null +++ b/config/topic_list_sample.yaml @@ -0,0 +1,49 @@ +mandatory_topic_list: + - /sensing/lidar/concatenated/pointcloud + - /tf + - /tf_static + - /sensing/camera/camera0/image_rect_color/compressed + - /sensing/camera/camera1/image_rect_color/compressed + - /sensing/camera/camera2/image_rect_color/compressed + - /sensing/camera/camera3/image_rect_color/compressed + - /sensing/camera/camera4/image_rect_color/compressed + - /sensing/camera/camera5/image_rect_color/compressed + - /localization/kinematic_state +topic_list: + - /sensing/lidar/left/velodyne_packets + - /sensing/lidar/rear/velodyne_packets + - /sensing/lidar/right/velodyne_packets + - /sensing/lidar/top/velodyne_packets + - /sensing/lidar/concatenated/pointcloud + - /sensing/camera/camera0/image_rect_color/compressed + - /sensing/camera/camera1/image_rect_color/compressed + - /sensing/camera/camera2/image_rect_color/compressed + - /sensing/camera/camera3/image_rect_color/compressed + - /sensing/camera/camera4/image_rect_color/compressed + - /sensing/camera/camera5/image_rect_color/compressed + - /sensing/camera/camera0/camera_info + - /sensing/camera/camera1/camera_info + - /sensing/camera/camera2/camera_info + - /sensing/camera/camera3/camera_info + - /sensing/camera/camera4/camera_info + - /sensing/camera/camera5/camera_info + - /sensing/radar/front_center/from_can_bus + - /sensing/radar/front_left/from_can_bus + - /sensing/radar/front_right/from_can_bus + - /sensing/radar/rear_center/from_can_bus + - /sensing/radar/rear_left/from_can_bus + - /sensing/radar/rear_right/from_can_bus + - /pacmod/from_can_bus + - /can_rx + - /pacmod/can_rx + - /sensing/gnss/ublox/fix_velocity + - /sensing/gnss/ublox/nav_sat_fix + - /sensing/gnss/ublox/navpvt + - /sensing/gnss/septentrio/nav_sat_fix + - /sensing/gnss/fixed + - /sensing/gnss/pose + - /sensing/gnss/pose_with_covariance + - /sensing/imu/tamagawa/imu_raw + - /tf + - /tf_static + - /localization/kinematic_state diff --git a/docs/coordinate_transformation.drawio.svg b/docs/coordinate_transformation.drawio.svg new file mode 100644 index 00000000..c5ac72a2 --- /dev/null +++ b/docs/coordinate_transformation.drawio.svg @@ -0,0 +1 @@ +
[drawio SVG: coordinate transformation diagram. T4 Format: map, ego_pose, ego_vehicle, calibrated_sensor, camera / lidar. nuScenes Format: map, ego_pose, base_link, calibrated_sensor, camera (frame_id: cameraID/camera_optical_link), calibrated_sensor, lidar (frame_id: base_link).]
\ No newline at end of file diff --git a/docs/data_collection_conversion.drawio.svg b/docs/data_collection_conversion.drawio.svg new file mode 100644 index 00000000..55b7d2a2 --- /dev/null +++ b/docs/data_collection_conversion.drawio.svg @@ -0,0 +1 @@ +
[drawio SVG: data collection and conversion flow. Operations: design data acquisition, data collection, data collection/screening, data conversion, annotation. Data/deliverables: data acquisition plan, raw ros2bag, filtered ros2bag (image, pointcloud (raw data)), rosbag2 w/ pointcloud and tf_static, non-annotated T4 format, annotation result (json), T4 dataset. Script processes: data selection / rosbag filter & merge / add and modify tf; packets to pointcloud & concat & add tf_static; rosbag2 to T4 format; T4 format to Deepen; Deepen to T4 format.]
\ No newline at end of file diff --git a/docs/nuscenes-schema.svg b/docs/nuscenes-schema.svg new file mode 100644 index 00000000..c5ed3865 --- /dev/null +++ b/docs/nuscenes-schema.svg @@ -0,0 +1,3 @@ +[SVG: nuScenes schema diagram. Tables grouped under Vehicle, Extraction, Annotation, and Taxonomy: category*, attribute, lidarseg*, calibrated_sensor*, log, sample_annotation*, ego_pose*, sample_data, sample*, sensor, instance*, scene*, map*, visibility*. Asterisks (*) indicate modifications compared to the nuImages schema. Tables and fields added in nuScenes-lidarseg have a purple background color. lidarseg is implicitly linked via .bin files.]
\ No newline at end of file diff --git a/docs/t4_format_3d_detailed.md b/docs/t4_format_3d_detailed.md new file mode 100644 index 00000000..ef9f0d31 --- /dev/null +++ b/docs/t4_format_3d_detailed.md @@ -0,0 +1,526 @@ +# T4 format 3D data + +**T4 format**, which is based on the nuScenes and nuImages formats, is the dataset format used in TIER IV. +**T4 non-annotated format** is the same dataset format with empty annotation files (attribute, category, instance, sample_annotation, visibility). + +## Version + +- (2021.07.08) T4 format ver1.0: Initial T4 format, conforming to the nuScenes + nuImages format +- (2021.11.30) T4 format ver1.1: Updated format to make it available in ADP +- (2022.06.03) T4 format ver1.2: Updated topics contained in input_bag. Added status.json. Removed occlusion_state from attribute; written to visibility.json instead +- (2022.07.26) T4 format ver1.2: Added a topic contained in input_bag. + +## Directory Structure + +- {T4 dataset ID}/ + - map/ + - lanelet2_map.osm + - pointcloud_map.pcd + - status.json + - annotation/ + - attribute.json + - calibrated_sensor.json + - category.json + - ego_pose.json + - instance.json + - log.json + - map.json + - sample.json + - sample_annotation.json + - sample_data.json + - scene.json + - sensor.json + - visibility.json + - object_ann.json (If 2D annotation exists) + - surface_ann.json (If 2D annotation exists) + - data/ + - /LIDAR_CONCAT + - 0.pcd.bin (point cloud [x, y, z, intensity, ring_idx]) + - 1.pcd.bin + - {frame_id}.pcd.bin + - /CAM_FRONT (If exists) + - 0.png + - 1.png + - {frame_id}.png + - /CAM_FRONT_LEFT (If exists) + - /CAM_FRONT_RIGHT (If exists) + - /CAM_BACK (If exists) + - /CAM_BACK_LEFT (If exists) + - /CAM_BACK_RIGHT (If exists) + - /CAM_TRAFFIC_LIGHT_NEAR (If exists) + - /CAM_TRAFFIC_LIGHT_FAR (If exists) + - input_bag/ + - {rosbag-name}.db3 + - metadata.yaml + +## Coordinate system + +![coordinate_transformation.drawio.svg](coordinate_transformation.drawio.svg) + +## Data + +### Point cloud format + +- directory + +```yaml +- /LIDAR_TOP + - 0.pcd.bin (point cloud [x, y, z, intensity, ring_idx]) + - 1.pcd.bin + - {frame_id}.pcd.bin +``` + +`.pcd.bin` files are originally `numpy.ndarray`s of shape `(N, 5)`. + +Each record is `[x, y, z, intensity, ring_idx(=-1)]`. **x, y, z are given with respect to the base_link.** + +`ring_idx` is not used anymore.
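+
+As a reference for producing such files, a frame could be serialized like this (a minimal NumPy sketch; the `points` array and the output filename are illustrative assumptions):
+
+```python
+import numpy as np
+
+# Assumed example: N points as (x, y, z, intensity, ring_idx), float32.
+points = np.zeros((100, 5), dtype=np.float32)
+points[:, 4] = -1  # ring_idx is unused and fixed to -1
+
+# A .pcd.bin file is simply the raw float32 buffer of the (N, 5) array.
+points.tofile("0.pcd.bin")
+```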
+ +`.pcd.bin` data should be loaded as follows: + +```python +import numpy as np + +# shape (N*5, ) +pcd_data = np.fromfile(path_to_pcd_bin, dtype=np.float32) +# shape (N, 5) +pcd_data = pcd_data.reshape(-1, 5) +``` + +### Image data + +- Make a data directory for each camera + - data: png or jpeg format +- For example, aip_xx1 has 8 cameras + - Cameras for dynamic object recognition (front, front left, front right, back, back left, back right) + - Cameras for traffic light recognition (near, far) + +```yaml +- /data + - /CAM_FRONT + - 0.png + - 1.png + - {frame_id}.png + - /CAM_FRONT_LEFT + - /CAM_FRONT_RIGHT + - /CAM_BACK + - /CAM_BACK_LEFT + - /CAM_BACK_RIGHT + - /CAM_TRAFFIC_LIGHT_NEAR + - /CAM_TRAFFIC_LIGHT_FAR +``` + +### Map data + +- Push lanelet2_map.osm (vector map) and pointcloud_map.pcd (point cloud map) to the dataset server + - Used for rosbag evaluation + +```yaml +- maps/ + - lanelet2_map.osm + - pointcloud_map.pcd +``` + +- (In the future) In the data augmentation layer, rasterize the vector map for training 3D detection models + +### status.json + +- status.json is under construction +- Data (WIP) + - Environment data at the time of rosbag collection + - time + - place + - weather + - Annotation + - annotator + - tool or outsourcing + +### Input bag + +- input_bag is the ros2 bag file from which this whole dataset is generated. +- This ros2 bag file MUST contain only the topics below + - `/localization/kinematic_state` is necessary for Obstacle Segmentation Evaluation. + +```text +/sensing/camera/camera{CAMERA_ID}/image_rect_color/compressed +/sensing/camera/camera{CAMERA_ID}/camera_info +/sensing/gnss/{GNSS_VENDOR}/fix_velocity +/sensing/gnss/{GNSS_VENDOR}/nav_sat_fix +/sensing/gnss/{GNSS_VENDOR}/navpvt +/sensing/imu/{IMU_VENDOR}/imu_raw +/sensing/lidar/{LIDAR_POSITION}/{LIDAR_VENDOR}_packets +/sensing/lidar/concatenated/pointcloud +/sensing/radar/{RADAR_POSITION}/objects_raw +/{VEHICLE_NAME}/from_can_bus +/tf +/tf_static +/localization/kinematic_state +/vehicle/status/velocity_status +``` + +## 3D detection annotation format + +- Almost equivalent to the [nuScenes format](https://www.nuscenes.org/nuscenes#data-format). +- The [official tutorial notebook](https://www.nuscenes.org/nuscenes#tutorials) will help you get familiar with it. + +![nuscenes-schema.svg](nuscenes-schema.svg) from [the official site](https://www.nuscenes.org/nuscenes#data-format). + +### Overview on availability of json files + +| File | Availability\* | +| :--------------------- | -------------------: | +| attribute.json | completely | +| calibrated_sensor.json | completely + added | +| category.json | partially | +| ego_pose.json | completely + changed | +| instance.json | completely | +| log.json | dummy file | +| map.json | dummy file | +| sample.json | completely | +| sample_annotation.json | partially | +| sample_data.json | partially | +| scene.json | partially | +| sensor.json | completely | +| visibility.json | completely | + +Availability: + +- completely: the same format as that of nuScenes +- partially: lacks one or more items compared to nuScenes +- added: one or more items are newly added +- changed: one or more items are changed +- dummy file: the file exists, but the contents are empty. + +### attribute.json + +#### Description + +An attribute is a property of an instance that can change while the category remains the same. +e.g.: + +- pedestrian + - sitting_lying_down + - standing + - moving + +#### Items + +- attribute + - "token": -- Unique record identifier. + - "name": -- Attribute name. + - "description": -- Attribute description.
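+
+To illustrate how the token-based foreign keys in these tables resolve, the following sketch joins an annotation back to its attribute names using only the standard `json` module (the dataset path is an assumption; any T4 dataset directory works):
+
+```python
+import json
+
+anno_dir = "./data/t4_format/example_dataset/annotation"  # assumed path
+
+# Map attribute tokens to their human-readable names.
+with open(f"{anno_dir}/attribute.json") as f:
+    attr_name = {a["token"]: a["name"] for a in json.load(f)}
+
+with open(f"{anno_dir}/sample_annotation.json") as f:
+    sample_annotations = json.load(f)
+
+# Each annotation carries a list of attribute tokens; join them to names.
+first = sample_annotations[0]
+print([attr_name[t] for t in first["attribute_tokens"]])
+```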
+ +### calibrated_sensor.json + +#### Description + +Definition of a particular sensor (lidar, camera) as calibrated on a particular vehicle. All extrinsic parameters are given with respect to ~~the ego vehicle body frame~~ the world origin. Although the distortion parameters (k1, k2, p1, p2, k3) are given, calibration using them has not been performed, so if the cameras used to collect the data have any distortion, it is your responsibility to account for these parameters. + +#### Caution + +- Currently, this converter does not consider the calibration of distortion. + Camera distortion parameters (k1, k2, p1, p2, k3) are temporarily stored in calibrated_sensor.json. + **As long as there is no distortion, there is no problem. If not, it does not work correctly for now.** +- While all extrinsic parameters are given with respect to the ego vehicle body frame in the original nuScenes dataset, they are given with respect to **the world coordinate** in this format for now because the information about ego pose is not available. +- The translation and rotation in a record of LiDAR data are equal to those of the base_link. + +#### Items + +- calibrated_sensor + - "token": [str] -- Unique record identifier. + - "sensor_token": [str] -- Foreign key pointing to the sensor type. + - "translation": [float] [3] -- Coordinate system origin in meters: (x, y, z). + - "rotation": [float] [4] -- Coordinate system orientation as quaternion: (w, x, y, z). + - "camera_intrinsic": [float] [3, 3] -- Intrinsic camera calibration. Empty list `[]` for sensors other than cameras. + - "camera_distortion": [float] [5] -- Distortion parameters (k1, k2, p1, p2, k3). Empty list `[]` for sensors other than cameras. **(Added)** + +### category.json + +#### Description + +Taxonomy of object categories (e.g. vehicle.truck, vehicle.car, pedestrian.adult). + +#### Caution + +The item "description" for the category is not implemented for now. + +#### Items + +- category + - "token": [str] -- Unique record identifier. + - "name": [str] -- Category name. The latest format is "class" (e.g. car, truck), but DBv1.0 and DBv2.0 use "category.class" format (e.g. vehicle.car). + - "description": [str] -- Category description. Empty string `""` for now. **(Not available)** + +### ego_pose.json + +#### Description + +> Ego vehicle poses at a particular timestamp. Given with respect to the global coordinate system of the log's map. The ego_pose is the output of a lidar map-based localization algorithm described in [our paper](https://arxiv.org/abs/1903.11027). + +#### Items + +- ego_pose + - "token": [str] -- Unique record identifier. + - "translation": [float] [3] -- Coordinate system origin in meters: x, y, z. ~~Note that z is always 0~~. **(changed)** + - "rotation": [float] [4] -- Coordinate system orientation as quaternion: w, x, y, z. + - "timestamp": [int] -- Unix time stamp (μ sec). + +#### Future extension + +- We plan to extend the format with twist data + - generated when converting rosbag to the T4 dataset +- ego_pose + - "token": [str] -- Unique record identifier. + - "translation": [float] [3] -- Coordinate system origin in meters: x, y, z. ~~Note that z is always 0.~~ + - "rotation": [float] [4] -- Coordinate system orientation as quaternion: w, x, y, z. + - "twist": [float] [6] -- Coordinate system origin in m/s and deg/s: vel_x, vel_y, vel_z, yaw_rate, pitch_rate, roll_rate. + - "accel": [float] [2] -- Coordinate system origin in m/s^2: long_accel, lat_accel. + - "timestamp": [int] -- Unix time stamp (μ sec).
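+
+Because a pose is stored as a translation plus a (w, x, y, z) quaternion, an ego_pose record can be converted into a 4x4 homogeneous transform that maps points from the base_link frame into the global (map) frame. A minimal NumPy sketch (the record values are hypothetical):
+
+```python
+import numpy as np
+
+# Hypothetical ego_pose record.
+ego_pose = {
+    "translation": [81526.5, 50383.5, 34.9],
+    "rotation": [0.659, 0.0, 0.0, 0.752],  # (w, x, y, z)
+}
+
+w, x, y, z = ego_pose["rotation"]
+# Rotation matrix of a unit quaternion.
+rot = np.array(
+    [
+        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z), 2 * (x * z + w * y)],
+        [2 * (x * y + w * z), 1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
+        [2 * (x * z - w * y), 2 * (y * z + w * x), 1 - 2 * (x * x + y * y)],
+    ]
+)
+
+# base_link -> global transform at this record's timestamp.
+tf = np.eye(4)
+tf[:3, :3] = rot
+tf[:3, 3] = ego_pose["translation"]
+```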
+ +### instance.json + +#### Description + +A particular object instance. This table is an enumeration of all object instances we observed. Note that instances are not tracked across scenes (i.e., even if they are physically the same object, instance A in scene01 and instance B in scene02 are treated as different instances). + +e.g.: + +- Frame: t + - Category: Car + - car01 (instance) + - car02 (instance) + - car03 (instance) +- Frame: t+1 + - Category: Car + - car01 (instance) + - car02 (instance) + - ~~car03 (instance)~~ (does not appear because car03 went out of sight) + +#### Items + +- instance + - "token": [str] -- Unique record identifier. + - "category_token": [str] -- Foreign key pointing to the category of the instance. + - "instance_name": [str] -- Dataset Name + Instance ID defined in annotation tool. + - "nbr_annotations": [int] -- Number of annotations of this instance. + - "first_annotation_token": [str] -- Foreign key. Points to the first annotation of this instance. + - "last_annotation_token": [str] -- Foreign key. Points to the last annotation of this instance. + +### log.json + +#### Description + +Information about the log from which the data was extracted. + +#### Caution + +logfile, vehicle, date_captured, and location are not provided for now. +This information, or the same kind of information, will be stored in collecting_collection.yml. + +#### Items + +- log + - "token": [str] -- Unique record identifier. + - "logfile": [str] -- path to logfile. **(Not available)** + - "vehicle": [str] -- Vehicle name e.g., `jpn-taxi`. **(Not available)** + - "date_captured": [str] -- Date (YEAR-MONTH-DAY-HOUR-MIN-SEC). e.g. `2020-08-29-09-30-13` **(Not available)** + - "location": [str] -- Area where log was captured. **(Not available)** + +### map.json + +- Currently, map.json is a **dummy file** + +#### Caution + +map.json and map data are currently not output. Since they will be used in tasks like tracking and prediction, they will be implemented shortly. + +Items related to maps can be extracted from data submitted to Scale_AI for annotation. How to update these processes is **under consideration**. + +#### Description + +> Map data that is stored as binary semantic masks from a top-down view. + +#### Items + +- map + - "token": [str] -- Unique record identifier. + - "log_tokens": [str] [n] -- Foreign keys. + - "category": [str] -- Map category, currently only semantic_prior for drivable surface and sidewalk. + - "filename": [str] -- Relative path to the file with the map mask. + +### sample.json + +#### Description + +A sample is an annotated keyframe at 2 Hz. The timestamp of a sample is the same as that of the corresponding LiDAR sample data. + +#### Items + +- sample + - "token": [str] -- Unique record identifier. + - "timestamp": [int] -- Unix time stamp (μ sec). (= 'timestamp' in a LiDAR sample data) + - "scene_token": [str] -- Foreign key pointing to the scene. + - "next": [str] -- Foreign key. Sample that follows this in time. Empty if end of scene. + - "prev": [str] -- Foreign key. Sample that precedes this in time. Empty if start of scene. + +### sample_annotation.json + +#### Description + +An annotation for an object in a sample. All location data is given with respect to **the global coordinate system**. + +#### Items + +- sample_annotation + - "token": [str] -- Unique record identifier. + - "sample_token": [str] -- Foreign key. NOTE: this points to a sample NOT a sample_data since annotations are done on the sample level taking all relevant sample_data into account. + - "instance_token": [str] -- Foreign key.
Which object instance is this annotating. An instance can have multiple annotations over time. + - "attribute_tokens": [str] [n] -- Foreign keys. List of attributes for this annotation. Attributes can change over time. + - "visibility_token": [str] -- Foreign key. Visibility may also change over time. If no visibility is annotated, the token is an empty string. + - "translation": [float] [3] -- The center location of bounding box in meters as (center_x, center_y, center_z). + - "size": [float] [3] -- Bounding box size in meters as (width, length, height). + - "rotation": [float] [4] -- Bounding box orientation as quaternion: w, x, y, z. + - "num_lidar_pts": [int] -- Number of lidar points in this box. Points are counted during the lidar sweep identified with this sample. + - "num_radar_pts": [int] -- Number of radar points in this box. Points are counted during the radar sweep identified with this sample. This number is summed across all radar sensors without any invalid point filtering. **(fixed to `0`)** + - "next": [str] -- Foreign key. Sample annotation from the same object instance that follows this in time. Empty string `""`if this is the last annotation for this object. + - "prev": [str] -- Foreign key. Sample annotation from the same object instance that precedes this in time. Empty string `""` if this is the first annotation for this object. + +### sample_data.json + +#### Description + +A sensor data e.g. image, point cloud or radar return. For sample_data with is_key_frame=True, the time-stamps should be very close to the sample it points to. For non key-frames the sample_data points to the sample that follows closest in time. + +#### Caution + +- 'is_key_frame': For sample_data with is_key_frame=True, the time-stamps should be very close to the sample it points to. For non key-frames the sample_data points to the sample that follows closest in time. + - this feature is not yet implemented. + +#### Items + +- sample_data + - "token": [str] -- Unique record identifier. + - "sample_token": [str] -- Foreign key. Sample to which this sample_data is associated. + - "ego_pose_token" : [str] -- Foreign key. + - "calibrated_sensor_token": [str] -- Foreign key. + - "filename": [str] -- Relative path from a dataset root directory to a file. + - "fileformat": [str] -- Data file format. (e.g., png, pcd.bin) + - "width": [int] -- If the sample data is an image, this is the image width in pixels. + - "height": [int] -- If the sample data is an image, this is the image height in pixels. + - "timestamp": [int] -- Unix time stamp (μ sec). + - "is_key_frame" : [bool] -- True if sample_data is part of key_frame, else False. + - "next": [str] -- Foreign key. Sample data from the same sensor that follows this in time. Empty if end of scene. + - "prev": [str] -- Foreign key. Sample data from the same sensor that precedes this in time. Empty if start of scene. + +### scene.json + +#### Description + +A scene is a sequence of consecutive frames extracted from a log. In TIER IV format, only one scene is included in a single dataset. +(scene > sample(=frame) > annotation) + +#### Caution + +- name: scene name is defined as "{PROJECT_NAME}\_{SCENE_TOKEN}". If the scene name is provided by other files, it should be replaced. +- description: not provided for now. + +#### Items + +- scene + - "token": [str] -- Unique record identifier. + - "name" : [str] -- Short string identifier. Defined as `{project_name}_{scene_token}`. + - "description": [str] -- description of the scene. 
**(Not available)** + - "log_token": [str] -- Foreign key. Points to log from where the data was extracted. + - "nbr_samples": [int] -- Number of samples in this scene. + - "first_sample_token": [str] -- Foreign key. Points to the first sample in scene. + - "last_sample_token": [str] -- Foreign key. Points to the last sample in scene. + +### sensor.json + +No change here. + +#### Description + +A description of sensor types. + +#### Items + +- sensor + - "token": [str] -- Unique record identifier. + - "channel": [str] -- Sensor channel name. + - "modality": [str] {camera, lidar, radar} -- Sensor modality. + +### visibility.json + +A description of sensor occlusion status. + +#### Description + +The visibility of an instance is the fraction of the annotation visible in all 6 images. By default, visibility is binned into 4 bins: + +- 1: 0% ~ 40% +- 2: 40% ~ 60% +- 3: 60% ~ 80% +- 4: 80% ~ 100% + +For the T4 annotated dataset, visibility is classified into the 4 bins below: + +- 1: full: "No occlusion of the object." +- 2: most: "Object is occluded, but by less than 50%." +- 3: partial: "The object is occluded by more than 50% (but not completely)." +- 4: none: "The object is 90-100% occluded and no points/pixels are visible in the label." + +**Note that this annotation is not sensitive to boundaries.** + +#### Items + +- visibility + - "token": [str] -- Unique record identifier. + - "level": [str] -- Visibility level. + - "description": [str] -- Description of visibility level. + +### Reference + +- nuScenes: <https://www.nuscenes.org/nuscenes> + +## 2D detection annotation format and its differences from nuImages (under construction) + +```yaml +- annotation/ + - object_ann.json + - surface_ann.json +``` + +### nuImages format + +- **_Almost_** equivalent to the [nuImages format](https://www.nuscenes.org/nuimages). + - Annotation attributes may be updated frequently +- object_ann.json: The annotation of a foreground object (car, bike, pedestrian) in an image. Each foreground object is annotated with a 2d box, a 2d instance mask and category-specific attributes. + +```json +[ + { + "token": -- Unique record identifier. + "sample_data_token": -- Foreign key pointing to the sample data, which must be a keyframe image. + "instance_token": -- Foreign key. Which object instance is this annotating. This token is shared with the corresponding record in sample_annotation.json. + "category_token": -- Foreign key pointing to the object category. + "attribute_tokens": [n] -- Foreign keys. List of attributes for this annotation. + "bbox": [4] -- Annotated amodal bounding box. Given as [xmin, ymin, xmax, ymax]. + "mask": -- Run length encoding of instance mask using the pycocotools package. + } +] +``` + +- surface_ann.json: The annotation of a background object (driveable surface) in an image. Each background object is annotated with a 2d semantic segmentation mask. + +```json +[ + { + "token": -- Unique record identifier. + "sample_data_token": -- Foreign key pointing to the sample data, which must be a keyframe image. + "category_token": -- Foreign key pointing to the surface category. + "mask": -- Run length encoding of segmentation mask using the pycocotools package. + } +] +``` diff --git a/docs/tools_overview.md b/docs/tools_overview.md new file mode 100644 index 00000000..1b0e8030 --- /dev/null +++ b/docs/tools_overview.md @@ -0,0 +1,110 @@ +# Tools Overview + +This document gives a brief overview of each conversion script.
+ +![data_collection_conversion](data_collection_conversion.drawio.svg) + +## Common + +The commands below are assumed to be run in the poetry shell set up in [README.md](../README.md). +Run this to start a poetry shell: + +```bash +source /opt/ros/${ROS_DISTRO}/setup.bash +source ${ROS_WORKSPACE_WITH_CUSTOM_MESSAGES}/install/setup.bash +poetry shell +``` + +## rosbag2 to T4 non-annotated format data + +input: rosbag2 + +output: T4 non-annotated format data + +```bash +python -m perception_dataset.convert --config config/convert_rosbag2_to_non_annotated_t4_sample.yaml +# if you want to overwrite t4-format data, use the --overwrite option +``` + +## Deepen + +### T4 format to Deepen format + +input: T4 format data + +output: Deepen format data + +```bash +python -m perception_dataset.convert --config config/convert_t4_to_deepen_sample.yaml +``` + +### Download Deepen annotations + +`DEEPEN_CLIENT_ID` is the `xxx` part of the URL `https://tools.deepen.ai/workspace/xxx/datasets` after logging in to Deepen. +`DEEPEN_ACCESS_TOKEN` can be obtained from [Deepen Tools](https://tools.deepen.ai/workspace/xxx/developer/tokens/developers). + +```bash +export DEEPEN_CLIENT_ID='YOUR_DEEPEN_CLIENT_ID' +export DEEPEN_ACCESS_TOKEN='YOUR_DEEPEN_ACCESS_TOKEN' +python -m perception_dataset.deepen.download_annotations --config config/convert_deepen_to_t4_sample.yaml +``` + +### Deepen format to T4 format + +input: T4 non-annotated format data + Deepen annotations + +output: T4 format data + +```bash +python -m perception_dataset.convert --config config/convert_deepen_to_t4_sample.yaml +``` + +## Rosbag with objects + +### Synthetic bag to T4 format + +See [About Synthetic Data](about_synthetic_data.md). + +input: rosbag2 + +output: T4 format data + +#### Messages + +| Topic Name | Required | Message Type | +| ----------------------------------------------------------- | -------- | ------------------------------------------------- | +| `/ground_truth/filtered/objects` or `/ground_truth/objects` | o | `autoware_perception_msgs/msg/DynamicObjectArray` | +| `/sensing/lidar/concatenated/pointcloud` | o | `sensor_msgs/msg/PointCloud2` | +| `/tf` | o | `tf2_msgs/msg/TFMessage` | +| `/tf_static` | o | `tf2_msgs/msg/TFMessage` | +| | | `sensor_msgs/msg/CompressedImage` | +| | | `sensor_msgs/msg/CameraInfo` | + +#### script + +```bash +python -m perception_dataset.convert --config config/rosbag2_to_t4/convert_synthetic_data.yaml +``` + +### Pseudo-labeled bag to T4 format + +input: rosbag2 + +output: T4 format data + +#### Messages + +| Topic Name | Required | Message Type | +| ---------------------------------------------------------------------------- | -------- | --------------------------------------------------------------------------------------------------------- | +| `/perception/object_recognition/detection/apollo/objects` or any other value | o | `autoware_auto_perception_msgs/msg/TrackedObjects` or `autoware_auto_perception_msgs/msg/DetectedObjects` | +| `/sensing/lidar/concatenated/pointcloud` or any other value | o | `sensor_msgs/msg/PointCloud2` | +| `/tf` | o | `tf2_msgs/msg/TFMessage` | +| `/tf_static` | o | `tf2_msgs/msg/TFMessage` | +| `/sensing/camera/camera{ID}/image_rect_color/compressed` | | `sensor_msgs/msg/CompressedImage` | +| `/sensing/camera/camera{ID}/camera_info` | | `sensor_msgs/msg/CameraInfo` | + +#### script + +```bash +python -m perception_dataset.convert --config config/rosbag2_to_t4/convert_synthetic_data.yaml +``` diff --git a/makefile b/makefile new file mode 100644 index 00000000..1c35031e
--- /dev/null +++ b/makefile @@ -0,0 +1,15 @@ +.PHONY: install_prettier +install_prettier: + npm install + +.PHONY: format_md +format_md: install_prettier + npx prettier --write . + +.PHONY: req +req: + pipenv lock -r > requirements.txt + +.PHONY: req_test +req_test: + pipenv lock -r -d > requirements_test.txt diff --git a/perception_dataset/__init__.py b/perception_dataset/__init__.py new file mode 100644 index 00000000..c5af7d7d --- /dev/null +++ b/perception_dataset/__init__.py @@ -0,0 +1,3 @@ +# flake8: noqa + +from perception_dataset import deepen, rosbag2, t4_dataset diff --git a/perception_dataset/abstract_converter.py b/perception_dataset/abstract_converter.py new file mode 100644 index 00000000..5b42d6ff --- /dev/null +++ b/perception_dataset/abstract_converter.py @@ -0,0 +1,16 @@ +from abc import ABCMeta, abstractmethod + + +class AbstractConverter(object, metaclass=ABCMeta): + def __init__( + self, + input_base: str, + output_base: str, + ): + super().__init__() + self._input_base = input_base + self._output_base = output_base + + @abstractmethod + def convert(self): + raise NotImplementedError() diff --git a/perception_dataset/abstract_converter_to_t4.py b/perception_dataset/abstract_converter_to_t4.py new file mode 100644 index 00000000..674fc9ae --- /dev/null +++ b/perception_dataset/abstract_converter_to_t4.py @@ -0,0 +1,17 @@ +from abc import ABCMeta, abstractmethod +from typing import Tuple + + +class AbstractAnnotatedToT4Converter(object, metaclass=ABCMeta): + def __init__( + self, + input_base: str, + output_base: str, + ) -> None: + super().__init__() + self._input_base = input_base + self._output_base = output_base + + @abstractmethod + def convert(self) -> Tuple[str, str]: + raise NotImplementedError() diff --git a/perception_dataset/configurations.py b/perception_dataset/configurations.py new file mode 100644 index 00000000..803360e9 --- /dev/null +++ b/perception_dataset/configurations.py @@ -0,0 +1,9 @@ +import datetime +import os + + +class Configurations(object): + log_format = os.getenv("LOG_FORMAT", "text") + log_level = os.getenv("LOG_LEVEL", "INFO") + log_file_path = os.getenv("LOG_FILE_PATH", f"/tmp/log/{datetime.date.today()}.log") + slack_token = os.getenv("SLACK_TOKEN", None) diff --git a/perception_dataset/constants.py b/perception_dataset/constants.py new file mode 100644 index 00000000..bbe1ac5c --- /dev/null +++ b/perception_dataset/constants.py @@ -0,0 +1,125 @@ +from enum import Enum +from typing import Dict, List + + +class T4_FORMAT_DIRECTORY_NAME(Enum): + ANNOTATION = "annotation" + DATA = "data" + + +class SENSOR_MODALITY_ENUM(Enum): + LIDAR = "lidar" + CAMERA = "camera" + RADAR = "radar" + + +class SENSOR_ENUM(Enum): + CAM_BACK_LEFT = { + "channel": "CAM_BACK_LEFT", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_FRONT = { + "channel": "CAM_FRONT", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_FRONT_RIGHT = { + "channel": "CAM_FRONT_RIGHT", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_BACK_RIGHT = { + "channel": "CAM_BACK_RIGHT", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_BACK = { + "channel": "CAM_BACK", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_FRONT_LEFT = { + "channel": "CAM_FRONT_LEFT", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_TRAFFIC_LIGHT_NEAR = { + "channel": "CAM_TRAFFIC_LIGHT_NEAR", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, + } + CAM_TRAFFIC_LIGHT_FAR = { + "channel": "CAM_TRAFFIC_LIGHT_FAR", + "modality": SENSOR_MODALITY_ENUM.CAMERA.value, 
+ } + LIDAR_TOP = { + "channel": "LIDAR_TOP", + "modality": SENSOR_MODALITY_ENUM.LIDAR.value, + } + LIDAR_CONCAT = { + "channel": "LIDAR_CONCAT", + "modality": SENSOR_MODALITY_ENUM.LIDAR.value, + } + RADAR_FRONT = { + "channel": "RADAR_FRONT", + "modality": SENSOR_MODALITY_ENUM.RADAR.value, + } + RADAR_FRONT_RIGHT = { + "channel": "RADAR_FRONT_RIGHT", + "modality": SENSOR_MODALITY_ENUM.RADAR.value, + } + RADAR_FRONT_LEFT = { + "channel": "RADAR_FRONT_LEFT", + "modality": SENSOR_MODALITY_ENUM.RADAR.value, + } + RADAR_BACK_LEFT = { + "channel": "RADAR_BACK_LEFT", + "modality": SENSOR_MODALITY_ENUM.RADAR.value, + } + RADAR_BACK_RIGHT = { + "channel": "RADAR_BACK_RIGHT", + "modality": SENSOR_MODALITY_ENUM.RADAR.value, + } + + @staticmethod + def has_value(item) -> bool: + return item in [v.value for v in SENSOR_ENUM.__members__.values()] + + @staticmethod + def has_channel(item) -> bool: + return item in [v.value["channel"] for v in SENSOR_ENUM.__members__.values()] + + @staticmethod + def values() -> List[Dict[str, str]]: + return [v.value for v in SENSOR_ENUM.__members__.values()] + + def get_sensor_modality(sensor_channel: str) -> str: + for sensor in SENSOR_ENUM.__members__.values(): + if sensor.value["channel"] == sensor_channel: + return sensor.value["modality"] + raise ValueError(f"No sensor_channel {sensor_channel}") + + +class EXTENSION_ENUM(Enum): + JPG = ".jpg" + XML = ".xml" + JSON = ".json" + PNG = ".png" + TXT = ".txt" + CSV = ".csv" + ONNX = ".onnx" + PCD = ".pcd" + BIN = ".bin" + PCDBIN = ".pcd.bin" + + @staticmethod + def has_value(item) -> bool: + return item in [v.value for v in EXTENSION_ENUM.__members__.values()] + + @staticmethod + def values() -> List[str]: + return [v.value for v in EXTENSION_ENUM.__members__.values()] + + +def constant(f): + def fset(self, value): + raise TypeError + + def fget(self): + return f() + + return property(fget, fset) diff --git a/perception_dataset/convert.py b/perception_dataset/convert.py new file mode 100644 index 00000000..a5dec04f --- /dev/null +++ b/perception_dataset/convert.py @@ -0,0 +1,131 @@ +import argparse + +import yaml + +from perception_dataset.rosbag2.converter_params import Rosbag2ConverterParams +from perception_dataset.utils.logger import configure_logger + +logger = configure_logger(modname=__name__) + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + help="path to config file", + ) + parser.add_argument( + "--overwrite", + action="store_true", + help="overwrite files if exist in output directory", + ) + parser.add_argument( + "--without_compress", + action="store_true", + help="do NOT compress rosbag/non-annotated-t4", + ) + args = parser.parse_args() + + with open(args.config, "r") as f: + config_dict = yaml.safe_load(f) + + task = config_dict["task"] + if task == "convert_rosbag2_to_non_annotated_t4": + from perception_dataset.rosbag2.rosbag2_to_non_annotated_t4_converter import ( + Rosbag2ToNonAnnotatedT4Converter, + ) + + param_args = { + "task": config_dict["task"], + "overwrite_mode": args.overwrite, + "without_compress": args.without_compress, + **config_dict["conversion"], + } + params = Rosbag2ConverterParams(**param_args) + converter = Rosbag2ToNonAnnotatedT4Converter(params) + logger.info( + f"[BEGIN] Converting Rosbag2 ({params.input_base}) to Non Annotated T4 data ({params.output_base})" + ) + converter.convert() + logger.info( + f"[END] Converting Rosbag2 ({params.input_base}) to Non Annotated T4 data ({params.output_base})" + ) + elif task == 
"convert_t4_to_deepen": + from perception_dataset.deepen.non_annotated_t4_to_deepen_converter import ( + NonAnnotatedT4ToDeepenConverter, + ) + + input_base = config_dict["conversion"]["input_base"] + output_base = config_dict["conversion"]["output_base"] + camera_sensors = config_dict["conversion"]["camera_sensors"] + annotation_hz = config_dict["conversion"]["annotation_hz"] + workers_number = config_dict["conversion"]["workers_number"] + + converter = NonAnnotatedT4ToDeepenConverter( + input_base=input_base, + output_base=output_base, + camera_sensors=camera_sensors, + annotation_hz=annotation_hz, + workers_number=workers_number, + ) + + logger.info( + f"[BEGIN] Converting T4 dataset ({input_base}) to deepen format dataset ({output_base})" + ) + converter.convert() + logger.info( + f"[Done] Converting T4 dataset ({input_base}) to deepen format dataset ({output_base})" + ) + elif task == "convert_deepen_to_t4": + from perception_dataset.deepen.deepen_to_t4_converter import DeepenToT4Converter + + input_base = config_dict["conversion"]["input_base"] + input_anno_file = config_dict["conversion"]["input_anno_file"] + output_base = config_dict["conversion"]["output_base"] + dataset_corresponding = config_dict["conversion"]["dataset_corresponding"] + description = config_dict["description"] + input_bag_base = config_dict["conversion"]["input_bag_base"] + topic_list_yaml_path = config_dict["conversion"]["topic_list"] + ignore_interpolate_label = False + if "ignore_interpolate_label" in config_dict["conversion"]: + ignore_interpolate_label = config_dict["conversion"]["ignore_interpolate_label"] + with open(topic_list_yaml_path) as f: + topic_list_yaml = yaml.safe_load(f) + + converter = DeepenToT4Converter( + input_base=input_base, + output_base=output_base, + input_anno_file=input_anno_file, + dataset_corresponding=dataset_corresponding, + overwrite_mode=args.overwrite, + description=description, + input_bag_base=input_bag_base, + topic_list=topic_list_yaml, + ignore_interpolate_label=ignore_interpolate_label, + ) + + logger.info(f"[BEGIN] Converting Deepen data ({input_base}) to T4 data ({output_base})") + converter.convert() + logger.info(f"[END] Converting Deepen data ({input_base}) to T4 data ({output_base})") + elif task == "convert_rosbag2_to_t4": + from perception_dataset.rosbag2.rosbag2_to_t4_converter import Rosbag2ToT4Converter + + param_args = { + "task": config_dict["task"], + "overwrite_mode": args.overwrite, + **config_dict["conversion"], + } + converter_params = Rosbag2ConverterParams(**param_args) + converter = Rosbag2ToT4Converter(converter_params) + + logger.info("[BEGIN] Converting ros2bag output by simulator --> T4 Format Data") + converter.convert() + logger.info("[END] Conversion Completed") + + else: + raise NotImplementedError() + + +if __name__ == "__main__": + main() diff --git a/perception_dataset/deepen/__init__.py b/perception_dataset/deepen/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/deepen/deepen_to_t4_converter.py b/perception_dataset/deepen/deepen_to_t4_converter.py new file mode 100644 index 00000000..707bbf4b --- /dev/null +++ b/perception_dataset/deepen/deepen_to_t4_converter.py @@ -0,0 +1,271 @@ +from collections import defaultdict +import json +import os.path as osp +import shutil +from typing import Any, Dict, List + +from nuscenes.nuscenes import NuScenes + +from perception_dataset.abstract_converter import AbstractConverter +from perception_dataset.rosbag2.rosbag2_converter import Rosbag2Converter +from 
perception_dataset.t4_dataset.annotation_files_generator import AnnotationFilesGenerator +from perception_dataset.utils.logger import configure_logger +import perception_dataset.utils.misc as misc_utils + +logger = configure_logger(modname=__name__) + + +class DeepenToT4Converter(AbstractConverter): + def __init__( + self, + input_base: str, + output_base: str, + input_anno_file: str, + dataset_corresponding: Dict[str, str], + overwrite_mode: bool, + description: Dict[str, Dict[str, str]], + input_bag_base: str, + topic_list: Dict[str, List[str]], + t4_dataset_dir_name: str = "t4_dataset", + ignore_interpolate_label: bool = False, + ): + super().__init__(input_base, output_base) + + self._input_anno_file: str = input_anno_file + self._t4data_name_to_deepen_dataset_id: Dict[str, str] = dataset_corresponding + self._overwrite_mode: bool = overwrite_mode + self._description: Dict[str, Dict[str, str]] = description + self._input_bag_base: str = input_bag_base + self._t4_dataset_dir_name: str = t4_dataset_dir_name + self._start_sec: float = 0 + self._end_sec: float = 1e10 + self._ignore_interpolate_label: bool = ignore_interpolate_label + + if "topic_list" in topic_list: + allow_topics = topic_list["topic_list"] + elif isinstance(topic_list, list): + allow_topics = topic_list + else: + allow_topics = [] + mandatory_topics = ( + topic_list["mandatory_topic_list"] if "mandatory_topic_list" in topic_list else [] + ) + mandatory_topics = [] if mandatory_topics is None else mandatory_topics + + self._topic_list: List[str] = allow_topics + self._mandatory_topics: List[str] = mandatory_topics + + def convert(self): + with open(self._input_anno_file) as f: + deepen_anno_json = json.load(f) + + scenes_anno_dict: Dict[str, Dict[str, Any]] = self._format_deepen_annotation( + deepen_anno_json["labels"] + ) + + for t4data_name in self._t4data_name_to_deepen_dataset_id: + is_dir_exist: bool = False + output_dir = osp.join(self._output_base, t4data_name, self._t4_dataset_dir_name) + input_dir = osp.join(self._input_base, t4data_name) + if self._input_bag_base is not None: + input_bag_dir = osp.join(self._input_bag_base, t4data_name) + if osp.exists(output_dir): + logger.error(f"{output_dir} already exists.") + is_dir_exist = True + + if self._overwrite_mode or not is_dir_exist: + shutil.rmtree(output_dir, ignore_errors=True) + self._copy_data(input_dir, output_dir) + if self._input_bag_base is not None: + self._find_start_end_time(input_dir) + self._make_rosbag(input_bag_dir, output_dir) + else: + raise ValueError("If you want to overwrite files, use --overwrite option.") + + for t4data_name, dataset_id in self._t4data_name_to_deepen_dataset_id.items(): + output_dir = osp.join(self._output_base, t4data_name, self._t4_dataset_dir_name) + input_dir = osp.join(self._input_base, t4data_name) + annotation_files_generator = AnnotationFilesGenerator(description=self._description) + annotation_files_generator.convert_one_scene( + input_dir=input_dir, + output_dir=output_dir, + scene_anno_dict=scenes_anno_dict[dataset_id], + dataset_name=t4data_name, + ) + + def _copy_data(self, input_dir: str, output_dir: str): + if input_dir != output_dir: + logger.info(f"Copying {input_dir} to {output_dir} ... 
") + if osp.exists(output_dir): + shutil.rmtree(output_dir) + shutil.copytree(input_dir, output_dir) + logger.info("Done!") + + def _find_start_end_time(self, t4_dataset_dir): + nusc = NuScenes(version="annotation", dataroot=t4_dataset_dir, verbose=False) + end_nusc_timestamp = 0 + for frame_index, sample in enumerate(nusc.sample): + if frame_index == 0: + self._start_sec = ( + misc_utils.nusc_timestamp_to_unix_timestamp(sample["timestamp"]) - 2.0 + ) + if sample["timestamp"] > end_nusc_timestamp: + end_nusc_timestamp = sample["timestamp"] + self._end_sec = misc_utils.nusc_timestamp_to_unix_timestamp(end_nusc_timestamp) + 2.0 + + def _make_rosbag(self, input_bag_dir: str, output_dir: str): + logger.info(f"Copying {input_bag_dir} to {output_dir} ... ") + output_bag_dir_temp: str = osp.join(output_dir, osp.basename(input_bag_dir)) + output_bag_dir: str = osp.join(output_dir, "input_bag") + converter = Rosbag2Converter( + input_bag_dir, + output_bag_dir_temp, + self._topic_list, + self._start_sec, + self._end_sec, + self._mandatory_topics, + ) + converter.convert() + shutil.move(output_bag_dir_temp, output_bag_dir) + + def _convert_occulusion_to_visibility(self, name: str) -> str: + if name == "full": + return "none" + elif name == "partial": + return "most" + elif name == "most": + return "partial" + else: + return "full" + + def _format_deepen_annotation(self, label_dicts: List[Dict[str, Any]]): + """ + + e.g.: + [ + { + "dataset_id": "DOnC2vK05ojPr7qiqCsk2Ee7", + "file_id": "0.pcd", + "label_category_id": "car", + "label_id": "car:1", + "label_type": "3d_bbox", + "project_id": "defaultproject", + "stage_id": "QA", + "attributes": { + "state": "moving", + "occlusion": "none", + "cycle_state": "with_rider" + }, + "attributes_source": { + "state": "manual", + "occlusion": "manual", + "cycle_state": "manual" + }, + "create_time_millis": 1634623252175, + "label_set_id": "default", + "labeller_email": "grp-mlops-deepen3@tier4.jp", + "sensor_id": "lidar", + "three_d_bbox": { + "cx": 81526.54828555016, + "cy": 50383.480369180215, + "cz": 34.93298238813448, + "h": 1.5030299457129388, + "l": 4.895038637695593, + "w": 2.107137758889027, + "quaternion": { + "x": 0, + "y": 0, + "z": 0.7522213131298905, + "w": 0.6589105372303157 + } + }, + "update_time_millis": 1634623252175, + "user_id": "grp-mlops-deepen1@tier4.jp", + "version": 782 + }, + ] + + Args: + anno_path (str): path to the deepen annotation file + """ + anno_dict: Dict[str, Dict[int, List[Dict[str, Any]]]] = {} + for label_dict in label_dicts: + if ( + self._ignore_interpolate_label + and label_dict["labeller_email"] == "auto_interpolation" + ): + continue + dataset_id = label_dict["dataset_id"] + file_id = int(label_dict["file_id"].split(".")[0]) + + if dataset_id not in anno_dict: + anno_dict[dataset_id] = defaultdict(list) + + anno_label_category_id: str = label_dict["label_category_id"] + anno_label_id: str = label_dict["label_id"] + # in case the attributes is not set + if "attributes" not in label_dict: + anno_attributes = {} + else: + anno_attributes: Dict[str, str] = label_dict["attributes"] + if "Occlusion_State" in anno_attributes: + visibility: str = self._convert_occulusion_to_visibility( + anno_attributes["Occlusion_State"] + ) + elif "occlusion_state" in anno_attributes: + visibility: str = self._convert_occulusion_to_visibility( + anno_attributes["occlusion_state"] + ) + else: + visibility: str = "Not available" + label_t4_dict: Dict[str, Any] = { + "category_name": anno_label_category_id, + "instance_id": anno_label_id, + 
"attribute_names": [ + f"{name.lower()}.{state}" for name, state in anno_attributes.items() + ], + "visibility_name": visibility, + } + if label_dict["sensor_id"] == "lidar" or label_dict["label_type"] == "3d_bbox": + anno_three_d_bbox: Dict[str, str] = label_dict["three_d_bbox"] + label_t4_dict.update( + { + "three_d_box": { + "translation": { + "x": anno_three_d_bbox["cx"], + "y": anno_three_d_bbox["cy"], + "z": anno_three_d_bbox["cz"], + }, + "size": { + "width": anno_three_d_bbox["w"], + "length": anno_three_d_bbox["l"], + "height": anno_three_d_bbox["h"], + }, + "rotation": { + "w": anno_three_d_bbox["quaternion"]["w"], + "x": anno_three_d_bbox["quaternion"]["x"], + "y": anno_three_d_bbox["quaternion"]["y"], + "z": anno_three_d_bbox["quaternion"]["z"], + }, + }, + "num_lidar_pts": 0, + "num_radar_pts": 0, + } + ) + if label_dict["sensor_id"][:6] == "camera" or label_dict["label_type"] == "box": + anno_two_d_bbox: List = label_dict["box"] + label_t4_dict.update( + { + "two_d_box": [ + anno_two_d_bbox[0], + anno_two_d_bbox[1], + anno_two_d_bbox[0] + anno_two_d_bbox[2], + anno_two_d_bbox[1] + anno_two_d_bbox[3], + ], + "sensor_id": label_dict["sensor_id"][-1], + } + ) + + anno_dict[dataset_id][file_id].append(label_t4_dict) + + return anno_dict diff --git a/perception_dataset/deepen/download_annotations.py b/perception_dataset/deepen/download_annotations.py new file mode 100644 index 00000000..fca73274 --- /dev/null +++ b/perception_dataset/deepen/download_annotations.py @@ -0,0 +1,76 @@ +import argparse +from datetime import date +import json +import os +from typing import List + +import requests +import yaml + +CLIENT_ID = os.environ["DEEPEN_CLIENT_ID"] +ACCESS_TOKEN = os.environ["DEEPEN_ACCESS_TOKEN"] +DATSETS_URL = ( + f"https://tools.deepen.ai/api/v2/clients/{CLIENT_ID}/labels_of_dataset_ids?labelSetId=default" +) + +today = str(date.today()).replace("-", "") + + +# def get_dataset(): +# URL = f"https://tools.deepen.ai/api/v2/datasets/{DATASET_ID}/labels?filter_existing_categories=true&final=true&all=true" +# print(URL) + +# headers = { +# "Authorization": f"Bearer {os.environ['DEEPEN_ACCESS_TOKEN']}", +# } +# response = requests.get(URL, headers=headers) +# print(response.status_code) +# pprint(response.json()) + + +def get_datasets(dataset_ids: List[str], dataset_dir: str, output_name: str): + headers = { + "Authorization": f"Bearer {ACCESS_TOKEN}", + "Content-Type": "application/json", + } + data = {"dataset_ids": dataset_ids} + + try: + response = requests.post(DATSETS_URL, headers=headers, data=json.dumps(data)) + response.raise_for_status() + except requests.exceptions.RequestException as e: + raise SystemExit(e) + + output_file = os.path.join(dataset_dir, output_name) + os.makedirs(os.path.dirname(output_name), exist_ok=True) + with open(output_file, "w") as f: + json.dump(response.json(), f, indent=4) + + print(f"Annotation file is saved: {output_file}") + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument( + "--config", + type=str, + default="config/convert_deepen_to_t4.yaml", + ) + parser.add_argument( + "--output_dir", + type=str, + default=".", + help="the directory where the annotation file is saved.", + ) + args = parser.parse_args() + + with open(args.config) as f: + config = yaml.safe_load(f) + + assert ( + config["task"] == "convert_deepen_to_t4" + ), f"use config file of convert_deepen_to_t4 task: {config['task']}" + dataset_ids = list(config["conversion"]["dataset_corresponding"].values()) + output_name = 
config["conversion"]["input_anno_file"] + + get_datasets(dataset_ids, args.output_dir, output_name) diff --git a/perception_dataset/deepen/json_format.py b/perception_dataset/deepen/json_format.py new file mode 100644 index 00000000..d05c62eb --- /dev/null +++ b/perception_dataset/deepen/json_format.py @@ -0,0 +1,261 @@ +from abc import ABCMeta, abstractmethod +from collections import defaultdict +import json +import os +import os.path as osp +import shutil +from typing import Any, Dict, List + +from nptyping import NDArray +import numpy as np + +from perception_dataset.constants import EXTENSION_ENUM + + +class AbstractData(metaclass=ABCMeta): + @abstractmethod + def to_dict(self) -> Dict: + raise NotImplementedError() + + +class ImageData(AbstractData): + """ + + e.g.: + { + "fx": 1037.37598, + "fy": 1040.97986, + "cx": 742.10227, + "cy": 600.99113, + "timestamp": 1624164470.899887, + "image_url": "image/0_cam_0.png", + "position": { + "x": 81532.011296, + "y": 50369.700811, + "z": 36.520526 + }, + "heading": { + "x": -0.707906, + "y": -0.070829, + "z": 0.04981, + "w": 0.700979 + }, + "camera_model ": "pinhone", + "k1": 0.0, + "k2": 0.0, + "p1": 0.0, + "p2": 0.0, + "k3": 0.0, + "k4": 0.0, + } + + Args: + AbstractData ([type]): [description] + """ + + def __init__( + self, + frame_index: int, + channel: str, + fileformat: str, + unix_timestamp: float, + device_position: NDArray = None, + device_heading: NDArray = None, + camera_intrinsic_matrix: NDArray = None, + camera_model: str = "pinhole", + camera_distortion: NDArray = None, + ): + assert frame_index > -1, "frame_index must be positive" + assert fileformat in [EXTENSION_ENUM.JPG.value[1:], EXTENSION_ENUM.PNG.value[1:]] + assert camera_model in ["pinhole", "fisheye"] + + self._frame_index: int = frame_index + self._channel: str = channel + self._fileformat: str = fileformat + self._unix_timestamp: float = unix_timestamp + self._camera_model = camera_model + self._fx: float = 0.0 + self._fy: float = 0.0 + self._cx: float = 0.0 + self._cy: float = 0.0 + self._device_position: Dict[str, float] = defaultdict(float) + self._device_heading: Dict[str, float] = defaultdict(float) + self._k1: float = 0.0 + self._k2: float = 0.0 + self._p1: float = 0.0 + self._p2: float = 0.0 + self._k3: float = 0.0 + self._k4: float = 0.0 + + if device_position is not None: + self.add_device_position(device_position) + if device_heading is not None: + self.add_device_heading(device_heading) + if camera_intrinsic_matrix is not None: + self.add_intrinsic_calibration(camera_intrinsic_matrix) + if camera_distortion is not None: + self.add_camera_distortion(camera_distortion) + + @property + def filepath(self): + return osp.join("data", self._channel, f"{self._frame_index}.{self._fileformat}") + + def make_directory(self, output_path): + os.makedirs(osp.join(output_path, osp.dirname(self.filepath)), exist_ok=True) + + def save(self, image_path: str, output_dir: str): + shutil.copy( + image_path, + osp.join(output_dir, self.filepath), + ) + + def add_intrinsic_calibration(self, intrinsic_matrix: NDArray): + assert intrinsic_matrix.shape == (3, 3), "intrinsic_matrix must be the shape of (3, 3)" + intrinsic_matrix = intrinsic_matrix.astype(np.float32).tolist() + self._fx: float = intrinsic_matrix[0][0] + self._fy: float = intrinsic_matrix[1][1] + self._cx: float = intrinsic_matrix[0][2] + self._cy: float = intrinsic_matrix[1][2] + + def add_device_position(self, device_position: NDArray): + """ + Args: + device_position (NDArray): [x. 
y, z] + """ + assert device_position.shape == (3,), "device_position must be the shape of (3,)" + device_position = device_position.astype(np.float32).tolist() + self._device_position = { + "x": device_position[0], + "y": device_position[1], + "z": device_position[2], + } + + def add_device_heading(self, device_heading: NDArray): + """ + Args: + device_heading (NDArray): [w, x, y, z] + """ + assert device_heading.shape == (4,), "device_heading must be the shape of (4,)" + device_heading = device_heading.astype(np.float32).tolist() + self._device_heading = { + "w": device_heading[0], + "x": device_heading[1], + "y": device_heading[2], + "z": device_heading[3], + } + + def add_camera_distortion(self, camera_distortion: NDArray): + """ + Args: + camera_distortion (NDArray): [k1, k2, p1, p2, k3, k4] + """ + assert camera_distortion.shape == (6,), "camera_distortion must be the shape of (6,)" + camera_distortion = camera_distortion.astype(np.float32).tolist() + self._k1 = camera_distortion[0] + self._k2 = camera_distortion[1] + self._p1 = camera_distortion[2] + self._p2 = camera_distortion[3] + self._k3 = camera_distortion[4] + self._k4 = camera_distortion[5] + + def to_dict(self): + value = { + "fx": self._fx, + "fy": self._fy, + "cx": self._cx, + "cy": self._cy, + "timestamp": self._unix_timestamp, + "image_url": self.filepath, + "position": self._device_position, + "heading": self._device_heading, + "camera_model": self._camera_model, + "k1": self._k1, + "k2": self._k2, + "p1": self._p1, + "p2": self._p2, + "k3": self._k3, + "k4": self._k4, + } + return value + + +class ConfigData(AbstractData): + """ + + Directory structure + data_root + |- {frame_index}.json| + |- data + |- CAM_FRONT + |- {frame_index}.json + |- CAM_FRONT_RIGHT + |- {frame_index}.json + ... 
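+
+    Each top-level {frame_index}.json is written by ConfigData.save_json(); the
+    per-camera entries under data/ correspond to files handled via ImageData.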
+ """ + + def __init__( + self, + frame_index: int, + unix_timestamp: float, + points: NDArray = None, + device_position: NDArray = None, + device_heading: NDArray = None, + ): + self._frame_index: int = frame_index + self._image_data_list: List[ImageData] = [] + self._unix_timestamp: float = unix_timestamp + self._points: List[Dict[str, float]] = [] + self._device_position: Dict[str, float] = defaultdict(float) + self._device_heading: Dict[str, float] = defaultdict(float) + + if points is not None: + self.add_points(points) + if device_position is not None: + self.add_device_position(device_position) + if device_heading is not None: + self.add_device_heading(device_heading) + + @property + def filename(self): + return str(self._frame_index) + EXTENSION_ENUM.JSON.value + + def to_dict(self) -> Dict[str, Any]: + value = { + "images": [image.to_dict() for image in self._image_data_list], + "timestamp": self._unix_timestamp, + "device_position": self._device_position, + "device_heading": self._device_heading, + "points": self._points, + } + return value + + def save_json(self, output_path: str): + with open(osp.join(output_path, self.filename), "w") as f: + json.dump(self.to_dict(), f, indent=4) + + def add_image_data(self, image_data: ImageData): + self._image_data_list.append(image_data) + + def add_points(self, points: NDArray): + assert points.ndim == 2 and points.shape[1] >= 3, f"invalid points shape: {points.shape}" + points = points.tolist() + self._points = [{"x": p[0], "y": p[1], "z": p[2]} for p in points] + + def add_device_position(self, device_position: NDArray): + assert device_position.shape == (3,), "device_position must be the shape of (3,)" + device_position = device_position.astype(np.float32).tolist() + self._device_position = { + "x": device_position[0], + "y": device_position[1], + "z": device_position[2], + } + + def add_device_heading(self, device_heading: NDArray): + assert device_heading.shape == (4,), "device_heading must be the shape of (4,)" + device_heading = device_heading.astype(np.float32).tolist() + self._device_heading = { + "w": device_heading[0], + "x": device_heading[1], + "y": device_heading[2], + "z": device_heading[3], + } diff --git a/perception_dataset/deepen/non_annotated_t4_to_deepen_converter.py b/perception_dataset/deepen/non_annotated_t4_to_deepen_converter.py new file mode 100644 index 00000000..1dc56e1c --- /dev/null +++ b/perception_dataset/deepen/non_annotated_t4_to_deepen_converter.py @@ -0,0 +1,167 @@ +from concurrent.futures import ProcessPoolExecutor +import glob +import os +import os.path as osp +import shutil +import time +from typing import Any, Dict + +from nptyping import NDArray +import numpy as np +from nuscenes.nuscenes import NuScenes +from nuscenes.utils.data_classes import LidarPointCloud +from nuscenes.utils.geometry_utils import transform_matrix +from pyquaternion import Quaternion + +from perception_dataset.abstract_converter import AbstractConverter +from perception_dataset.constants import SENSOR_ENUM +from perception_dataset.deepen.json_format import ConfigData, ImageData +from perception_dataset.utils.logger import configure_logger + +logger = configure_logger(modname=__name__) + + +class NonAnnotatedT4ToDeepenConverter(AbstractConverter): + def __init__( + self, + input_base: str, + output_base: str, + camera_sensors: list, + annotation_hz: int = 10, + workers_number: int = 32, + ): + super().__init__(input_base, output_base) + + self._camera_sensor_types = [] + self._annotation_hz = annotation_hz + 
self._workers_number = workers_number + if isinstance(camera_sensors, list): + for cam in camera_sensors: + self._camera_sensor_types.append(SENSOR_ENUM[cam["channel"]]) + + def convert(self): + start_time = time.time() + + for scene_dir in glob.glob(osp.join(self._input_base, "*")): + if not osp.isdir(scene_dir): + continue + + out_dir = osp.join(self._output_base, osp.basename(scene_dir).replace(".", "-")) + self._convert_one_scene( + scene_dir, + out_dir, + ) + shutil.make_archive(f"{out_dir}", "zip", root_dir=out_dir) + + elapsed_time = time.time() - start_time + logger.info(f"Elapsed time: {elapsed_time:.1f} [sec]") + + def _convert_one_scene(self, input_dir: str, output_dir: str): + os.makedirs(output_dir, exist_ok=True) + nusc = NuScenes(version="annotation", dataroot=input_dir, verbose=False) + + logger.info(f"Converting {input_dir} to {output_dir}") + + with ProcessPoolExecutor(max_workers=self._workers_number) as executor: + future_list = [] + for frame_index, sample in enumerate(nusc.sample): + if frame_index % int(10 / self._annotation_hz) != 0: + continue + future = executor.submit( + self._convert_one_frame, input_dir, output_dir, frame_index + ) + future_list.append(future) + [x.result() for x in future_list] + logger.info(f"Done Conversion: {input_dir} to {output_dir}") + + def _convert_one_frame(self, input_dir: str, output_dir: str, frame_index: int): + if frame_index % 10 == 0: + logger.info(f"frame index: {frame_index}") + nusc = NuScenes(version="annotation", dataroot=input_dir, verbose=False) + sample = nusc.sample[frame_index] + camera_only_mode = SENSOR_ENUM.LIDAR_CONCAT.value["channel"] not in sample["data"] + if not camera_only_mode: + lidar_token: str = sample["data"][SENSOR_ENUM.LIDAR_CONCAT.value["channel"]] + lidar_path: str = nusc.get_sample_data(lidar_token)[0] + data_dict: Dict[str, Any] = self._get_data(nusc, lidar_token) + + pointcloud: LidarPointCloud = LidarPointCloud.from_file(lidar_path) + pointcloud.transform(data_dict["sensor2global_transform"]) + points: NDArray = pointcloud.points.T # (-1, 4) + + config_data = ConfigData( + frame_index=frame_index, + unix_timestamp=data_dict["unix_timestamp"], + points=points, + device_position=data_dict["sensor2global_translation"], + device_heading=data_dict["sensor2global_rotation"], + ) + + for camera_sensor_type in self._camera_sensor_types: + camera_channel = camera_sensor_type.value["channel"] + + if camera_channel in sample["data"]: + camera_token = sample["data"][camera_channel] + else: + sample_data = [s for s in nusc.sample_data if s["sample_token"] == sample["token"]] + for sensor in sample_data: + if sensor["channel"] == camera_channel: + camera_token = sensor["token"] + break + + camera_path, _, cam_intrinsic = nusc.get_sample_data(camera_token) + data_dict: Dict[str, Any] = self._get_data(nusc, camera_token) + + image_data = ImageData( + frame_index=frame_index, + channel=camera_channel, + fileformat=data_dict["fileformat"], + unix_timestamp=data_dict["unix_timestamp"], + device_position=data_dict["sensor2global_translation"], + device_heading=data_dict["sensor2global_rotation"], + camera_intrinsic_matrix=cam_intrinsic, + ) + image_data.make_directory(output_dir) + image_data.save(camera_path, output_dir) + if camera_only_mode: + config_data = ConfigData( + frame_index=frame_index, + unix_timestamp=data_dict["unix_timestamp"], + points=None, + device_position=data_dict["sensor2global_translation"], + device_heading=data_dict["sensor2global_rotation"], + ) + config_data.add_image_data(image_data) + + 
config_data.save_json(output_dir) + + def _get_data(self, nusc: NuScenes, sensor_channel_token: str) -> Dict[str, Any]: + sd_record = nusc.get("sample_data", sensor_channel_token) + cs_record = nusc.get("calibrated_sensor", sd_record["calibrated_sensor_token"]) + ep_record = nusc.get("ego_pose", sd_record["ego_pose_token"]) + + sensor2ego_transform = transform_matrix( + translation=cs_record["translation"], + rotation=Quaternion(cs_record["rotation"]), + ) + ego2global_transform = transform_matrix( + translation=ep_record["translation"], + rotation=Quaternion(ep_record["rotation"]), + ) + + sensor2global_transform = ego2global_transform @ sensor2ego_transform + sensor2global_translation = sensor2global_transform[:3, 3] + sensor2global_rotation = np.array(list(Quaternion(matrix=sensor2global_transform[:3, :3]))) + + ret_dict = { + "fileformat": sd_record["fileformat"], + "unix_timestamp": self._timestamp_to_sec(sd_record["timestamp"]), + "sensor2global_transform": sensor2global_transform, + "sensor2global_translation": sensor2global_translation, + "sensor2global_rotation": sensor2global_rotation, + } + + return ret_dict + + def _timestamp_to_sec(self, timestamp: int) -> float: + return float(timestamp) * 1e-6 diff --git a/perception_dataset/deepen/pcd_format.py b/perception_dataset/deepen/pcd_format.py new file mode 100644 index 00000000..873589ff --- /dev/null +++ b/perception_dataset/deepen/pcd_format.py @@ -0,0 +1,288 @@ +from abc import ABCMeta, abstractmethod +from collections import defaultdict +import json +import os +import os.path as osp +from typing import Any, Dict, List + +from nptyping import NDArray +import numpy as np + +from perception_dataset.constants import EXTENSION_ENUM, SENSOR_ENUM, SENSOR_MODALITY_ENUM + + +class AbstractData(metaclass=ABCMeta): + @abstractmethod + def to_dict(self) -> Dict: + raise NotImplementedError() + + def save_json(self, output_path: str): + """ + Args: + output_path (str): [description] + """ + assert self.FILENAME is not None, f"This instance cannot be saved: {__class__.__name__}" + + with open(osp.join(output_path, self.FILENAME), "w") as f: + json.dump(self.to_dict(), f, indent=4) + + +class Sensor(AbstractData): + FILENAME = None + + def __init__(self, sensor_type: SENSOR_ENUM, extension: EXTENSION_ENUM = None): + """an object containing the details of all the sensors (lidar and cameras) present in the dataset. + + Args: + data_dir (str): the relative path of the folder where the data for the sensor is present. + for image, {pcf_file_name}.pcd.jpg or {pcd_file_name}.pcd.png + sensor_type (SENSOR_ENUM): lidar or camera + extension (EXTENSION_ENUM, optional): Valid only for sensor_type ‘camera’. + Supported formats are ‘jpg’ and ‘png’. Defaults to None. + """ + if sensor_type.value["modality"] == SENSOR_MODALITY_ENUM.CAMERA.value: + assert extension is not None, "for camera, image extension must be used." 
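+            # NOTE: `extension` is only meaningful for camera sensors; for lidar
+            # it stays None and get_filename() yields plain "<frame_index>.pcd".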
+ assert extension in [ + EXTENSION_ENUM.JPG, + EXTENSION_ENUM.PNG, + ], "image extension must be jpg or png" + + self._sensor_type: SENSOR_ENUM = sensor_type + self._sensor_fusion: Dict[str, Dict[str, List[float]]] = defaultdict(dict) + self._extension: EXTENSION_ENUM = extension + + @property + def channel(self): + return self._sensor_type.value["channel"] + + @property + def modality(self): + return self._sensor_type.value["modality"] + + @property + def filepath(self) -> str: + """this is `content` value of each sensor in Config.json""" + return osp.join("data", self.channel) + + def get_filename(self, frame_index: int): + """ + the relative path of the folder where the data for the sensor is present. + for image, {pcf_file_name}.pcd.jpg or {pcd_file_name}.pcd.png + + Args: + frame_index (int): [description] + + Returns: + filename (str): [description] + """ + filename = osp.join(self.filepath, f"{frame_index}.pcd") + if self.modality == SENSOR_MODALITY_ENUM.CAMERA.value: + filename += self._extension.value + return filename + + def make_directory(self, output_path: str): + os.makedirs(osp.join(output_path, self.filepath), exist_ok=True) + + def add_sensor_fusion(self, camera_sensor_type: SENSOR_ENUM): + camera_channel = camera_sensor_type.value["channel"] + self._sensor_fusion[camera_channel]["view_matrix"] = np.eye(4).tolist() + + def to_dict(self) -> Dict[str, Any]: + value = { + "content": self.filepath, + "sensor_type": self.modality, + } + if self.modality == SENSOR_MODALITY_ENUM.LIDAR.value: + # value["sensor_fusion"] = self._sensor_fusion + pass + else: + value["extension"] = self._extension.value[1:] + + return value + + +class Config(AbstractData): + """This is the configuration file for the dataset. This contains the information about primary sensor, cameras, + the view matrix for each camera and the directory the images and lidar files are present in. 
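+    (Note: the "sensor_fusion" block in the sample below is illustrative; the
+    current Sensor.to_dict() skips it when serializing the lidar sensor.)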
A sample + config.json data is given below: + + { + "primary_sensor_id": "lidar", + "sensors": { + "lidar": { + "content": "lidar", + "sensor_type": "lidar", + "sensor_fusion": { + "camera_0": { + "view_matrix": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + ] + }, + "camera_1": { + "view_matrix": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + ] + } + } + }, + "camera_0": { + "content": "images/camera_0_img", + "sensor_type": "camera", + "extension": "jpg", + }, + "camera_1": { + "content": "images/camera_1_img", + "sensor_type": "camera", + "extension": "jpg", + }, + }, + } + + """ + + FILENAME = "config" + EXTENSION_ENUM.JSON.value + + def __init__(self, lidar_sensor: Sensor): + """ + Args: + primary_sensor_id (int): the name/id of the lidar sensor + """ + self._lidar_sensor: Sensor = lidar_sensor + self._camera_sensors: Dict[str, Sensor] = {} + + @property + def camera_sensors(self) -> List[Sensor]: + return list(self._camera_sensors.values()) + + def make_directories(self, output_path: str): + self._lidar_sensor.make_directory(output_path) + for camera_sensor in self._camera_sensors.values(): + camera_sensor.make_directory(output_path) + + def add_camera(self, camera_sensor_type: SENSOR_ENUM, extension: EXTENSION_ENUM): + camera_sensor = Sensor( + sensor_type=camera_sensor_type, + extension=extension, + ) + self._camera_sensors[camera_sensor.channel] = camera_sensor + self._lidar_sensor.add_sensor_fusion(camera_sensor_type=camera_sensor_type) + + def to_dict(self) -> Dict[str, Any]: + lidar_modality = self._lidar_sensor.modality + # sensor_id of LiDAR must be lidar, if not upload error + value = { + "primary_sensor_id": lidar_modality, + "sensors": { + lidar_modality: self._lidar_sensor.to_dict(), + }, + } + + for camera_channel, camera_sensor in self._camera_sensors.items(): + value["sensors"][camera_channel] = camera_sensor.to_dict() + + return value + + +class LidarTransforms(AbstractData): + """This file contains the transformation matrices used to convert the lidar points in the lidar (local) frame of + reference to the world (global) frame of reference. + """ + + FILENAME = "lidar_transforms" + EXTENSION_ENUM.JSON.value + + def __init__(self): + self._forward_transforms: Dict[int, List[List[float]]] = {} + self._inverse_transforms: Dict[int, List[List[float]]] = {} + + def add_transform(self, frame_index: int, forward_transform: NDArray): + """[summary] + + Args: + frame_index (int): the index of frame in a scene + forward_transform (NDArray): transform the points in the lidar frame to world frame + """ + assert forward_transform.shape == (4, 4), "forward_transform must be (4, 4) matrix." + inverse_transform = np.linalg.inv(forward_transform) + + self._forward_transforms[frame_index] = forward_transform.tolist() + self._inverse_transforms[frame_index] = inverse_transform.tolist() + + def to_dict(self) -> Dict[str, Dict[int, List[List[float]]]]: + value = { + "forward_transforms": self._forward_transforms, + "inverse_transforms": self._inverse_transforms, + } + return value + + +class ViewMatrices(AbstractData): + """This file contains the view_matrices of each camera for each frame. 
The json object has frame index as the + key and the value similar to sensor_fusion object as mentioned in the config.json + + { + "0": { + "camera_0": { + "view_matrix": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + ] + }, + "camera_1": { + "view_matrix": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + ] + } + }, + "1": { + "camera_0": { + "view_matrix": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + ] + }, + "camera_1": { + "view_matrix": [ + [1.0, 0.0, 0.0, 0.0], + [0.0, 1.0, 0.0, 0.0], + [0.0, 0.0, 0.0, 1.0], + [0.0, 0.0, 1.0, 0.0], + ] + } + }, + ... + } + + """ + + FILENAME = "view_matrices" + EXTENSION_ENUM.JSON.value + + def __init__(self): + self._view_matrices: Dict[str, Dict[str, Dict[str, List[List[float]]]]] = defaultdict(dict) + + def add_view_matrix(self, frame_index: int, sensor_channel: str, view_matrix: NDArray): + """ + + Args: + frame_index (int): the index of frame in a scene + sensor_channel (str): the channel of sensor + view_matrix (NDArray): : the transformation matrix used to transform the points from lidar + coordinates to image coordinates + """ + assert view_matrix.shape == (4, 4), "view_matrix must be (4, 4) matrix." + self._view_matrices[frame_index][sensor_channel] = {"view_matrix": view_matrix.tolist()} + + def to_dict(self) -> Dict: + return self._view_matrices diff --git a/perception_dataset/ros2/__init__.py b/perception_dataset/ros2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/ros2/tf2_geometry_msgs/__init__.py b/perception_dataset/ros2/tf2_geometry_msgs/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/ros2/tf2_geometry_msgs/tf2_geometry_msgs.py b/perception_dataset/ros2/tf2_geometry_msgs/tf2_geometry_msgs.py new file mode 100644 index 00000000..df71ef70 --- /dev/null +++ b/perception_dataset/ros2/tf2_geometry_msgs/tf2_geometry_msgs.py @@ -0,0 +1,415 @@ +# copied from https://github.com/ros2/geometry2/blob/rolling/tf2_geometry_msgs/src/tf2_geometry_msgs/tf2_geometry_msgs.py +# due to fail to importing the package in ro2 environment + + +# Copyright 2008 Willow Garage, Inc. +# +# Redistribution and use in source and binary forms, with or without +# modification, are permitted provided that the following conditions are met: +# +# * Redistributions of source code must retain the above copyright +# notice, this list of conditions and the following disclaimer. +# +# * Redistributions in binary form must reproduce the above copyright +# notice, this list of conditions and the following disclaimer in the +# documentation and/or other materials provided with the distribution. +# +# * Neither the name of the Willow Garage, Inc. nor the names of its +# contributors may be used to endorse or promote products derived from +# this software without specific prior written permission. +# +# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE +# ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE +# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR +# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF +# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS +# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN +# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) +# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE +# POSSIBILITY OF SUCH DAMAGE. + + +# author: Wim Meeussen + +from typing import Iterable, Optional, Tuple + +from geometry_msgs.msg import ( + PointStamped, + Pose, + PoseStamped, + PoseWithCovarianceStamped, + TransformStamped, + Vector3Stamped, +) +import numpy as np +import tf2_ros + + +def to_msg_msg(msg): + return msg + + +tf2_ros.ConvertRegistration().add_to_msg(Vector3Stamped, to_msg_msg) +tf2_ros.ConvertRegistration().add_to_msg(PoseStamped, to_msg_msg) +tf2_ros.ConvertRegistration().add_to_msg(PointStamped, to_msg_msg) + + +def from_msg_msg(msg): + return msg + + +tf2_ros.ConvertRegistration().add_from_msg(Vector3Stamped, from_msg_msg) +tf2_ros.ConvertRegistration().add_from_msg(PoseStamped, from_msg_msg) +tf2_ros.ConvertRegistration().add_from_msg(PointStamped, from_msg_msg) + + +def transform_covariance(cov_in, transform): + """ + Apply a given transform to a covariance matrix. + + :param cov_in: Covariance matrix + :param transform: The transform that will be applies + :returns: The transformed covariance matrix + """ + # Converting the Quaternion to a Rotation Matrix first + # Taken from: https://automaticaddison.com/how-to-convert-a-quaternion-to-a-rotation-matrix/ + q0 = transform.transform.rotation.w + q1 = transform.transform.rotation.x + q2 = transform.transform.rotation.y + q3 = transform.transform.rotation.z + + # First row of the rotation matrix + r00 = 2 * (q0 * q0 + q1 * q1) - 1 + r01 = 2 * (q1 * q2 - q0 * q3) + r02 = 2 * (q1 * q3 + q0 * q2) + + # Second row of the rotation matrix + r10 = 2 * (q1 * q2 + q0 * q3) + r11 = 2 * (q0 * q0 + q2 * q2) - 1 + r12 = 2 * (q2 * q3 - q0 * q1) + + # Third row of the rotation matrix + r20 = 2 * (q1 * q3 - q0 * q2) + r21 = 2 * (q2 * q3 + q0 * q1) + r22 = 2 * (q0 * q0 + q3 * q3) - 1 + + # Code reference: https://github.com/ros2/geometry2/pull/430 + # Mathematical Reference: + # A. L. Garcia, “Linear Transformations of Random Vectors,” in Probability, + # Statistics, and Random Processes For Electrical Engineering, 3rd ed., + # Pearson Prentice Hall, 2008, pp. 320–322. 
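+    # A sketch of what follows: the flattened 6x6 covariance is treated as a
+    # 2x2 block matrix of 3x3 blocks
+    #     [[cov_11, cov_12],
+    #      [cov_21, cov_22]]
+    # and each block C is rotated as R @ C @ R^T, i.e. the linear-transform
+    # rule Cov(R x) = R Cov(x) R^T applied block-wise.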
+
+    R = np.array([[r00, r01, r02], [r10, r11, r12], [r20, r21, r22]])
+
+    R_transpose = np.transpose(R)
+
+    cov_11 = np.array([cov_in[:3], cov_in[6:9], cov_in[12:15]])
+    cov_12 = np.array([cov_in[3:6], cov_in[9:12], cov_in[15:18]])
+    cov_21 = np.array([cov_in[18:21], cov_in[24:27], cov_in[30:33]])
+    cov_22 = np.array([cov_in[21:24], cov_in[27:30], cov_in[33:]])
+
+    # And we perform the transform
+    result_11 = R @ cov_11 @ R_transpose
+    result_12 = R @ cov_12 @ R_transpose
+    result_21 = R @ cov_21 @ R_transpose
+    result_22 = R @ cov_22 @ R_transpose
+
+    cov_out = PoseWithCovarianceStamped()
+
+    cov_out.pose.covariance[0] = result_11[0][0]
+    cov_out.pose.covariance[1] = result_11[0][1]
+    cov_out.pose.covariance[2] = result_11[0][2]
+    cov_out.pose.covariance[6] = result_11[1][0]
+    cov_out.pose.covariance[7] = result_11[1][1]
+    cov_out.pose.covariance[8] = result_11[1][2]
+    cov_out.pose.covariance[12] = result_11[2][0]
+    cov_out.pose.covariance[13] = result_11[2][1]
+    cov_out.pose.covariance[14] = result_11[2][2]
+
+    cov_out.pose.covariance[3] = result_12[0][0]
+    cov_out.pose.covariance[4] = result_12[0][1]
+    cov_out.pose.covariance[5] = result_12[0][2]
+    cov_out.pose.covariance[9] = result_12[1][0]
+    cov_out.pose.covariance[10] = result_12[1][1]
+    cov_out.pose.covariance[11] = result_12[1][2]
+    cov_out.pose.covariance[15] = result_12[2][0]
+    cov_out.pose.covariance[16] = result_12[2][1]
+    cov_out.pose.covariance[17] = result_12[2][2]
+
+    cov_out.pose.covariance[18] = result_21[0][0]
+    cov_out.pose.covariance[19] = result_21[0][1]
+    cov_out.pose.covariance[20] = result_21[0][2]
+    cov_out.pose.covariance[24] = result_21[1][0]
+    cov_out.pose.covariance[25] = result_21[1][1]
+    cov_out.pose.covariance[26] = result_21[1][2]
+    cov_out.pose.covariance[30] = result_21[2][0]
+    cov_out.pose.covariance[31] = result_21[2][1]
+    cov_out.pose.covariance[32] = result_21[2][2]
+
+    cov_out.pose.covariance[21] = result_22[0][0]
+    cov_out.pose.covariance[22] = result_22[0][1]
+    cov_out.pose.covariance[23] = result_22[0][2]
+    cov_out.pose.covariance[27] = result_22[1][0]
+    cov_out.pose.covariance[28] = result_22[1][1]
+    cov_out.pose.covariance[29] = result_22[1][2]
+    cov_out.pose.covariance[33] = result_22[2][0]
+    cov_out.pose.covariance[34] = result_22[2][1]
+    cov_out.pose.covariance[35] = result_22[2][2]
+
+    return cov_out.pose.covariance
+
+
+def _build_affine(
+    rotation: Optional[Iterable] = None, translation: Optional[Iterable] = None
+) -> np.ndarray:
+    """
+    Build an affine matrix from a quaternion and a translation.
+
+    :param rotation: The quaternion as [w, x, y, z]
+    :param translation: The translation as [x, y, z]
+    :returns: The affine transformation matrix
+    """
+    affine = np.eye(4)
+    if rotation is not None:
+        affine[:3, :3] = _get_mat_from_quat(np.asarray(rotation))
+    if translation is not None:
+        affine[:3, 3] = np.asarray(translation)
+    return affine
+
+
+def _transform_to_affine(transform: TransformStamped) -> np.ndarray:
+    """
+    Convert a `TransformStamped` to an affine matrix.
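+
+    The rotation quaternion fills the upper-left 3x3 block and the translation
+    fills the last column of the 4x4 homogeneous matrix (see _build_affine()).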
+ + :param transform: The transform that should be converted + :returns: The affine transform + """ + transform = transform.transform + transform_rotation_matrix = [ + transform.rotation.w, + transform.rotation.x, + transform.rotation.y, + transform.rotation.z, + ] + transform_translation = [ + transform.translation.x, + transform.translation.y, + transform.translation.z, + ] + return _build_affine(transform_rotation_matrix, transform_translation) + + +def _get_mat_from_quat(quaternion: np.ndarray) -> np.ndarray: + """ + Convert a quaternion to a rotation matrix. + + This method is based on quat2mat from https://github.com + f185e866ecccb66c545559bc9f2e19cb5025e0ab/transforms3d/quaternions.py#L101 , + since that library is not available via rosdep. + + :param quaternion: A numpy array containing the w, x, y, and z components of the quaternion + :returns: The rotation matrix + """ + Nq = np.sum(np.square(quaternion)) + if Nq < np.finfo(np.float64).eps: + return np.eye(3) + + XYZ = quaternion[1:] * 2.0 / Nq + wXYZ = XYZ * quaternion[0] + xXYZ = XYZ * quaternion[1] + yYZ = XYZ[1:] * quaternion[2] + zZ = XYZ[2] * quaternion[3] + + return np.array( + [ + [1.0 - (yYZ[0] + zZ), xXYZ[1] - wXYZ[2], xXYZ[2] + wXYZ[1]], + [xXYZ[1] + wXYZ[2], 1.0 - (xXYZ[0] + zZ), yYZ[1] - wXYZ[0]], + [xXYZ[2] - wXYZ[1], yYZ[1] + wXYZ[0], 1.0 - (xXYZ[0] + yYZ[0])], + ] + ) + + +def _get_quat_from_mat(rot_mat: np.ndarray) -> np.ndarray: + """ + Convert a rotation matrix to a quaternion. + + This method is a copy of mat2quat from https://github.com + f185e866ecccb66c545559bc9f2e19cb5025e0ab/transforms3d/quaternions.py#L150 , + since that library is not available via rosdep. + + Method from + Bar-Itzhack, Itzhack Y. (2000), "New method for extracting the + quaternion from a rotation matrix", AIAA Journal of Guidance, + Control and Dynamics 23(6):1085-1087 (Engineering Note), ISSN + 0731-5090 + + :param rot_mat: A roatation matrix + :returns: An quaternion + """ + # Decompose rotation matrix + Qxx, Qyx, Qzx, Qxy, Qyy, Qzy, Qxz, Qyz, Qzz = rot_mat.flat + # Create matrix + K = ( + np.array( + [ + [Qxx - Qyy - Qzz, 0, 0, 0], + [Qyx + Qxy, Qyy - Qxx - Qzz, 0, 0], + [Qzx + Qxz, Qzy + Qyz, Qzz - Qxx - Qyy, 0], + [Qyz - Qzy, Qzx - Qxz, Qxy - Qyx, Qxx + Qyy + Qzz], + ] + ) + / 3.0 + ) + vals, vecs = np.linalg.eigh(K) + # Select largest eigenvector and reorder to w,x,y,z + q = vecs[[3, 0, 1, 2], np.argmax(vals)] + # Invert quaternion if w is negative (results in positive w) + if q[0] < 0: + q *= -1 + return q + + +def _decompose_affine(affine: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: + """ + Decompose an affine transformation into a quaternion and the translation. + + :param affine: The affine transformation matrix + :returns: The quaternion and the translation array + """ + return _get_quat_from_mat(affine[:3, :3]), affine[:3, 3] + + +# PointStamped +def do_transform_point(point: PointStamped, transform: TransformStamped) -> PointStamped: + """ + Transform a `PointStamped` using a given `TransformStamped`. 
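+
+    The point is lifted into a translation-only 4x4 affine, multiplied by the
+    transform's affine, and the translation part of the result is read back.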
+ + :param point: The point + :param transform: The transform + :returns: The transformed point + """ + _, point = _decompose_affine( + np.matmul( + _transform_to_affine(transform), + _build_affine(translation=[point.point.x, point.point.y, point.point.z]), + ) + ) + + res = PointStamped() + res.point.x = point[0] + res.point.y = point[1] + res.point.z = point[2] + res.header = transform.header + return res + + +tf2_ros.TransformRegistration().add(PointStamped, do_transform_point) + + +# Vector3Stamped +def do_transform_vector3(vector3: Vector3Stamped, transform: TransformStamped) -> Vector3Stamped: + """ + Transform a `Vector3Stamped` using a given `TransformStamped`. + + :param vector3: The vector3 + :param transform: The transform + :returns: The transformed vector3 + """ + transform.transform.translation.x = 0.0 + transform.transform.translation.y = 0.0 + transform.transform.translation.z = 0.0 + _, point = _decompose_affine( + np.matmul( + _transform_to_affine(transform), + _build_affine(translation=[vector3.vector.x, vector3.vector.y, vector3.vector.z]), + ) + ) + res = Vector3Stamped() + res.vector.x = point[0] + res.vector.y = point[1] + res.vector.z = point[2] + res.header = transform.header + return res + + +tf2_ros.TransformRegistration().add(Vector3Stamped, do_transform_vector3) + + +# Pose +def do_transform_pose(pose: Pose, transform: TransformStamped) -> Pose: + """ + Transform a `Pose` using a given `TransformStamped`. + + This method is used to share the tranformation done in + `do_transform_pose_stamped()` and `do_transform_pose_with_covariance_stamped()` + + :param pose: The pose + :param transform: The transform + :returns: The transformed pose + """ + quaternion, point = _decompose_affine( + np.matmul( + _transform_to_affine(transform), + _build_affine( + translation=[pose.position.x, pose.position.y, pose.position.z], + rotation=[ + pose.orientation.w, + pose.orientation.x, + pose.orientation.y, + pose.orientation.z, + ], + ), + ) + ) + res = Pose() + res.position.x = point[0] + res.position.y = point[1] + res.position.z = point[2] + res.orientation.w = quaternion[0] + res.orientation.x = quaternion[1] + res.orientation.y = quaternion[2] + res.orientation.z = quaternion[3] + return res + + +# PoseStamped +def do_transform_pose_stamped(pose: PoseStamped, transform: TransformStamped) -> PoseStamped: + """ + Transform a `PoseStamped` using a given `TransformStamped`. + + :param pose: The stamped pose + :param transform: The transform + :returns: The transformed pose stamped + """ + res = PoseStamped() + res.pose = do_transform_pose(pose.pose, transform) + res.header = transform.header + return res + + +tf2_ros.TransformRegistration().add(PoseStamped, do_transform_pose_stamped) + + +# PoseWithCovarianceStamped +def do_transform_pose_with_covariance_stamped( + pose: PoseWithCovarianceStamped, transform: TransformStamped +) -> PoseWithCovarianceStamped: + """ + Transform a `PoseWithCovarianceStamped` using a given `TransformStamped`. 
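+
+    The pose is transformed with do_transform_pose() and the 6x6 covariance
+    with transform_covariance().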
+ + :param pose: The pose with covariance stamped + :param transform: The transform + :returns: The transformed pose with covariance stamped + """ + res = PoseWithCovarianceStamped() + res.pose.pose = do_transform_pose(pose.pose.pose, transform) + res.pose.covariance = transform_covariance(pose.pose.covariance, transform) + res.header = transform.header + return res + + +tf2_ros.TransformRegistration().add( + PoseWithCovarianceStamped, do_transform_pose_with_covariance_stamped +) diff --git a/perception_dataset/rosbag2/__init__.py b/perception_dataset/rosbag2/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/rosbag2/autoware_msgs.py b/perception_dataset/rosbag2/autoware_msgs.py new file mode 100644 index 00000000..65fb59fa --- /dev/null +++ b/perception_dataset/rosbag2/autoware_msgs.py @@ -0,0 +1,268 @@ +from typing import Any, Dict, List, Union +import uuid + +from autoware_auto_perception_msgs.msg import ( + DetectedObject, + DetectedObjects, + ObjectClassification, + TrackedObject, + TrackedObjects, + TrafficLight, + TrafficLightRoi, + TrafficLightRoiArray, + TrafficSignal, + TrafficSignalArray, +) + + +def semantic_type_to_class_name(semantic_type: int) -> str: + """https://github.com/tier4/tier4_autoware_msgs/blob/tier4/universe/tier4_perception_msgs/msg/object_recognition/Semantic.msg""" + semantic_to_category: Dict[int, str] = { + 0: "unknown", + 1: "car", + 2: "truck", + 3: "bus", + 4: "bicycle", + 5: "motorbike", + 6: "pedestrian", + 7: "animal", + 11: "bicycle_without_rider", + 12: "motorbike_without_rider", + 21: "street_asset", + } + + return semantic_to_category.get(semantic_type, "unknown") + + +def parse_dynamic_object_array(msg) -> List[Dict[str, Any]]: + """ + The object message is archived, but used for synthetic data. 
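+    Each DynamicObject is flattened into a T4-style annotation dict
+    (category_name, instance_id, three_d_box, ...).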
+ https://github.com/tier4/autoware_iv_msgs/blob/main/autoware_perception_msgs/msg/object_recognition/DynamicObjectArray.msg + + Args: + msg (autoware_perception_msgs.msg.DynamicObjectArray): autoware perception msg (.iv) + + Returns: + List[Dict[str, Any]]: dict format + """ + scene_annotation_list: List[Dict[str, Any]] = [] + for obj in msg.objects: + # obj: Dynamic Object + + obj_uuid = uuid.UUID(bytes=obj.id.uuid.tobytes()) + category_name = semantic_type_to_class_name(obj.semantic.type) + position: Dict[str, Any] = { + "x": obj.state.pose_covariance.pose.position.x, + "y": obj.state.pose_covariance.pose.position.y, + "z": obj.state.pose_covariance.pose.position.z, + } + orientation: Dict[str, Any] = { + "x": obj.state.pose_covariance.pose.orientation.x, + "y": obj.state.pose_covariance.pose.orientation.y, + "z": obj.state.pose_covariance.pose.orientation.z, + "w": obj.state.pose_covariance.pose.orientation.w, + } + dimension: Dict[str, Any] = { + "width": obj.shape.dimensions.y, + "length": obj.shape.dimensions.x, + "height": obj.shape.dimensions.z, + } + label_dict: Dict[str, Any] = { + "category_name": category_name, + "instance_id": str(obj_uuid), + "attribute_names": [], # not available + "three_d_box": { + "translation": position, + "size": dimension, + "rotation": orientation, + }, + "num_lidar_pts": 1, # placeholder, the value is caluculated in the Rosbag2ToT4Converter + "num_radar_pts": 0, + } + scene_annotation_list.append(label_dict) + + return scene_annotation_list + + +def object_classification_to_category_name(object_classification) -> str: + """https://github.com/tier4/autoware_auto_msgs/blob/tier4/main/autoware_auto_perception_msgs/msg/ObjectClassification.idl""" + cls_to_cat: Dict[int, str] = { + 0: "unknown", + 1: "car", + 2: "truck", + 3: "bus", + 4: "trailer", + 5: "motorcycle", + 6: "bicycle", + 7: "pedestrian", + } + + return cls_to_cat.get(object_classification, "unknown") + + +def parse_perception_objects(msg) -> List[Dict[str, Any]]: + """https://github.com/tier4/autoware_auto_msgs/tree/tier4/main/autoware_auto_perception_msgs + + + Args: + msg (autoware_auto_perception_msgs.msg.DetectedObjects): autoware detection msg (.core/.universe) + + Returns: + List[Dict[str, Any]]: dict format + """ + assert isinstance( + msg, (DetectedObjects, TrackedObjects) + ), f"Invalid object message type: {type(msg)}" + + def get_category_name(classification: List[ObjectClassification]) -> str: + max_score: float = -1.0 + out_class_name: str = "unknown" + for obj_cls in classification: + if obj_cls.probability > max_score: + max_score = obj_cls.probability + out_class_name = object_classification_to_category_name(obj_cls.label) + return out_class_name + + scene_annotation_list: List[Dict[str, Any]] = [] + for obj in msg.objects: + obj: Union[DetectedObject, TrackedObject] + pose = obj.kinematics.pose_with_covariance.pose + + if isinstance(obj, DetectedObject): + obj_uuid = uuid.uuid4() # random uuid + elif isinstance(obj, TrackedObject): + obj_uuid = uuid.UUID(bytes=obj.object_id.uuid.tobytes()) + else: + raise ValueError( + f"Object message is neither DetectedObject nor TrackedObject: {type(obj)}" + ) + + category_name = get_category_name(obj.classification) + position: Dict[str, Any] = { + "x": pose.position.x, + "y": pose.position.y, + "z": pose.position.z, + } + orientation: Dict[str, Any] = { + "x": pose.orientation.x, + "y": pose.orientation.y, + "z": pose.orientation.z, + "w": pose.orientation.w, + } + dimension: Dict[str, Any] = { + "width": obj.shape.dimensions.y, + 
"length": obj.shape.dimensions.x, + "height": obj.shape.dimensions.z, + } + label_dict: Dict[str, Any] = { + "category_name": category_name, + "instance_id": str(obj_uuid), + "attribute_names": [], # not available + "three_d_box": { + "translation": position, + "size": dimension, + "rotation": orientation, + }, + "num_lidar_pts": 1, # TODO(yukke42): impl + "num_radar_pts": 0, + } + scene_annotation_list.append(label_dict) + + return scene_annotation_list + + +def parse_traffic_lights( + roi_msg: TrafficLightRoiArray, signal_msg: TrafficSignalArray +) -> List[Dict[str, Any]]: + """https://github.com/tier4/autoware_auto_msgs/tree/tier4/main/autoware_auto_perception_msgs + + + Args: + msg (autoware_auto_perception_msgs.msg.DetectedObjects): autoware detection msg (.core/.universe) + + Returns: + List[Dict[str, Any]]: dict format + """ + color_to_str: Dict[int, str] = { + TrafficLight.GREEN: "green", + TrafficLight.RED: "red", + TrafficLight.AMBER: "yellow", + TrafficLight.WHITE: "white", + } + shape_to_str: Dict[int, str] = { + TrafficLight.CIRCLE: "circle", + TrafficLight.DOWN_ARROW: "down", + TrafficLight.DOWN_LEFT_ARROW: "down_left", + TrafficLight.DOWN_RIGHT_ARROW: "down_right", + TrafficLight.LEFT_ARROW: "left", + TrafficLight.RIGHT_ARROW: "right", + TrafficLight.CROSS: "cross", + TrafficLight.UP_ARROW: "straight", + } + + def get_category_name(signal: TrafficSignal): + # list for saving the status of the lights + blubs: List[int] = [] + + category: str = "" + for light in signal.lights: + light: TrafficLight + if light.color == TrafficLight.UNKNOWN: + blubs.append(TrafficLight.UNKNOWN) + elif light.shape == TrafficLight.CIRCLE: + assert light.color in color_to_str + # if the shape is circle, save the color + blubs.append(light.color) + else: + # if the shape is not circle, the color must be green + # in this case, save the shape + assert light.color == TrafficLight.GREEN and light.shape in shape_to_str + blubs.append(light.shape) + # we want the category name to have the format "color-shape1", + # and the color constants are smaller than shape constants, + # we can simply achieve this by sort() + blubs.sort() + blubs_str: List[str] = [] + for blub in blubs: + if blub == TrafficLight.UNKNOWN: + blubs_str.append("unknown") + elif blub in color_to_str: + blubs_str.append(color_to_str[blub]) + else: + blubs_str.append(shape_to_str[blub]) + category: str = "_".join(blubs_str) + return category + + assert isinstance( + roi_msg, TrafficLightRoiArray + ), f"Invalid object message type: {type(roi_msg)}" + assert isinstance( + signal_msg, TrafficSignalArray + ), f"Invalid object message type: {type(signal_msg)}" + + scene_annotation_list: List[Dict[str, Any]] = [] + for roi in roi_msg.rois: + roi: TrafficLightRoi + for signal in signal_msg.signals: + signal: TrafficSignal + if roi.id == signal._map_primitive_id: + category_name = get_category_name(signal) + label_dict: Dict[str, Any] = { + "category_name": category_name, + # this sensor_id would not be saved to the final dataset, + # considering traffic light would generally used camera_only mode and one camera, + # setting as "0" would be no problem + "sensor_id": 0, + "instance_id": str(signal.map_primitive_id), + "attribute_names": [], # not available + "two_d_box": [ + roi.roi.x_offset, + roi.roi.y_offset, + roi.roi.x_offset + roi.roi.width, + roi.roi.y_offset + roi.roi.height, + ], + } + scene_annotation_list.append(label_dict) + break + + return scene_annotation_list diff --git a/perception_dataset/rosbag2/converter_params.py 
b/perception_dataset/rosbag2/converter_params.py new file mode 100644 index 00000000..f52ec35b --- /dev/null +++ b/perception_dataset/rosbag2/converter_params.py @@ -0,0 +1,88 @@ +import enum +from typing import Dict, List, Optional + +from pydantic import BaseModel, validator + +from perception_dataset.utils.logger import configure_logger + +logger = configure_logger(modname=__name__) + + +class DataType(enum.Enum): + REAL = "real" + SYNTHETIC = "synthetic" + + +class Rosbag2ConverterParams(BaseModel): + task: str + input_base: str # path to the input rosbag2 directory (multiple rosbags in the directory) + input_bag_path: Optional[str] # path to the input rosbag2 (a single rosbag) + output_base: str # path to the output directory + overwrite_mode: bool = False + without_compress: bool = False + workers_number: int = 1 + + # rosbag data type + data_type: DataType = DataType.REAL # real or synthetic + + # rosbag config + lidar_sensor: Dict[str, str] = { + "topic": "", + "channel": "", + } # lidar_sensor, {topic: , channel, } + camera_sensors: List[Dict[str, str]] = [] # camera sensors, + object_topic_name: str = "" + object_msg_type: str = "" + traffic_light_signal_topic_name: str = "" + traffic_light_rois_topic_name: str = "" + world_frame_id: str = "map" + with_camera: bool = True + + # rosbag reader + num_load_frames: int # the number of frames to be loaded. if the value isn't positive, read all messages. + skip_timestamp: float # not read for the second after the first topic + start_timestamp_sec: float = 0.0 # conversion start timestamp in sec + crop_frames_unit: int = 1 # crop frames from the end so that the number of frames is divisible by crop_frames_unit. Set to 0 or 1 so as not to crop any frames. + camera_latency_sec: float = ( + 0.0 # camera latency in seconds between the header.stamp and shutter trigger + ) + # in synthetic data (from AWSIM) it may be the case that there is no ego transform available at the beginning of rosbag + ignore_no_ego_transform_at_rosbag_beginning: bool = False + generate_frame_every: int = 1 # pick frames out of every this number. + generate_frame_every_meter: float = 5.0 # pick frames when ego vehicle moves certain meters + + def __init__(self, **args): + super().__init__(**args) + + if len(self.camera_sensors) == 0: + logger.warning( + "The config of `camera_sensors` field is empty, so disable to load camera data." 
+            )
+            self.with_camera = False
+
+    @validator("workers_number")
+    def check_workers_number(cls, v):
+        if v < 1:
+            logger.warning("workers_number must be positive; replaced with 1.")
+            v = 1
+        return v
+
+    @validator("skip_timestamp")
+    def check_skip_timestamp(cls, v):
+        if v < 0:
+            logger.warning("skip_timestamp must be positive or zero; replaced with 0.")
+            v = 0
+        return v
+
+    @validator("crop_frames_unit")
+    def check_crop_frames_unit(cls, v):
+        if v <= 0:
+            logger.warning("crop_frames_unit must be positive; replaced with 1.")
+            v = 1
+        return v
+
+    @validator("object_msg_type")
+    def check_object_msg_type(cls, v):
+        if v not in ["DynamicObjectArray", "DetectedObjects", "TrackedObjects", "TrafficLights"]:
+            raise ValueError(f"Unexpected object message type: {v}")
+        return v
diff --git a/perception_dataset/rosbag2/rosbag2_converter.py b/perception_dataset/rosbag2/rosbag2_converter.py
new file mode 100644
index 00000000..f44ac0ce
--- /dev/null
+++ b/perception_dataset/rosbag2/rosbag2_converter.py
@@ -0,0 +1,86 @@
+import re
+import sys
+from typing import Dict, List, Text
+
+from rosbag2_py import StorageFilter
+
+from perception_dataset.utils.rosbag2 import create_reader, create_writer, get_topic_count, reindex
+
+
+class Rosbag2Converter:
+    MANDATORY_TOPICS = [
+        "pointcloud",
+        "/tf",
+        "/tf_static",
+    ]
+
+    def __init__(
+        self,
+        input_bag_dir: Text,
+        output_bag_dir: Text,
+        topic_list: List = [],
+        start_time_sec: float = 0,
+        end_time_sec: float = sys.float_info.max,
+        mandatory_topics: List = MANDATORY_TOPICS,
+    ):
+        self._input_bag_dir: str = input_bag_dir
+        self._output_bag_dir: str = output_bag_dir
+        self._topic_list: List = topic_list
+        self._start_time_sec = start_time_sec
+        self._end_time_sec = end_time_sec
+        self._mandatory_topics = mandatory_topics
+        self._check_topic_count()
+
+    def _check_topic_count(self):
+        topic_count: Dict[str, int] = get_topic_count(self._input_bag_dir)
+
+        for topic in self._mandatory_topics:
+            if topic not in topic_count.keys():
+                for key in topic_count.keys():
+                    if re.search(topic, key):
+                        topic_count[topic] = topic_count[key]
+                        break
+            try:
+                if topic_count[topic] == 0:
+                    raise ValueError(
+                        f"{topic} topic count is 0. The input rosbag must contain {topic}."
+                    )
+            except KeyError:
+                raise ValueError(
+                    f"There is no {topic} in the rosbag. The input rosbag must contain {topic}."
+                )
+
+    def convert(self):
+        writer = create_writer(self._output_bag_dir)
+        reader = create_reader(self._input_bag_dir)
+        if len(self._topic_list) != 0:
+            reader.set_filter(StorageFilter(topics=self._topic_list))
+
+        for topic in reader.get_all_topics_and_types():
+            if "traffic_light" not in topic.name:
+                topic.name = topic.name.replace("image_raw", "image_rect_color")
+            writer.create_topic(topic)
+
+        write_topic_count = 0
+        while reader.has_next():
+            topic_name, data, timestamp = reader.read_next()
+            message_time = timestamp * 1e-9
+            if topic_name == "/tf_static":
+                writer.write(topic_name, data, timestamp)
+            elif message_time <= self._start_time_sec:
+                continue
+            elif message_time <= self._end_time_sec:
+                if "traffic_light" not in topic_name:
+                    topic_name = topic_name.replace("image_raw", "image_rect_color")
+                writer.write(topic_name, data, timestamp)
+                write_topic_count += 1
+            else:
+                break
+        del writer
+        # Reindex to clean up the metadata
+        reindex(self._output_bag_dir)
+
+        if write_topic_count == 0:
+            raise ValueError(
+                "Total topic count in rosbag is 0. The input rosbag timestamps might not match the timestamps in the dataset."
diff --git a/perception_dataset/rosbag2/rosbag2_reader.py b/perception_dataset/rosbag2/rosbag2_reader.py
new file mode 100644
index 00000000..040bec30
--- /dev/null
+++ b/perception_dataset/rosbag2/rosbag2_reader.py
@@ -0,0 +1,143 @@
+"""from https://github.com/tier4/ros2bag_extensions/blob/main/ros2bag_extensions/ros2bag_extensions/verb/__init__.py"""
+
+from typing import Any, Dict, List, Optional
+
+import builtin_interfaces.msg
+from geometry_msgs.msg import TransformStamped
+from rclpy.duration import Duration
+from rclpy.serialization import deserialize_message
+from rclpy.time import Time
+from rosbag2_py import StorageFilter
+from rosidl_runtime_py.utilities import get_message
+import tf2_ros
+
+from perception_dataset.utils.rosbag2 import create_reader, get_topic_count, get_topic_type_dict
+
+
+class Rosbag2Reader:
+    def __init__(self, bag_dir: str):
+        self._bag_dir: str = bag_dir
+
+        self._topic_name_to_topic_type = get_topic_type_dict(self._bag_dir)
+        self._topic_name_to_topic_count = get_topic_count(self._bag_dir)
+
+        # start time in seconds
+        self.start_timestamp = self._get_starting_time()
+        # set the duration long enough to handle merged bag files
+        self._tf_buffer = tf2_ros.BufferCore(Duration(seconds=1e9))
+        self._set_tf_buffer()
+
+        self.sensor_topic_to_frame_id: Dict[str, str] = {}
+        self.camera_info: Dict[str, str] = {}
+        self._set_camera_info()
+
+    def _get_starting_time(self) -> float:
+        reader = create_reader(self._bag_dir)
+        while reader.has_next():
+            topic_name, data, timestamp = reader.read_next()
+            topic_type = self._topic_name_to_topic_type[topic_name]
+
+            # Marker messages fail to deserialize
+            # https://docs.ros.org/en/rolling/Releases/Release-Humble-Hawksbill.html#support-textures-and-embedded-meshes-for-marker-messages
+            if topic_type.startswith("visualization_msgs"):
+                continue
+
+            try:
+                msg_type = get_message(topic_type)
+            except ModuleNotFoundError:
+                continue
+            except AttributeError:
+                print("The sourced message type differs from the one in the rosbag")
+                continue
+
+            msg = deserialize_message(data, msg_type)
+
+            # get timestamp from header.stamp
+            if hasattr(msg, "header"):
+                msg_stamp = msg.header.stamp
+            else:
+                continue
+            # this might happen for some topics like "/map/vector_map", "/diagnostics_agg", ""
+            if msg_stamp.sec == 0 and msg_stamp.nanosec == 0:
+                continue
+            msg_stamp = msg_stamp.sec + msg_stamp.nanosec / 1e9
+
+            return msg_stamp
+
+    def _set_tf_buffer(self):
+        """set /tf and /tf_static to tf_buffer"""
+        for message in self.read_messages(topics=["/tf"]):
+            for transform in message.transforms:
+                self._tf_buffer.set_transform(transform, "default_authority")
+
+        for message in self.read_messages(topics=["/tf_static"]):
+            for transform in message.transforms:
+                self._tf_buffer.set_transform_static(transform, "default_authority")
+
+    def _set_camera_info(self):
+        """set /camera_info to self.camera_info"""
+        for topic_name, message in self.read_camera_info():
+            self.camera_info[topic_name] = message
+
+    def get_topic_count(self, topic_name: str) -> int:
+        return self._topic_name_to_topic_count.get(topic_name, 0)
+
+    def read_camera_info(self) -> Any:
+        reader = create_reader(self._bag_dir)
+        while reader.has_next():
+            topic_name, data, timestamp = reader.read_next()
+            topic_type = self._topic_name_to_topic_type[topic_name]
+
+            if "sensor_msgs/msg/" in topic_type:
+                msg_type = get_message(topic_type)
+                msg = deserialize_message(data, msg_type)
+                if hasattr(msg, "header"):
+                    msg_frame_id = msg.header.frame_id
+                    self.sensor_topic_to_frame_id[topic_name] = msg_frame_id
+            else:
+                continue
+
+            if topic_type != "sensor_msgs/msg/CameraInfo":
+                continue
+
+            msg_type = get_message(topic_type)
+            msg = deserialize_message(data, msg_type)
+            yield topic_name, msg
+
+    def read_messages(
+        self, topics: List[str], start_time: Optional[builtin_interfaces.msg.Time] = None
+    ) -> Any:
+        if start_time is not None:
+            start_time = Time.from_msg(start_time)
+
+        reader = create_reader(self._bag_dir)
+        if len(topics) != 0:
+            reader.set_filter(StorageFilter(topics=topics))
+
+        while reader.has_next():
+            topic_name, data, timestamp = reader.read_next()
+            topic_type = self._topic_name_to_topic_type[topic_name]
+
+            # Marker messages fail to deserialize
+            # https://docs.ros.org/en/rolling/Releases/Release-Humble-Hawksbill.html#support-textures-and-embedded-meshes-for-marker-messages
+            if topic_type.startswith("visualization_msgs"):
+                continue
+
+            message = deserialize_message(data, get_message(topic_type))
+
+            if start_time is not None:
+                # FIXME(yukke42): if the message is tf, the message value is a list
+                message_time = Time.from_msg(message.header.stamp)
+
+                if message_time < start_time:
+                    continue
+
+            yield message
+
+    def get_transform_stamped(
+        self,
+        target_frame: str,
+        source_frame: str,
+        stamp: builtin_interfaces.msg.Time,
+    ) -> TransformStamped:
+        return self._tf_buffer.lookup_transform_core(target_frame, source_frame, stamp)
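A short sketch of how Rosbag2Reader is typically driven; the lidar topic and bag path are assumed examples.

```python
# Iterate deserialized messages and look up the ego pose per stamp (topic is illustrative).
from perception_dataset.rosbag2.rosbag2_reader import Rosbag2Reader

reader = Rosbag2Reader("./data/input_bag")
print(reader.start_timestamp)  # first valid header.stamp found in the bag, in seconds

for msg in reader.read_messages(topics=["/sensing/lidar/concatenated/pointcloud"]):
    # /tf and /tf_static were preloaded into the internal buffer at construction,
    # so a transform can be looked up at each message stamp
    tf = reader.get_transform_stamped("map", "base_link", msg.header.stamp)
    print(msg.header.frame_id, tf.transform.translation.x)
```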
diff --git a/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
new file mode 100644
index 00000000..ce2dbbc6
--- /dev/null
+++ b/perception_dataset/rosbag2/rosbag2_to_non_annotated_t4_converter.py
@@ -0,0 +1,724 @@
+import copy
+import glob
+import json
+import os
+import os.path as osp
+import shutil
+import sys
+from typing import Dict, List
+import warnings
+
+import builtin_interfaces.msg
+import cv2
+import numpy as np
+from sensor_msgs.msg import CompressedImage, PointCloud2
+
+from perception_dataset.abstract_converter import AbstractConverter
+from perception_dataset.constants import (
+    EXTENSION_ENUM,
+    SENSOR_ENUM,
+    SENSOR_MODALITY_ENUM,
+    T4_FORMAT_DIRECTORY_NAME,
+)
+from perception_dataset.rosbag2.converter_params import Rosbag2ConverterParams
+from perception_dataset.rosbag2.rosbag2_reader import Rosbag2Reader
+from perception_dataset.t4_dataset.classes.abstract_class import AbstractTable
+from perception_dataset.t4_dataset.classes.attribute import AttributeTable
+from perception_dataset.t4_dataset.classes.calibrated_sensor import CalibratedSensorTable
+from perception_dataset.t4_dataset.classes.category import CategoryTable
+from perception_dataset.t4_dataset.classes.ego_pose import EgoPoseTable
+from perception_dataset.t4_dataset.classes.instance import InstanceTable
+from perception_dataset.t4_dataset.classes.log import LogTable
+from perception_dataset.t4_dataset.classes.map import MapTable
+from perception_dataset.t4_dataset.classes.sample import SampleRecord, SampleTable
+from perception_dataset.t4_dataset.classes.sample_annotation import SampleAnnotationTable
+from perception_dataset.t4_dataset.classes.sample_data import SampleDataRecord, SampleDataTable
+from perception_dataset.t4_dataset.classes.scene import SceneRecord, SceneTable
+from perception_dataset.t4_dataset.classes.sensor import SensorTable
+from perception_dataset.t4_dataset.classes.visibility import VisibilityTable
+from perception_dataset.utils.logger import configure_logger
+import perception_dataset.utils.misc as misc_utils
+import perception_dataset.utils.rosbag2 as rosbag2_utils
+
+logger = configure_logger(modname=__name__)
+
+
+class Rosbag2ToNonAnnotatedT4Converter(AbstractConverter):
+    def __init__(self, params: Rosbag2ConverterParams) -> None:
+        super().__init__(params.input_base, params.output_base)
+
+        self._params = params
+        self._overwrite_mode = params.overwrite_mode
+
+    def _get_bag_dirs(self):
+        ret_bag_files: List[str] = []
+        for bag_dir in glob.glob(osp.join(self._input_base, "*")):
+            if not osp.isdir(bag_dir):
+                continue
+
+            metadata_file = osp.join(bag_dir, "metadata.yaml")
+            if not osp.exists(metadata_file):
+                logger.warning(f"{bag_dir} is a directory, but metadata.yaml doesn't exist.")
+                continue
+
+            ret_bag_files.append(bag_dir)
+
+        return ret_bag_files
+
+    def convert(self):
+        bag_dirs: List[str] = self._get_bag_dirs()
+
+        if not self._overwrite_mode:
+            dir_exist: bool = False
+            for bag_dir in bag_dirs:
+                bag_name: str = osp.basename(bag_dir)
+
+                output_dir = osp.join(self._output_base, bag_name)
+                if osp.exists(output_dir):
+                    logger.error(f"{output_dir} already exists.")
+                    dir_exist = True
+
+            if dir_exist:
+                raise ValueError("If you want to overwrite files, use the --overwrite option.")
+
+        for bag_dir in bag_dirs:
+            self._params.input_bag_path = bag_dir
+            bag_converter = _Rosbag2ToNonAnnotatedT4Converter(self._params)
+            bag_converter.convert()
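For reference, `_get_bag_dirs` above expects `input_base` to hold one bag directory per recording, each with its own metadata.yaml. A sketch of an assumed layout:

```python
# Assumed input layout scanned by _get_bag_dirs (names are illustrative):
#
# data/rosbags/                        <- input_base
# ├── bag_2023_06_01-12_00_00/
# │   ├── metadata.yaml                <- required; directories without it are skipped
# │   └── bag_2023_06_01-12_00_00_0.db3
# └── bag_2023_06_01-13_00_00/
#     ├── metadata.yaml
#     └── bag_2023_06_01-13_00_00_0.db3
```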
+class _Rosbag2ToNonAnnotatedT4Converter:
+    def __init__(self, params: Rosbag2ConverterParams) -> None:
+        self._input_bag: str = params.input_bag_path
+        self._output_base: str = params.output_base
+        self._skip_timestamp: float = params.skip_timestamp
+        self._num_load_frames: int = params.num_load_frames
+        self._crop_frames_unit: int = params.crop_frames_unit
+        self._without_compress: bool = params.without_compress
+        self._camera_latency: float = params.camera_latency_sec
+        self._start_timestamp: float = params.start_timestamp_sec
+        self._ignore_no_ego_transform_at_rosbag_beginning: bool = (
+            params.ignore_no_ego_transform_at_rosbag_beginning
+        )
+        self._generate_frame_every: int = params.generate_frame_every
+        self._generate_frame_every_meter: float = params.generate_frame_every_meter
+
+        # frame_id of coordinate transformation
+        self._ego_pose_target_frame: str = "map"
+        self._ego_pose_source_frame: str = "base_link"
+        self._calibrated_sensor_target_frame: str = "base_link"
+
+        # Note: used to detect message drops; 0.15 s gives some tolerance over the nominal 10 Hz (0.1 s) interval.
+        self._TIMESTAMP_DIFF = 0.15
+
+        self._lidar_sensor: Dict[str, str] = params.lidar_sensor
+        self._camera_sensors: List[Dict[str, str]] = params.camera_sensors
+        self._sensor_enums: List = []
+        self._set_sensors()
+        self._camera_only_mode: bool = False
+        if self._lidar_sensor["topic"] == "":
+            self._camera_only_mode = True
+
+        # init directories
+        self._bag_name = osp.basename(self._input_bag)
+        self._output_scene_dir = osp.join(self._output_base, self._bag_name)
+        self._output_anno_dir = osp.join(
+            self._output_scene_dir, T4_FORMAT_DIRECTORY_NAME.ANNOTATION.value
+        )
+        self._output_data_dir = osp.join(
+            self._output_scene_dir, T4_FORMAT_DIRECTORY_NAME.DATA.value
+        )
+
+        shutil.rmtree(self._output_scene_dir, ignore_errors=True)
+        self._make_directories()
+
+        self._bag_reader = Rosbag2Reader(self._input_bag)
+        self._calc_actual_num_load_frames()
+
+    def _calc_actual_num_load_frames(self):
+        topic_names: List[str] = [s["topic"] for s in self._camera_sensors]
+        if not self._camera_only_mode:
+            topic_names.append(self._lidar_sensor["topic"])
+
+        num_frames_in_bag = min([self._bag_reader.get_topic_count(t) for t in topic_names])
+        freq = 10
+        num_frames_to_skip = int(self._skip_timestamp * freq)
+        max_num_frames = num_frames_in_bag - num_frames_to_skip
+        num_frames_to_crop = 0
+
+        if not (self._num_load_frames > 0 and self._num_load_frames <= max_num_frames):
+            self._num_load_frames = max_num_frames
+            logger.info(
+                "The maximum possible number of frames will be loaded based on the topic count,"
+                f" since the value in the config is not in the (0, num_frames_in_bag - num_frames_to_skip = {max_num_frames}] range."
+            )
+
+        num_frames_to_crop = self._num_load_frames % self._crop_frames_unit
+        self._num_load_frames -= num_frames_to_crop
+
+        logger.info(
+            f"frames in bag: {num_frames_in_bag}, actual number of frames to load: {self._num_load_frames}, "
+            f"skipped: {num_frames_to_skip}, cropped: {num_frames_to_crop}"
+        )
+
+    def _set_sensors(self):
+        sensors: List[Dict[str, str]] = [self._lidar_sensor] + self._camera_sensors
+        for sensor in sensors:
+            sensor_channel = sensor["channel"]
+            if SENSOR_ENUM.has_channel(sensor_channel):
+                self._sensor_enums.append(getattr(SENSOR_ENUM, sensor_channel))
+
+    def _make_directories(self):
+        os.makedirs(self._output_anno_dir, exist_ok=True)
+        os.makedirs(self._output_data_dir, exist_ok=True)
+
+        for sensor_enum in self._sensor_enums:
+            os.makedirs(
+                osp.join(self._output_data_dir, sensor_enum.value["channel"]), exist_ok=True
+            )
+
+    def _init_tables(self):
+        # vehicle
+        self._log_table = LogTable()
+        self._map_table = MapTable()
+        self._sensor_table = SensorTable(
+            channel_to_modality={
+                enum.value["channel"]: enum.value["modality"] for enum in self._sensor_enums
+            }
+        )
+        self._calibrated_sensor_table = CalibratedSensorTable()
+        # extraction
+        self._scene_table = SceneTable()
+        self._sample_table = SampleTable()
+        self._sample_data_table = SampleDataTable()
+        self._ego_pose_table = EgoPoseTable()
+        # annotation (empty file)
+        self._instance_table = InstanceTable()
+        self._sample_annotation_table = SampleAnnotationTable()
+        # taxonomy (empty file)
+        self._category_table = CategoryTable(name_to_description={}, default_value="")
+        self._attribute_table = AttributeTable(name_to_description={}, default_value="")
+        self._visibility_table = VisibilityTable(level_to_description={}, default_value="")
+
+    def convert(self):
+        self._convert()
+        self._save_tables()
+        self._save_config()
+        if not self._without_compress:
+            self._compress_directory()
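A worked example of the `_calc_actual_num_load_frames` arithmetic above, with assumed numbers:

```python
# Worked example with illustrative numbers (freq = 10 Hz is the converter's assumption).
num_frames_in_bag = 127  # smallest per-topic message count among the configured sensors
skip_timestamp = 3.0     # seconds to skip after the first message
crop_frames_unit = 10
num_load_frames = 0      # config value; not in (0, max_num_frames], so it is replaced

num_frames_to_skip = int(skip_timestamp * 10)            # 30
max_num_frames = num_frames_in_bag - num_frames_to_skip  # 97
if not (0 < num_load_frames <= max_num_frames):
    num_load_frames = max_num_frames                     # 97
num_load_frames -= num_load_frames % crop_frames_unit    # 90, divisible by 10
print(num_load_frames)  # 90
```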
+    def _save_tables(self):
+        for cls_attr in self.__dict__.values():
+            if isinstance(cls_attr, AbstractTable):
+                print(f"{cls_attr.FILENAME}: #rows {len(cls_attr)}")
+                cls_attr.save_json(self._output_anno_dir)
+
+    def _save_config(self):
+        config_data = {
+            key: getattr(self, key)
+            for key in filter(
+                lambda o: not o.startswith("__")
+                and "sensor_enum" not in o
+                and not o.endswith("_table")
+                and not o.endswith("_dir")
+                and not o.endswith("_base")
+                and o != "_input_bag"
+                and o != "_bag_reader",
+                self.__dict__,
+            )
+        }
+        config_data = {"rosbag2_to_non_annotated_t4_converter": config_data}
+        with open(osp.join(self._output_scene_dir, "status.json"), "w") as f:
+            json.dump(config_data, f, indent=4, default=lambda o: getattr(o, "__dict__", str(o)))
+
+    def _compress_directory(self):
+        shutil.make_archive(
+            self._output_scene_dir,
+            "zip",
+            root_dir=os.path.dirname(self._output_scene_dir),
+            base_dir=self._bag_name,
+        )
+        shutil.make_archive(self._input_bag, "zip", root_dir=self._input_bag)
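`_save_tables`, `_save_config`, and `_compress_directory` together leave the following on disk; this layout is a sketch inferred from the code above, with illustrative names:

```python
# Assumed output layout after convert() (names are illustrative):
#
# output_base/
# ├── <bag_name>/
# │   ├── annotation/   <- *.json tables written by _save_tables
# │   ├── data/         <- per-channel sensor files (pcd.bin / jpg)
# │   └── status.json   <- converter parameters dumped by _save_config
# └── <bag_name>.zip    <- archive created by _compress_directory
# <input_bag>.zip is also created next to the input bag.
```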
+    def _convert(self) -> None:
+        """
+        1. init tables
+            - log: dummy file
+            - map: dummy file
+            - sensor: fixed sensor configuration
+            - calibrated_sensor: /tf_static
+            - scene: dummy file, name = rosbag filename
+            - sample: lidar
+            - sample_data: lidar + camera x 6
+            - ego_pose: /tf
+            - others: empty data file
+        2. read pointcloud
+            - add to sample, sample_data, and ego_pose
+            - is_key_frame=True
+            - fill in next/prev
+        3. load the images sequentially according to their mounting positions
+            - fill in sample_data and ego_pose
+            - is_key_frame=True
+            - fill in next/prev
+        """
+        if self._start_timestamp < sys.float_info.epsilon:
+            start_timestamp = self._bag_reader.start_timestamp + self._skip_timestamp
+        else:
+            start_timestamp = self._start_timestamp
+        logger.info(f"set start_timestamp to {start_timestamp}")
+
+        sensor_channel_to_sample_data_token_list: Dict[str, List[str]] = {}
+
+        self._init_tables()
+        scene_token = self._convert_static_data()
+
+        if not self._camera_only_mode:
+            lidar_sensor_channel = self._lidar_sensor["channel"]
+            sensor_channel_to_sample_data_token_list[
+                lidar_sensor_channel
+            ] = self._convert_pointcloud(
+                start_timestamp=start_timestamp,
+                sensor_channel=lidar_sensor_channel,
+                topic=self._lidar_sensor["topic"],
+                scene_token=scene_token,
+            )
+
+        # Note: align the loading order of the cameras with the shutter sequence.
+        # Note: lidar scan initiation and the first camera data acquisition start at the same time,
+        # but the camera header.stamp lags behind due to data transfer and edge processing.
+        first_sample_data_record: SampleDataRecord = self._sample_data_table.to_records()[0]
+
+        if self._camera_only_mode:
+            # temporarily use start_timestamp instead of the recorded timestamp for non-synced data
+            camera_start_timestamp = start_timestamp
+        else:
+            camera_start_timestamp = (
+                misc_utils.nusc_timestamp_to_unix_timestamp(first_sample_data_record.timestamp)
+                + self._camera_latency
+            )
+
+        for camera_sensor in self._camera_sensors:
+            sensor_channel = camera_sensor["channel"]
+            sensor_channel_to_sample_data_token_list[sensor_channel] = self._convert_image(
+                start_timestamp=camera_start_timestamp,
+                sensor_channel=camera_sensor["channel"],
+                topic=camera_sensor["topic"],
+                scene_token=scene_token,
+            )
+
+            first_sample_data_token: str = sensor_channel_to_sample_data_token_list[
+                sensor_channel
+            ][0]
+            first_sample_data_record: SampleDataRecord = (
+                self._sample_data_table.select_record_from_token(first_sample_data_token)
+            )
+            camera_start_timestamp = misc_utils.nusc_timestamp_to_unix_timestamp(
+                first_sample_data_record.timestamp
+            )
+
+        self._set_scene_data()
+        self._connect_sample_in_scene()
+        self._connect_sample_data_in_scene(sensor_channel_to_sample_data_token_list)
+
+    def _convert_static_data(self):
+        # Log, Map
+        log_token = self._log_table.insert_into_table(
+            logfile="",
+            vehicle="",
+            data_captured="",
+            location="",
+        )
+        self._map_table.insert_into_table(log_tokens=[log_token], category="", filename="")
+
+        # Scene
+        scene_token = self._scene_table.insert_into_table(
+            name=self._bag_name,
+            description="",
+            log_token=log_token,
+        )
+
+        return scene_token
+
+    def _convert_pointcloud(
+        self,
+        start_timestamp: float,
+        sensor_channel: str,
+        topic: str,
+        scene_token: str,
+    ) -> List[str]:
+        sample_data_token_list: List[str] = []
+
+        prev_frame_unix_timestamp: float = 0.0
+        frame_index: int = 0
+
+        start_time_in_time = rosbag2_utils.unix_timestamp_to_stamp(start_timestamp)
+        calibrated_sensor_token = self._generate_calibrated_sensor(
+            sensor_channel, start_time_in_time, topic
+        )
+        for pointcloud_msg in self._bag_reader.read_messages(
+            topics=[topic],
+            start_time=start_time_in_time,
+        ):
+            pointcloud_msg: PointCloud2
+
+            try:
+                ego_pose_token = self._generate_ego_pose(pointcloud_msg.header.stamp)
+            except Exception as e:
+                if self._ignore_no_ego_transform_at_rosbag_beginning:
+                    warnings.warn(
+                        f"Skipping frame with header stamp: {pointcloud_msg.header.stamp}"
+                    )
+                    continue
+                else:
+                    raise e
+
+            if frame_index >= self._num_load_frames:
+                break
+
+            unix_timestamp = rosbag2_utils.stamp_to_unix_timestamp(pointcloud_msg.header.stamp)
+            if frame_index > 0:
+                # Note: Message drops are not tolerated.
+ print( + f"frame_index:{frame_index}, unix_timestamp - prev_frame_unix_timestamp: {unix_timestamp - prev_frame_unix_timestamp}" + ) + if ( + unix_timestamp - prev_frame_unix_timestamp + ) > self._TIMESTAMP_DIFF or unix_timestamp < prev_frame_unix_timestamp: + raise ValueError( + f"PointCloud message is dropped [{frame_index}]: cur={unix_timestamp} prev={prev_frame_unix_timestamp}" + ) + + nusc_timestamp = rosbag2_utils.stamp_to_nusc_timestamp(pointcloud_msg.header.stamp) + sample_token = self._sample_table.insert_into_table( + timestamp=nusc_timestamp, scene_token=scene_token + ) + + fileformat = EXTENSION_ENUM.PCDBIN.value[1:] + filename = misc_utils.get_sample_data_filename(sensor_channel, frame_index, fileformat) + sample_data_token = self._sample_data_table.insert_into_table( + sample_token=sample_token, + ego_pose_token=ego_pose_token, + calibrated_sensor_token=calibrated_sensor_token, + filename=filename, + fileformat=fileformat, + timestamp=nusc_timestamp, + is_key_frame=True, + ) + sample_data_record: SampleDataRecord = ( + self._sample_data_table.select_record_from_token(sample_data_token) + ) + + # TODO(yukke42): Save data in the PCD file format, which allows flexible field configuration. + points_arr = rosbag2_utils.pointcloud_msg_to_numpy(pointcloud_msg) + points_arr.tofile(osp.join(self._output_scene_dir, sample_data_record.filename)) + + sample_data_token_list.append(sample_data_token) + prev_frame_unix_timestamp = unix_timestamp + frame_index += 1 + + return sample_data_token_list + + def _convert_image( + self, + start_timestamp: float, + sensor_channel: str, + topic: str, + scene_token: str, + ): + """convert image topic to raw image data""" + sample_data_token_list: List[str] = [] + sample_records: List[SampleRecord] = self._sample_table.to_records() + + if not self._camera_only_mode: + prev_frame_unix_timestamp: float = ( + misc_utils.nusc_timestamp_to_unix_timestamp(sample_records[0].timestamp) + + self._camera_latency + ) + else: + prev_frame_unix_timestamp: float = start_timestamp + frame_index: int = 0 + generated_frame_index: int = 0 + + start_time_in_time = rosbag2_utils.unix_timestamp_to_stamp(start_timestamp) + calibrated_sensor_token = self._generate_calibrated_sensor( + sensor_channel, start_time_in_time, topic + ) + for image_msg in self._bag_reader.read_messages( + topics=[topic], + start_time=start_time_in_time, + ): + image_msg: CompressedImage + if generated_frame_index >= self._num_load_frames: + break + + image_unix_timestamp = rosbag2_utils.stamp_to_unix_timestamp(image_msg.header.stamp) + + if not self._camera_only_mode: + sample_record: SampleRecord = sample_records[frame_index] + sample_token: str = sample_record.token + lidar_unix_timestamp = misc_utils.nusc_timestamp_to_unix_timestamp( + sample_record.timestamp + ) + while (image_unix_timestamp - prev_frame_unix_timestamp) > self._TIMESTAMP_DIFF: + dummy_image_timestamp = self._insert_dummy_image_frame( + image_msg, + sensor_channel, + generated_frame_index, + image_unix_timestamp, + prev_frame_unix_timestamp, + calibrated_sensor_token, + sample_token, + ) + frame_index += 1 + generated_frame_index += 1 + prev_frame_unix_timestamp = dummy_image_timestamp + if generated_frame_index >= self._num_load_frames: + return sample_data_token_list + + sample_record: SampleRecord = sample_records[generated_frame_index] + sample_token: str = sample_record.token + lidar_unix_timestamp = misc_utils.nusc_timestamp_to_unix_timestamp( + sample_record.timestamp + ) + + if (image_unix_timestamp - 
lidar_unix_timestamp) > (
+                    self._camera_latency + self._TIMESTAMP_DIFF
+                ):
+                    raise ValueError(
+                        f"{topic} message may be dropped at [{generated_frame_index}]: lidar_timestamp={lidar_unix_timestamp} image_timestamp={image_unix_timestamp}"
+                    )
+
+                print(
+                    f"frame{generated_frame_index}, stamp = {image_unix_timestamp}, diff cam - lidar = {image_unix_timestamp - lidar_unix_timestamp:0.3f} sec"
+                )
+            else:
+                if (frame_index % self._generate_frame_every) == 0:
+                    nusc_timestamp = rosbag2_utils.stamp_to_nusc_timestamp(image_msg.header.stamp)
+                    sample_token: str = self._sample_table.insert_into_table(
+                        timestamp=nusc_timestamp, scene_token=scene_token
+                    )
+
+            if (frame_index % self._generate_frame_every) == 0:
+                sample_data_token = self._generate_image_data(
+                    image_msg,
+                    sample_token,
+                    calibrated_sensor_token,
+                    sensor_channel,
+                    generated_frame_index,
+                )
+                sample_data_token_list.append(sample_data_token)
+                generated_frame_index += 1
+            prev_frame_unix_timestamp = image_unix_timestamp
+            frame_index += 1
+
+        assert len(sample_data_token_list) > 0
+
+        return sample_data_token_list
+
+    def _insert_dummy_image_frame(
+        self,
+        image_msg,
+        sensor_channel,
+        generated_frame_index,
+        image_unix_timestamp,
+        prev_frame_unix_timestamp,
+        calibrated_sensor_token,
+        sample_token,
+    ) -> float:
+        print(
+            f"Image message is dropped [{sensor_channel}-No.{generated_frame_index}]: cur={image_unix_timestamp} prev={prev_frame_unix_timestamp}"
+        )
+        dummy_image_msg = copy.deepcopy(image_msg)
+        dummy_image_timestamp = image_unix_timestamp
+        while (dummy_image_timestamp - prev_frame_unix_timestamp) > self._TIMESTAMP_DIFF:
+            dummy_image_timestamp -= 0.1
+
+        dummy_image_msg.header.stamp = rosbag2_utils.unix_timestamp_to_stamp(dummy_image_timestamp)
+        self._generate_image_data(
+            dummy_image_msg,
+            sample_token,
+            calibrated_sensor_token,
+            sensor_channel,
+            generated_frame_index,
+            output_blank_image=True,
+            is_key_frame=False,
+        )
+        print(
+            f"A blank image is generated since the message may be dropped [{generated_frame_index}]: cur={image_unix_timestamp} prev={prev_frame_unix_timestamp}"
+        )
+        return dummy_image_timestamp
+
+    def _generate_image_data(
+        self,
+        image_msg: CompressedImage,
+        sample_token: str,
+        calibrated_sensor_token: str,
+        sensor_channel: str,
+        frame_index: int,
+        output_blank_image: bool = False,
+        is_key_frame: bool = True,
+    ):
+        ego_pose_token = self._generate_ego_pose(image_msg.header.stamp)
+        image_arr = rosbag2_utils.compressed_msg_to_numpy(image_msg)
+        if output_blank_image:
+            image_arr = np.zeros(shape=image_arr.shape, dtype=np.uint8)
+        image_unix_timestamp = rosbag2_utils.stamp_to_unix_timestamp(image_msg.header.stamp)
+
+        fileformat = EXTENSION_ENUM.JPG.value[1:]  # Note: jpg for all images
+        filename = misc_utils.get_sample_data_filename(sensor_channel, frame_index, fileformat)
+        sample_data_token = self._sample_data_table.insert_into_table(
+            sample_token=sample_token,
+            ego_pose_token=ego_pose_token,
+            calibrated_sensor_token=calibrated_sensor_token,
+            filename=filename,
+            fileformat=fileformat,
+            timestamp=misc_utils.unix_timestamp_to_nusc_timestamp(image_unix_timestamp),
+            is_key_frame=is_key_frame,
+            height=image_arr.shape[0],
+            width=image_arr.shape[1],
+            is_valid=is_key_frame and (not output_blank_image),
+        )
+
+        sample_data_record: SampleDataRecord = self._sample_data_table.select_record_from_token(
+            sample_data_token
+        )
+        cv2.imwrite(osp.join(self._output_scene_dir, sample_data_record.filename), image_arr)
+
+        return sample_data_token
+
+    def _generate_ego_pose(self, 
stamp: builtin_interfaces.msg.Time) -> str: + transform_stamped = self._bag_reader.get_transform_stamped( + target_frame=self._ego_pose_target_frame, + source_frame=self._ego_pose_source_frame, + stamp=stamp, + ) + + ego_pose_token = self._ego_pose_table.insert_into_table( + translation={ + "x": transform_stamped.transform.translation.x, + "y": transform_stamped.transform.translation.y, + "z": transform_stamped.transform.translation.z, + }, + rotation={ + "w": transform_stamped.transform.rotation.w, + "x": transform_stamped.transform.rotation.x, + "y": transform_stamped.transform.rotation.y, + "z": transform_stamped.transform.rotation.z, + }, + timestamp=rosbag2_utils.stamp_to_nusc_timestamp(transform_stamped.header.stamp), + ) + + return ego_pose_token + + def _generate_calibrated_sensor( + self, sensor_channel: str, start_timestamp: builtin_interfaces.msg.Time, topic_name="" + ) -> str: + calibrated_sensor_token = str() + for sensor_enum in self._sensor_enums: + channel = sensor_enum.value["channel"] + modality = sensor_enum.value["modality"] + + if channel != sensor_channel: + continue + + sensor_token = self._sensor_table.insert_into_table( + channel=channel, + modality=modality, + ) + + translation = {"x": 0.0, "y": 0.0, "z": 0.0} + rotation = {"w": 1.0, "x": 0.0, "y": 0.0, "z": 0.0} + frame_id = self._bag_reader.sensor_topic_to_frame_id.get(topic_name) + print( + f"generate_calib_sensor, start_timestamp:{start_timestamp}, topic name:{topic_name}, frame id:{frame_id}" + ) + if frame_id is not None: + transform_stamped = self._bag_reader.get_transform_stamped( + target_frame=self._calibrated_sensor_target_frame, + source_frame=frame_id, + stamp=start_timestamp, + ) + translation = { + "x": transform_stamped.transform.translation.x, + "y": transform_stamped.transform.translation.y, + "z": transform_stamped.transform.translation.z, + } + rotation = { + "w": transform_stamped.transform.rotation.w, + "x": transform_stamped.transform.rotation.x, + "y": transform_stamped.transform.rotation.y, + "z": transform_stamped.transform.rotation.z, + } + + if modality == SENSOR_MODALITY_ENUM.LIDAR.value: + calibrated_sensor_token = self._calibrated_sensor_table.insert_into_table( + sensor_token=sensor_token, + translation=translation, + rotation=rotation, + camera_intrinsic=[], + camera_distortion=[], + ) + elif modality == SENSOR_MODALITY_ENUM.CAMERA.value: + cam_info_topic = topic_name.rsplit("/", 2)[0] + "/camera_info" + info = self._bag_reader.camera_info.get(cam_info_topic) + if info is None: + continue + camera_intrinsic = np.delete(info.p.reshape(3, 4), 3, 1).tolist() + camera_distortion = info.d.tolist() + + calibrated_sensor_token = self._calibrated_sensor_table.insert_into_table( + sensor_token=sensor_token, + translation=translation, + rotation=rotation, + camera_intrinsic=camera_intrinsic, + camera_distortion=camera_distortion, + ) + else: + raise ValueError(f"Unexpected sensor modality: {modality}") + + return calibrated_sensor_token + + def _set_scene_data(self): + scene_records: List[SceneRecord] = self._scene_table.to_records() + assert len(scene_records) == 1, "#scene_records must be 1." 
+
+        sample_token_list: List[str] = [rec.token for rec in self._sample_table.to_records()]
+        scene_record: SceneRecord = scene_records[0]
+
+        scene_record.nbr_samples = len(sample_token_list)
+        scene_record.first_sample_token = sample_token_list[0]
+        scene_record.last_sample_token = sample_token_list[-1]
+
+    def _connect_sample_in_scene(self):
+        """add prev/next of Sample"""
+        sample_token_list: List[str] = [rec.token for rec in self._sample_table.to_records()]
+
+        for token_i in range(1, len(sample_token_list)):
+            prev_token: str = sample_token_list[token_i - 1]
+            cur_token: str = sample_token_list[token_i]
+
+            prev_rec: SampleRecord = self._sample_table.select_record_from_token(prev_token)
+            prev_rec.next = cur_token
+            self._sample_table.set_record_to_table(prev_rec)
+
+            cur_rec: SampleRecord = self._sample_table.select_record_from_token(cur_token)
+            cur_rec.prev = prev_token
+            self._sample_table.set_record_to_table(cur_rec)
+
+    def _connect_sample_data_in_scene(
+        self, sensor_channel_to_sample_data_token_list: Dict[str, List[str]]
+    ):
+        """add prev/next of SampleData"""
+        for sample_data_token_list in sensor_channel_to_sample_data_token_list.values():
+            for token_i in range(1, len(sample_data_token_list)):
+                prev_token: str = sample_data_token_list[token_i - 1]
+                cur_token: str = sample_data_token_list[token_i]
+
+                prev_rec: SampleDataRecord = self._sample_data_table.select_record_from_token(
+                    prev_token
+                )
+                prev_rec.next = cur_token
+                self._sample_data_table.set_record_to_table(prev_rec)
+
+                cur_rec: SampleDataRecord = self._sample_data_table.select_record_from_token(
+                    cur_token
+                )
+                cur_rec.prev = prev_token
+                self._sample_data_table.set_record_to_table(cur_rec)
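Putting the pieces together, a minimal driver for the non-annotated conversion might look like this; the task name, topic, channel, and paths are assumptions for illustration.

```python
# Minimal end-to-end sketch (config values are illustrative assumptions).
from perception_dataset.rosbag2.converter_params import Rosbag2ConverterParams
from perception_dataset.rosbag2.rosbag2_to_non_annotated_t4_converter import (
    Rosbag2ToNonAnnotatedT4Converter,
)

params = Rosbag2ConverterParams(
    task="convert_rosbag2_to_non_annotated_t4",  # hypothetical task name
    input_base="./data/rosbags",           # directory containing one or more bag dirs
    output_base="./data/t4_non_annotated",
    num_load_frames=0,                     # non-positive: load as many frames as possible
    skip_timestamp=1.0,
    lidar_sensor={"topic": "/sensing/lidar/concatenated/pointcloud", "channel": "LIDAR_CONCAT"},
)
Rosbag2ToNonAnnotatedT4Converter(params).convert()
```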
diff --git a/perception_dataset/rosbag2/rosbag2_to_t4_converter.py b/perception_dataset/rosbag2/rosbag2_to_t4_converter.py
new file mode 100644
index 00000000..5fb93606
--- /dev/null
+++ b/perception_dataset/rosbag2/rosbag2_to_t4_converter.py
@@ -0,0 +1,208 @@
+import copy
+import glob
+from multiprocessing import Pool
+import os
+import sys
+from typing import Any, Dict, List
+
+from perception_dataset.abstract_converter_to_t4 import AbstractAnnotatedToT4Converter
+from perception_dataset.ros2.tf2_geometry_msgs.tf2_geometry_msgs import do_transform_pose
+from perception_dataset.rosbag2.converter_params import Rosbag2ConverterParams
+from perception_dataset.rosbag2.rosbag2_to_non_annotated_t4_converter import (
+    _Rosbag2ToNonAnnotatedT4Converter,
+)
+from perception_dataset.t4_dataset.annotation_files_generator import AnnotationFilesGenerator
+from perception_dataset.t4_dataset.classes.calibrated_sensor import CalibratedSensorTable
+from perception_dataset.t4_dataset.classes.ego_pose import EgoPoseTable
+from perception_dataset.t4_dataset.classes.log import LogTable
+from perception_dataset.t4_dataset.classes.map import MapTable
+from perception_dataset.t4_dataset.classes.sample import SampleRecord, SampleTable
+from perception_dataset.t4_dataset.classes.sample_data import SampleDataTable
+from perception_dataset.t4_dataset.classes.scene import SceneTable
+from perception_dataset.t4_dataset.classes.sensor import SensorTable
+from perception_dataset.utils.calculate_num_points import calculate_num_points
+from perception_dataset.utils.logger import configure_logger
+import perception_dataset.utils.rosbag2 as rosbag2_utils
+
+from .autoware_msgs import parse_dynamic_object_array, parse_perception_objects
+
+logger = configure_logger(modname=__name__)
+
+
+class Rosbag2ToT4Converter(AbstractAnnotatedToT4Converter):
+    def __init__(self, params: Rosbag2ConverterParams) -> None:
+        super().__init__(params.input_base, params.output_base)
+
+        self._params = params
+        self._overwrite_mode = params.overwrite_mode
+
+    def convert(self):
+        bag_dirs: List[str] = self._search_bag_dirs()
+
+        if not self._overwrite_mode:
+            # check if the output already exists
+            exist_dir = False
+            for bag_dir in bag_dirs:
+                path_to_output = os.path.join(self._output_base, os.path.basename(bag_dir))
+                if os.path.exists(path_to_output):
+                    logger.error(f"{path_to_output} already exists.")
+                    exist_dir = True
+                    break
+            if exist_dir:
+                raise ValueError("Use the --overwrite option to overwrite files.")
+            else:
+                logger.info("No output files exist yet. They will be created.")
+
+        # parallel rosbag conversion
+        with Pool(processes=self._params.workers_number) as pool:
+            pool.map(self._convert_bag, bag_dirs)
+
+    def _convert_bag(self, bag_dir: str):
+        try:
+            params = copy.deepcopy(self._params)
+            params.input_bag_path = bag_dir
+            converter = _Rosbag2ToT4Converter(params)
+            converter.convert()
+        except Exception:
+            logger.exception(f"{bag_dir} failed with exception")
+            raise
+
+    def _search_bag_dirs(self):
+        ret_bag_files: List[str] = []
+        logger.info(f"Searching bag files in {self._input_base}")
+        for bag_dir in glob.glob(os.path.join(self._input_base, "*")):
+            if not os.path.isdir(bag_dir):
+                continue
+            logger.info(f"Found bag dir: {bag_dir}")
+
+            meta_file = os.path.join(bag_dir, "metadata.yaml")
+            if not os.path.exists(meta_file):
+                logger.warning(f"{bag_dir} is a directory, but metadata.yaml doesn't exist.")
+                continue
+
+            ret_bag_files.append(bag_dir)
+
+        return ret_bag_files
+
+
+class _Rosbag2ToT4Converter(_Rosbag2ToNonAnnotatedT4Converter):
+    def __init__(self, params: Rosbag2ConverterParams) -> None:
+        super().__init__(params)
+
+        self._object_topic_name: str = params.object_topic_name
+
+        # frame_id of coordinate transformation
+        self._object_msg_type: str = params.object_msg_type
+        self._ego_pose_target_frame: str = params.world_frame_id
+        self._ego_pose_source_frame: str = "base_link"
+        self._calibrated_sensor_target_frame: str = "base_link"
+
+        self._annotation_files_generator = AnnotationFilesGenerator(with_camera=params.with_camera)
+
+    def _init_tables(self):
+        # vehicle
+        self._log_table = LogTable()
+        self._map_table = MapTable()
+        self._sensor_table = SensorTable(
+            channel_to_modality={
+                enum.value["channel"]: enum.value["modality"] for enum in self._sensor_enums
+            }
+        )
+        self._calibrated_sensor_table = CalibratedSensorTable()
+        # extraction
+        self._scene_table = SceneTable()
+        self._sample_table = SampleTable()
+        self._sample_data_table = SampleDataTable()
+        self._ego_pose_table = EgoPoseTable()
+
+    def convert(self):
+        if self._start_timestamp < sys.float_info.epsilon:
+            start_timestamp = self._bag_reader.start_timestamp
+        else:
+            start_timestamp = self._start_timestamp
+        start_timestamp = start_timestamp + self._skip_timestamp
+
+        assert (
+            self._bag_reader.get_topic_count(self._object_topic_name) > 0
+        ), f"No object topic name: {self._object_topic_name}"
+
+        self._save_config()
+        self._convert()
+        self._convert_objects(start_timestamp)
+        self._save_tables()
+        self._annotation_files_generator.save_tables(self._output_anno_dir)
+        # Calculate and overwrite num_lidar_pts in annotations
+        self._calculate_num_points()
+
+    def _calculate_num_points(self):
+        logger.info("Calculating number of points...")
+        annotation_table = self._annotation_files_generator._sample_annotation_table
+        calculate_num_points(
+            self._output_scene_dir,
lidar_sensor_channel=self._lidar_sensor["channel"], + annotation_table=annotation_table, + ) + annotation_table.save_json(self._output_anno_dir) + + def _convert_objects(self, start_timestamp: float): + """read object bbox ground truth from rosbag""" + start_time_in_time = rosbag2_utils.unix_timestamp_to_stamp(start_timestamp) + scene_timestamp_objects_pair_list: List[Dict[str, Any]] = [] + for message in self._bag_reader.read_messages( + topics=[self._object_topic_name], + start_time=start_time_in_time, + ): + if self._object_msg_type == "DynamicObjectArray": + scene_annotation_list = parse_dynamic_object_array(message) + elif self._object_msg_type in ("DetectedObjects", "TrackedObjects"): + if message.header.frame_id != self._ego_pose_target_frame: + transform_stamped = self._bag_reader.get_transform_stamped( + target_frame=self._ego_pose_target_frame, + source_frame=message.header.frame_id, + stamp=message.header.stamp, + ) + for obj in message.objects: + obj.kinematics.pose_with_covariance.pose = do_transform_pose( + obj.kinematics.pose_with_covariance.pose, transform_stamped + ) + + scene_annotation_list = parse_perception_objects(message) + else: + raise ValueError(f"Invalid Object message type: {self._object_msg_type}") + + timestamp = rosbag2_utils.stamp_to_nusc_timestamp(message.header.stamp) + scene_timestamp_objects_pair = { + "timestamp": timestamp, + "scene_annotation_list": scene_annotation_list, + } + scene_timestamp_objects_pair_list.append(scene_timestamp_objects_pair) + + assert len(scene_timestamp_objects_pair_list) > 0, "There are NO objects." + + # use the objects closest to the timestamp of the lidar + scene_anno_dict: Dict[int, List[Dict[str, Any]]] = {} + frame_index_to_sample_token: Dict[int, str] = {} + for idx, sample in enumerate(self._sample_table.to_records()): + sample: SampleRecord + object_dict = self._get_closest_timestamp( + scene_timestamp_objects_pair_list, + sample.timestamp, + ) + scene_anno_dict[idx] = object_dict["scene_annotation_list"] + frame_index_to_sample_token[idx] = sample.token + + self._annotation_files_generator.convert_annotations( + scene_anno_dict=scene_anno_dict, + frame_index_to_sample_token=frame_index_to_sample_token, + dataset_name="synthetic", + mask=None, + frame_index_to_sample_data_token=None, + ) + + def _get_closest_timestamp(self, objects_list: List, timestamp: float): + """Get the closest element to 'timestamp' from the input list.""" + res = min( + objects_list, + key=lambda objects_list_item: abs(objects_list_item["timestamp"] - timestamp), + ) + return res diff --git a/perception_dataset/t4_dataset/__init__.py b/perception_dataset/t4_dataset/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/t4_dataset/annotation_files_generator.py b/perception_dataset/t4_dataset/annotation_files_generator.py new file mode 100644 index 00000000..b1f75caa --- /dev/null +++ b/perception_dataset/t4_dataset/annotation_files_generator.py @@ -0,0 +1,305 @@ +import base64 +from collections import defaultdict +import os.path as osp +from typing import Any, Dict, List, Optional + +from nptyping import NDArray +from nuimages import NuImages +import numpy as np +from nuscenes.nuscenes import NuScenes +from pycocotools import mask as cocomask + +from perception_dataset.constants import SENSOR_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractTable +from perception_dataset.t4_dataset.classes.attribute import AttributeTable +from perception_dataset.t4_dataset.classes.category import 
CategoryTable +from perception_dataset.t4_dataset.classes.instance import InstanceRecord, InstanceTable +from perception_dataset.t4_dataset.classes.object_ann import ObjectAnnTable +from perception_dataset.t4_dataset.classes.sample_annotation import ( + SampleAnnotationRecord, + SampleAnnotationTable, +) +from perception_dataset.t4_dataset.classes.surface_ann import SurfaceAnnTable +from perception_dataset.t4_dataset.classes.visibility import VisibilityTable +from perception_dataset.utils.calculate_num_points import calculate_num_points + + +class AnnotationFilesGenerator: + def __init__(self, with_camera: bool = True, description: Dict[str, Dict[str, str]] = {}): + # TODO(yukke42): remove the hard coded attribute description + self._attribute_table = AttributeTable( + name_to_description={}, + default_value="", + ) + # TODO(yukke42): remove the hard coded category description + self._category_table = CategoryTable( + name_to_description={}, + default_value="", + ) + self._instance_table = InstanceTable() + self._visibility_table = VisibilityTable( + level_to_description=description.get( + "visibility", + { + "v0-40": "visibility of whole object is between 0 and 40%", + "v40-60": "visibility of whole object is between 40 and 60%", + "v60-80": "visibility of whole object is between 60 and 80%", + "v80-100": "visibility of whole object is between 80 and 100%", + "none": "visibility isn't available", + }, + ), + default_value="", + ) + self._sample_annotation_table = SampleAnnotationTable() + self._object_ann_table = ObjectAnnTable() + self._surface_ann_table = SurfaceAnnTable() + + self._instance_token_to_annotation_token_list: Dict[str, List[str]] = defaultdict(list) + + if with_camera: + self._camera2idx = description.get("camera_index") + + def save_tables(self, anno_dir: str): + for cls_attr in self.__dict__.values(): + if isinstance(cls_attr, AbstractTable): + print(f"{cls_attr.FILENAME}: #rows {len(cls_attr)}") + cls_attr.save_json(anno_dir) + + def convert_one_scene( + self, + input_dir: str, + output_dir: str, + scene_anno_dict: Dict[int, List[Dict[str, Any]]], + dataset_name: str, + ): + anno_dir = osp.join(output_dir, "annotation") + if not osp.exists(anno_dir): + raise ValueError(f"Annotations files doesn't exist in {anno_dir}") + + nusc = NuScenes(version="annotation", dataroot=input_dir, verbose=False) + frame_index_to_sample_token: Dict[int, str] = {} + for frame_index, sample in enumerate(nusc.sample): + frame_index_to_sample_token[frame_index] = sample["token"] + try: + if "LIDAR_TOP" in sample["data"]: + lidar_sensor_channel = SENSOR_ENUM.LIDAR_TOP.value["channel"] + else: + lidar_sensor_channel = SENSOR_ENUM.LIDAR_CONCAT.value["channel"] + except KeyError as e: + print(e) + + nuim = NuImages(version="annotation", dataroot=input_dir, verbose=False) + frame_index_to_sample_data_token: List[Dict[int, str]] = [{} for x in range(6)] + mask: List[Dict[int, str]] = [{} for x in range(6)] + + has_2d_annotation: bool = False + for frame_index in sorted(scene_anno_dict.keys()): + anno_list: List[Dict[str, Any]] = scene_anno_dict[frame_index] + for anno in anno_list: + if "two_d_box" in anno.keys(): + has_2d_annotation = True + + if has_2d_annotation: + for frame_index_nuim, sample_nuim in enumerate(nuim.sample_data): + if ( + sample_nuim["fileformat"] == "png" or sample_nuim["fileformat"] == "jpg" + ) and sample_nuim["is_key_frame"]: + cam = sample_nuim["filename"].split("/")[1] + cam_idx = self._camera2idx[cam] + + frame_index = 
int((sample_nuim["filename"].split("/")[2]).split(".")[0]) + frame_index_to_sample_data_token[cam_idx].update( + {frame_index: sample_nuim["token"]} + ) + + width: int = sample_nuim["width"] + height: int = sample_nuim["height"] + object_mask: NDArray = np.array( + [[0 for _ in range(height)] for __ in range(width)], dtype=np.uint8 + ) + object_mask = cocomask.encode(np.asfortranarray(object_mask)) + object_mask["counts"] = repr(base64.b64encode(object_mask["counts"]))[2:] + mask[cam_idx].update({frame_index: object_mask}) + + self.convert_annotations( + scene_anno_dict=scene_anno_dict, + frame_index_to_sample_token=frame_index_to_sample_token, + dataset_name=dataset_name, + frame_index_to_sample_data_token=frame_index_to_sample_data_token, + mask=mask, + ) + + self._attribute_table.save_json(anno_dir) + self._category_table.save_json(anno_dir) + self._instance_table.save_json(anno_dir) + self._sample_annotation_table.save_json(anno_dir) + self._visibility_table.save_json(anno_dir) + self._object_ann_table.save_json(anno_dir) + self._surface_ann_table.save_json(anno_dir) + # Calculate and overwrite number of points in lidar cuboid bounding box in annotations + calculate_num_points(output_dir, lidar_sensor_channel, self._sample_annotation_table) + self._sample_annotation_table.save_json(anno_dir) + + def convert_annotations( + self, + scene_anno_dict: Dict[int, List[Dict[str, Any]]], + frame_index_to_sample_token: Dict[int, str], + dataset_name: str, + frame_index_to_sample_data_token: Optional[List[Dict[int, str]]] = None, + mask: Optional[List[Dict[int, str]]] = None, + ): + self._convert_to_t4_format( + scene_anno_dict=scene_anno_dict, + frame_index_to_sample_token=frame_index_to_sample_token, + dataset_name=dataset_name, + frame_index_to_sample_data_token=frame_index_to_sample_data_token, + mask=mask, + ) + self._connect_annotations_in_scene() + + def _convert_to_t4_format( + self, + scene_anno_dict: Dict[int, List[Dict[str, Any]]], + frame_index_to_sample_token: Dict[int, str], + dataset_name: str, + frame_index_to_sample_data_token: List[Dict[int, str]], + mask: List[Dict[int, str]], + ): + """Convert the annotations to the NuScenes format. + + Args: + scene_anno_dict (Dict[int, List[Dict[str, Any]]]): [description] + frame_index_to_sample_token (Dict[int, str]): [description] + frame_index_to_sample_data_token (Dict[int, str]): + + scene_anno_dict: + { + 0: [ + { + "category_name" (str): category name of object, + "instance_id" (str): instance id of object, + "attribute_names" (List[str]): list of object attributes, + "three_d_box": { + "translation": { + "x" (float): x of object location, + "y" (float): y of object location, + "z" (float): z of object location, + }, + "size": { + "width" (float): width of object size, + "length" (float): length of object size, + "height" (float): height of object size, + }, + "rotation": { + "w" (float): w of object quaternion, + "x" (float): x of object quaternion, + "y" (float): y of object quaternion. + "z" (float): z of object quaternion, + }, + }, + "two_d_box": [ + "x" (float): x of left top corner, + "y" (float): y of left top corner, + "w" (float): width of bbox, + "h" (float): height of bbox, + ] + "sensor_id": id of the camera + "num_lidar_pts" (int): the number of lidar points in object, + "num_radar_pts" (int): the number of radar points in object, + }, + ... + ], + 1: []. ... 
+ } + + """ + for frame_index in sorted(scene_anno_dict.keys()): + anno_list: List[Dict[str, Any]] = scene_anno_dict[frame_index] + for anno in anno_list: + # Category + category_token: str = self._category_table.get_token_from_name( + name=anno["category_name"] + ) + + # Instance + instance_token: str = self._instance_table.get_token_from_id( + instance_id=anno["instance_id"], + category_token=category_token, + dataset_name=dataset_name, + ) + + # Attribute + attribute_tokens: List[str] = [ + self._attribute_table.get_token_from_name(name=attr_name) + for attr_name in anno["attribute_names"] + ] + + # Visibility + visibility_token: str = self._visibility_table.get_token_from_level( + level=anno.get("visibility_name", "none") + ) + + # Sample Annotation + if "three_d_box" in anno.keys(): + anno_three_d_bbox: Dict[str, float] = anno["three_d_box"] + sample_annotation_token: str = self._sample_annotation_table.insert_into_table( + sample_token=frame_index_to_sample_token[frame_index], + instance_token=instance_token, + attribute_tokens=attribute_tokens, + visibility_token=visibility_token, + translation=anno_three_d_bbox["translation"], + size=anno_three_d_bbox["size"], + rotation=anno_three_d_bbox["rotation"], + num_lidar_pts=anno["num_lidar_pts"], + num_radar_pts=anno["num_radar_pts"], + ) + self._instance_token_to_annotation_token_list[instance_token].append( + sample_annotation_token + ) + + # Object Annotation + if "two_d_box" in anno.keys(): + anno_two_d_box: List[float] = anno["two_d_box"] + sensor_id: int = int(anno["sensor_id"]) + self._object_ann_table.insert_into_table( + sample_data_token=frame_index_to_sample_data_token[sensor_id][frame_index], + instance_token=instance_token, + category_token=category_token, + attribute_tokens=attribute_tokens, + bbox=anno_two_d_box, + mask=mask[sensor_id][frame_index], + ) + + def _connect_annotations_in_scene(self): + """Annotation for Instance and SampleAnnotation. 
This function adds the relationship between annotations.""" + for ( + instance_token, + annotation_token_list, + ) in self._instance_token_to_annotation_token_list.items(): + # set info in instance + inst_rec: InstanceRecord = self._instance_table.select_record_from_token( + instance_token + ) + inst_rec.set_annotation_info( + nbr_annotations=len(annotation_token_list), + first_annotation_token=annotation_token_list[0], + last_annotation_token=annotation_token_list[-1], + ) + self._instance_table.set_record_to_table(inst_rec) + + # set next/prev of sample_annotation + for token_i in range(1, len(annotation_token_list)): + prev_token: str = annotation_token_list[token_i - 1] + cur_token: str = annotation_token_list[token_i] + + prev_rec: SampleAnnotationRecord = ( + self._sample_annotation_table.select_record_from_token(prev_token) + ) + prev_rec.next_token = cur_token + self._sample_annotation_table.set_record_to_table(prev_rec) + + cur_rec: SampleAnnotationRecord = ( + self._sample_annotation_table.select_record_from_token(cur_token) + ) + cur_rec.prev_token = prev_token + self._sample_annotation_table.set_record_to_table(cur_rec) diff --git a/perception_dataset/t4_dataset/classes/__init__.py b/perception_dataset/t4_dataset/classes/__init__.py new file mode 100644 index 00000000..d7a9bda8 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/__init__.py @@ -0,0 +1,31 @@ +from .attribute import AttributeRecord, AttributeTable # noqa: F401, F403 +from .calibrated_sensor import CalibratedSensorRecord, CalibratedSensorTable # noqa: F401, F403 +from .category import CategoryRecord, CategoryTable # noqa: F401, F403 +from .ego_pose import EgoPoseRecord, EgoPoseTable # noqa: F401, F403 +from .instance import InstanceRecord, InstanceTable # noqa: F401, F403 +from .log import LogRecord, LogTable # noqa: F401, F403 +from .map import MapRecord, MapTable # noqa: F401, F403 +from .object_ann import ObjectAnnRecord, ObjectAnnTable # noqa: F401, F403 +from .sample import SampleRecord, SampleTable # noqa: F401, F403 +from .sample_annotation import SampleAnnotationRecord, SampleAnnotationTable # noqa: F401, F403 +from .sample_data import SampleDataRecord, SampleDataTable # noqa: F401, F403 +from .scene import SceneRecord, SceneTable # noqa: F401, F403 +from .sensor import SensorRecord, SensorTable # noqa: F401, F403 +from .surface_ann import SurfaceAnnRecord, SurfaceAnnTable # noqa: F401, F403 +from .visibility import VisibilityRecord, VisibilityTable # noqa: F401, F403 + +schema_names = [ + "attribute", + "calibrated_sensor", + "category", + "ego_pose", + "instance", + "log", + "map", + "sample", + "sample_annotation", + "sample_data", + "scene", + "sensor", + "visibility", +] diff --git a/perception_dataset/t4_dataset/classes/abstract_class.py b/perception_dataset/t4_dataset/classes/abstract_class.py new file mode 100644 index 00000000..b62f87e9 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/abstract_class.py @@ -0,0 +1,70 @@ +from abc import ABCMeta, abstractmethod +import json +import os.path as osp +from typing import Any, Dict, List + +from perception_dataset.utils.gen_tokens import generate_token + + +class AbstractRecord(metaclass=ABCMeta): + def __init__(self): + self._token: str = generate_token(16, "hex") + + @property + def token(self) -> str: + return self._token + + @abstractmethod + def to_dict(self) -> Dict[str, Any]: + raise NotImplementedError() + + +class AbstractTable(metaclass=ABCMeta): + FILENAME = "" + + def __init__(self): + self._token_to_record: Dict[str, 
AbstractRecord] = {} + + def __len__(self) -> int: + return len(self._token_to_record) + + @abstractmethod + def _to_record(self, **kwargs) -> AbstractRecord: + """Return the instance of RecordClass""" + raise NotImplementedError() + + def set_record_to_table(self, record: AbstractRecord): + self._token_to_record[record.token] = record + + def insert_into_table(self, **kwargs) -> str: + record = self._to_record(**kwargs) + assert isinstance( + record, AbstractRecord + ), "_to_record function must return the instance of RecordClass" + self.set_record_to_table(record) + return record.token + + def select_record_from_token(self, token: str) -> AbstractRecord: + assert ( + token in self._token_to_record + ), f"Token {token} isn't in table {self.__class__.__name__}." + return self._token_to_record[token] + + def to_data(self) -> List[Dict[str, Any]]: + return [rec.to_dict() for rec in self._token_to_record.values()] + + def to_records(self) -> List[AbstractRecord]: + return list(self._token_to_record.values()) + + def to_tokens(self) -> List[str]: + return list(self._token_to_record.keys()) + + def save_json(self, output_dir: str): + """Save table data to json file + + Args: + output_dir (str): path to directory + """ + table_data = self.to_data() + with open(osp.join(output_dir, self.FILENAME), "w") as f: + json.dump(table_data, f, indent=4) diff --git a/perception_dataset/t4_dataset/classes/attribute.py b/perception_dataset/t4_dataset/classes/attribute.py new file mode 100644 index 00000000..ce15fd69 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/attribute.py @@ -0,0 +1,48 @@ +from typing import Dict + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class AttributeRecord(AbstractRecord): + def __init__(self, name: str, description: str): + super().__init__() + self.name: str = name + self.description: str = description + + def to_dict(self) -> Dict[str, str]: + d = { + "token": self.token, + "name": self.name, + "description": self.description, + } + return d + + +class AttributeTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#attributejson""" + + FILENAME = "attribute" + EXTENSION_ENUM.JSON.value + + def __init__(self, name_to_description: Dict[str, str], default_value: str): + super().__init__() + self._name_to_token: Dict[str, str] = {} + self._name_to_description: Dict[str, str] = name_to_description + self._description_default_value: str = default_value + + def _to_record(self, name: str, description: str) -> AttributeRecord: + record = AttributeRecord( + name=name, + description=description, + ) + return record + + def get_token_from_name(self, name: str) -> str: + if name in self._name_to_token: + token = self._name_to_token[name] + else: + description = self._name_to_description.get(name, self._description_default_value) + token = self.insert_into_table(name=name, description=description) + self._name_to_token[name] = token + + return token diff --git a/perception_dataset/t4_dataset/classes/calibrated_sensor.py b/perception_dataset/t4_dataset/classes/calibrated_sensor.py new file mode 100644 index 00000000..3b500d97 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/calibrated_sensor.py @@ -0,0 +1,60 @@ +from typing import Any, Dict, List + +import numpy as np + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import 
AbstractRecord, AbstractTable
+
+
+class CalibratedSensorRecord(AbstractRecord):
+    def __init__(
+        self,
+        sensor_token: str,
+        translation: Dict[str, float],
+        rotation: Dict[str, float],
+        camera_intrinsic: List[List[float]],
+        camera_distortion: List[float],
+    ):
+        super().__init__()
+
+        assert {"x", "y", "z"} == set(translation.keys())
+        assert {"w", "x", "y", "z"} == set(rotation.keys())
+        assert len(camera_intrinsic) == 0 or np.array(camera_intrinsic).shape == (3, 3)
+
+        self.sensor_token: str = sensor_token
+        self.translation: Dict[str, float] = translation
+        self.rotation: Dict[str, float] = rotation
+        self.camera_intrinsic: List[List[float]] = camera_intrinsic
+        self.camera_distortion: List[float] = camera_distortion
+
+    def to_dict(self) -> Dict[str, Any]:
+        d = {
+            "token": self.token,
+            "sensor_token": self.sensor_token,
+            "translation": [
+                self.translation["x"],
+                self.translation["y"],
+                self.translation["z"],
+            ],
+            "rotation": [
+                self.rotation["w"],
+                self.rotation["x"],
+                self.rotation["y"],
+                self.rotation["z"],
+            ],
+            "camera_intrinsic": self.camera_intrinsic,
+            "camera_distortion": self.camera_distortion,
+        }
+        return d
+
+
+class CalibratedSensorTable(AbstractTable):
+    """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#calibrated_sensorjson"""
+
+    FILENAME = "calibrated_sensor" + EXTENSION_ENUM.JSON.value
+
+    def __init__(self):
+        super().__init__()
+
+    def _to_record(self, **kwargs) -> CalibratedSensorRecord:
+        return CalibratedSensorRecord(**kwargs)
diff --git a/perception_dataset/t4_dataset/classes/category.py b/perception_dataset/t4_dataset/classes/category.py
new file mode 100644
index 00000000..5168c4c6
--- /dev/null
+++ b/perception_dataset/t4_dataset/classes/category.py
@@ -0,0 +1,48 @@
+from typing import Dict
+
+from perception_dataset.constants import EXTENSION_ENUM
+from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable
+
+
+class CategoryRecord(AbstractRecord):
+    def __init__(self, name: str, description: str):
+        super().__init__()
+        self.name: str = name
+        self.description: str = description
+
+    def to_dict(self) -> Dict[str, str]:
+        d = {
+            "token": self.token,
+            "name": self.name,
+            "description": self.description,
+        }
+        return d
+
+
+class CategoryTable(AbstractTable):
+    """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#categoryjson"""
+
+    FILENAME = "category" + EXTENSION_ENUM.JSON.value
+
+    def __init__(self, name_to_description: Dict[str, str], default_value: str):
+        super().__init__()
+        self._name_to_token: Dict[str, str] = {}
+        self._name_to_description: Dict[str, str] = name_to_description
+        self._description_default_value: str = default_value
+
+    def _to_record(self, name: str, description: str):
+        record = CategoryRecord(
+            name=name,
+            description=description,
+        )
+        return record
+
+    def get_token_from_name(self, name: str) -> str:
+        if name in self._name_to_token:
+            token = self._name_to_token[name]
+        else:
+            description = self._name_to_description.get(name, self._description_default_value)
+            token = self.insert_into_table(name=name, description=description)
+            self._name_to_token[name] = token
+
+        return token
diff --git a/perception_dataset/t4_dataset/classes/ego_pose.py b/perception_dataset/t4_dataset/classes/ego_pose.py
new file mode 100644
index 00000000..8be4b4ca
--- /dev/null
+++ b/perception_dataset/t4_dataset/classes/ego_pose.py
@@ -0,0 +1,51 @@
+from typing import Any, Dict
+
+from 
perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class EgoPoseRecord(AbstractRecord): + def __init__( + self, + translation: Dict[str, float], + rotation: Dict[str, float], + timestamp: int, + ): + super().__init__() + + assert {"x", "y", "z"} == set(translation.keys()) + assert {"w", "x", "y", "z"} == set(rotation.keys()) + + self.translation: Dict[str, float] = translation + self.rotation: Dict[str, float] = rotation + self.timestamp: int = timestamp + + def to_dict(self) -> Dict[str, Any]: + d = { + "token": self.token, + "translation": [ + self.translation["x"], + self.translation["y"], + self.translation["z"], + ], + "rotation": [ + self.rotation["w"], + self.rotation["x"], + self.rotation["y"], + self.rotation["z"], + ], + "timestamp": self.timestamp, + } + return d + + +class EgoPoseTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#ego_posejson""" + + FILENAME = "ego_pose" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> EgoPoseRecord: + return EgoPoseRecord(**kwargs) diff --git a/perception_dataset/t4_dataset/classes/instance.py b/perception_dataset/t4_dataset/classes/instance.py new file mode 100644 index 00000000..bcdac951 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/instance.py @@ -0,0 +1,56 @@ +from typing import Any, Dict + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class InstanceRecord(AbstractRecord): + def __init__(self, category_token: str, instance_name: str = ""): + super().__init__() + self._category_token: str = category_token + self._instance_name: str = instance_name + self._nbr_annotations: int = 0 + self._first_annotation_token: str = "" + self._last_annotation_token: str = "" + + def to_dict(self) -> Dict[str, Any]: + d = { + "token": self.token, + "category_token": self._category_token, + "instance_name": self._instance_name, + "nbr_annotations": self._nbr_annotations, + "first_annotation_token": self._first_annotation_token, + "last_annotation_token": self._last_annotation_token, + } + return d + + def set_annotation_info( + self, nbr_annotations: int, first_annotation_token: str, last_annotation_token: str + ): + self._nbr_annotations = nbr_annotations + self._first_annotation_token = first_annotation_token + self._last_annotation_token = last_annotation_token + + +class InstanceTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#instancejson""" + + FILENAME = "instance" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + self._id_to_token: Dict[str, str] = {} + + def _to_record(self, category_token: str, instance_name: str = ""): + return InstanceRecord(category_token=category_token, instance_name=instance_name) + + def get_token_from_id(self, instance_id: str, category_token: str, dataset_name: str) -> str: + if instance_id in self._id_to_token: + token = self._id_to_token[instance_id] + else: + token = self.insert_into_table( + category_token=category_token, instance_name=dataset_name + "::" + str(instance_id) + ) + self._id_to_token[instance_id] = token + + return token diff --git a/perception_dataset/t4_dataset/classes/log.py b/perception_dataset/t4_dataset/classes/log.py new file mode 100644 index 00000000..0b74d3fa 
--- /dev/null +++ b/perception_dataset/t4_dataset/classes/log.py @@ -0,0 +1,42 @@ +from typing import Any, Dict + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class LogRecord(AbstractRecord): + def __init__( + self, + logfile: str, + vehicle: str, + data_captured: str, + location: str, + ): + super().__init__() + + self.logfile: str = logfile + self.vehicle: str = vehicle + self.data_captured: str = data_captured + self.location: str = location + + def to_dict(self) -> Dict[str, Any]: + d = { + "token": self.token, + "logfile": self.logfile, + "vehicle": self.vehicle, + "data_captured": self.data_captured, + "location": self.location, + } + return d + + +class LogTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#logjson""" + + FILENAME = "log" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> LogRecord: + return LogRecord(**kwargs) diff --git a/perception_dataset/t4_dataset/classes/map.py b/perception_dataset/t4_dataset/classes/map.py new file mode 100644 index 00000000..d9f16b1a --- /dev/null +++ b/perception_dataset/t4_dataset/classes/map.py @@ -0,0 +1,39 @@ +from typing import Any, Dict, List + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class MapRecord(AbstractRecord): + def __init__( + self, + log_tokens: List[str], + category: str, + filename: str, + ): + super().__init__() + + self.log_tokens: List[str] = log_tokens + self.category: str = category + self.filename: str = filename + + def to_dict(self) -> Dict[str, Any]: + d = { + "token": self.token, + "log_tokens": self.log_tokens, + "category": self.category, + "filename": self.filename, + } + return d + + +class MapTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#mapjson""" + + FILENAME = "map" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> MapRecord: + return MapRecord(**kwargs) diff --git a/perception_dataset/t4_dataset/classes/object_ann.py b/perception_dataset/t4_dataset/classes/object_ann.py new file mode 100644 index 00000000..d2a33d2b --- /dev/null +++ b/perception_dataset/t4_dataset/classes/object_ann.py @@ -0,0 +1,71 @@ +from typing import Dict, List + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class ObjectAnnRecord(AbstractRecord): + def __init__( + self, + sample_data_token: str, + instance_token: str, + category_token: str, + attribute_tokens: str, + bbox: List[float], + mask: Dict[str, any], + ): + super().__init__() + + assert len(bbox) == 4 + + self._sample_data_token: str = sample_data_token + self._instance_token: str = instance_token + self._category_token: str = category_token + self._attribute_tokens: List[str] = attribute_tokens + self._bbox: List[float] = bbox + self._mask: Dict[str, any] = mask + + def to_dict(self): + d = { + "token": self.token, + "sample_data_token": self._sample_data_token, + "instance_token": self._instance_token, + "category_token": self._category_token, + "attribute_tokens": self._attribute_tokens, + "bbox": [ + self._bbox[0], + self._bbox[1], + self._bbox[2], + self._bbox[3], + ], + "mask": 
self._mask, + } + return d + + +class ObjectAnnTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#sample_annotationjson""" + + FILENAME = "object_ann" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record( + self, + sample_data_token: str, + instance_token: str, + category_token: str, + attribute_tokens: str, + bbox: List[float], + mask: Dict[str, any], + ): + record = ObjectAnnRecord( + sample_data_token=sample_data_token, + instance_token=instance_token, + category_token=category_token, + attribute_tokens=attribute_tokens, + bbox=bbox, + mask=mask, + ) + return record diff --git a/perception_dataset/t4_dataset/classes/sample.py b/perception_dataset/t4_dataset/classes/sample.py new file mode 100644 index 00000000..42ccdc90 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/sample.py @@ -0,0 +1,40 @@ +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class SampleRecord(AbstractRecord): + def __init__( + self, + timestamp: int, + scene_token: str, + next_token: str = "", + prev_token: str = "", + ): + super().__init__() + + self.timestamp: int = timestamp + self.scene_token: str = scene_token + self.next: str = next_token + self.prev: str = prev_token + + def to_dict(self): + d = { + "token": self.token, + "timestamp": self.timestamp, + "scene_token": self.scene_token, + "next": self.next, + "prev": self.prev, + } + return d + + +class SampleTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#samplejson""" + + FILENAME = "sample" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> SampleRecord: + return SampleRecord(**kwargs) diff --git a/perception_dataset/t4_dataset/classes/sample_annotation.py b/perception_dataset/t4_dataset/classes/sample_annotation.py new file mode 100644 index 00000000..1d172168 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/sample_annotation.py @@ -0,0 +1,116 @@ +from typing import Dict, List + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class SampleAnnotationRecord(AbstractRecord): + def __init__( + self, + sample_token: str, + instance_token: str, + attribute_tokens: List[str], + visibility_token: str, + translation: Dict[str, float], + size: Dict[str, float], + rotation: Dict[str, float], + num_lidar_pts: int, + num_radar_pts: int, + ): + super().__init__() + + assert {"x", "y", "z"} == set(translation.keys()) + assert {"width", "length", "height"} == set(size.keys()) + assert {"w", "x", "y", "z"} == set(rotation.keys()) + + self._sample_token: str = sample_token + self._instance_token: str = instance_token + self._attribute_tokens: List[str] = attribute_tokens + self._visibility_token: str = visibility_token + self._translation: Dict[str, float] = translation + self._size: Dict[str, float] = size + self._rotation: Dict[str, float] = rotation + self._num_lidar_pts: int = num_lidar_pts + self._num_radar_pts: int = num_radar_pts + self._next: str = "" + self._prev: str = "" + + @property + def next_token(self): + return self._next + + @next_token.setter + def next_token(self, value: str): + self._next = value + + @property + def prev_token(self): + return self._prev + + @prev_token.setter + def 
prev_token(self, value: str): + self._prev = value + + def to_dict(self): + d = { + "token": self.token, + "sample_token": self._sample_token, + "instance_token": self._instance_token, + "attribute_tokens": self._attribute_tokens, + "visibility_token": self._visibility_token, + "translation": [ + self._translation["x"], + self._translation["y"], + self._translation["z"], + ], + "size": [ + self._size["width"], + self._size["length"], + self._size["height"], + ], + "rotation": [ + self._rotation["w"], + self._rotation["x"], + self._rotation["y"], + self._rotation["z"], + ], + "num_lidar_pts": self._num_lidar_pts, + "num_radar_pts": self._num_radar_pts, + "next": self._next, + "prev": self._prev, + } + return d + + +class SampleAnnotationTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#sample_annotationjson""" + + FILENAME = "sample_annotation" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record( + self, + sample_token: str, + instance_token: str, + attribute_tokens: str, + visibility_token: str, + translation: Dict[str, float], + size: Dict[str, float], + rotation: Dict[str, float], + num_lidar_pts: int, + num_radar_pts: int, + ): + record = SampleAnnotationRecord( + sample_token=sample_token, + instance_token=instance_token, + attribute_tokens=attribute_tokens, + visibility_token=visibility_token, + translation=translation, + size=size, + rotation=rotation, + num_lidar_pts=num_lidar_pts, + num_radar_pts=num_radar_pts, + ) + return record diff --git a/perception_dataset/t4_dataset/classes/sample_data.py b/perception_dataset/t4_dataset/classes/sample_data.py new file mode 100644 index 00000000..aca8d5cd --- /dev/null +++ b/perception_dataset/t4_dataset/classes/sample_data.py @@ -0,0 +1,64 @@ +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class SampleDataRecord(AbstractRecord): + def __init__( + self, + sample_token: str, + ego_pose_token: str, + calibrated_sensor_token: str, + filename: str, + fileformat: str, + timestamp: int, + is_key_frame: bool, + width: int = 0, + height: int = 0, + next_token: str = "", + prev_token: str = "", + is_valid: bool = True, + ): + super().__init__() + + self.sample_token: str = sample_token + self.ego_pose_token: str = ego_pose_token + self.calibrated_sensor_token: str = calibrated_sensor_token + self.filename: str = filename + self.fileformat: str = fileformat + self.width: int = width + self.height: int = height + self.timestamp: int = timestamp + self.is_key_frame: bool = is_key_frame + self.next: str = next_token + self.prev: str = prev_token + self._is_valid: bool = is_valid + + def to_dict(self): + d = { + "token": self.token, + "sample_token": self.sample_token, + "ego_pose_token": self.ego_pose_token, + "calibrated_sensor_token": self.calibrated_sensor_token, + "filename": self.filename, + "fileformat": self.fileformat, + "width": self.width, + "height": self.height, + "timestamp": self.timestamp, + "is_key_frame": self.is_key_frame, + "next": self.next, + "prev": self.prev, + "is_valid": self._is_valid, + } + return d + + +class SampleDataTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#sample_datajson""" + + FILENAME = "sample_data" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> SampleDataRecord: + 
return SampleDataRecord(**kwargs) diff --git a/perception_dataset/t4_dataset/classes/scene.py b/perception_dataset/t4_dataset/classes/scene.py new file mode 100644 index 00000000..0580da05 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/scene.py @@ -0,0 +1,48 @@ +from typing import Any, Dict + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class SceneRecord(AbstractRecord): + def __init__( + self, + name: str, + description: str, + log_token: str, + nbr_samples: int = 0, + first_sample_token: str = "", + last_sample_token: str = "", + ): + super().__init__() + + self.name: str = name + self.description: str = description + self.log_token: str = log_token + self.nbr_samples: int = nbr_samples + self.first_sample_token: str = first_sample_token + self.last_sample_token: str = last_sample_token + + def to_dict(self) -> Dict[str, Any]: + d = { + "token": self.token, + "name": self.name, + "description": self.description, + "log_token": self.log_token, + "nbr_samples": self.nbr_samples, + "first_sample_token": self.first_sample_token, + "last_sample_token": self.last_sample_token, + } + return d + + +class SceneTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#scenejson""" + + FILENAME = "scene" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> SceneRecord: + return SceneRecord(**kwargs) diff --git a/perception_dataset/t4_dataset/classes/sensor.py b/perception_dataset/t4_dataset/classes/sensor.py new file mode 100644 index 00000000..51513bc0 --- /dev/null +++ b/perception_dataset/t4_dataset/classes/sensor.py @@ -0,0 +1,48 @@ +from typing import Dict + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class SensorRecord(AbstractRecord): + def __init__( + self, + channel: str, + modality: str, + ): + super().__init__() + + self._channel: str = channel + self._modality: str = modality + + def to_dict(self): + d = { + "token": self.token, + "channel": self._channel, + "modality": self._modality, + } + return d + + +class SensorTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#sensorjson""" + + FILENAME = "sensor" + EXTENSION_ENUM.JSON.value + + def __init__(self, channel_to_modality: Dict[str, str]): + super().__init__() + self._channel_to_modality: Dict[str, str] = channel_to_modality + self._channel_to_token: Dict[str, str] = {} + + def _to_record(self, **kwargs) -> SensorRecord: + return SensorRecord(**kwargs) + + def get_token_from_channel(self, channel: str): + if channel in self._channel_to_token: + token = self._channel_to_token[channel] + else: + modality = self._channel_to_modality[channel] + token = self.insert_into_table(channel=channel, modality=modality) + self._channel_to_token[channel] = token + + return token diff --git a/perception_dataset/t4_dataset/classes/surface_ann.py b/perception_dataset/t4_dataset/classes/surface_ann.py new file mode 100644 index 00000000..d17756fa --- /dev/null +++ b/perception_dataset/t4_dataset/classes/surface_ann.py @@ -0,0 +1,49 @@ +from typing import Dict + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class 
SurfaceAnnRecord(AbstractRecord): + def __init__( + self, + category_token: str, + mask: Dict[str, any], + sample_data_token: str, + ): + super().__init__() + + self._category_token: str = category_token + self._mask: Dict[str, any] = mask + self._sample_data_token: str = sample_data_token + + def to_dict(self): + d = { + "token": self.token, + "category_token": self._category_token, + "mask": self._mask, + "sample_data_token": self._sample_data_token, + } + return d + + +class SurfaceAnnTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#sample_annotationjson""" + + FILENAME = "surface_ann" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record( + self, + category_token: str, + mask: Dict[str, any], + sample_data_token: str, + ): + record = SurfaceAnnRecord( + category_token=category_token, + mask=mask, + sample_data_token=sample_data_token, + ) + return record diff --git a/perception_dataset/t4_dataset/classes/visibility.py b/perception_dataset/t4_dataset/classes/visibility.py new file mode 100644 index 00000000..25c58b4b --- /dev/null +++ b/perception_dataset/t4_dataset/classes/visibility.py @@ -0,0 +1,48 @@ +from typing import Dict + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class VisibilityRecord(AbstractRecord): + def __init__(self, level: str, description: str): + super().__init__() + self._level: str = level + self._description: str = description + + def to_dict(self) -> Dict[str, str]: + d = { + "token": self.token, + "level": self._level, + "description": self._description, + } + return d + + +class VisibilityTable(AbstractTable): + """https://github.com/tier4/tier4_perception_dataset/blob/main/docs/t4_format_3d_detailed.md#visibilityjson""" + + FILENAME = "visibility" + EXTENSION_ENUM.JSON.value + + def __init__(self, level_to_description: Dict[str, str], default_value: str): + super().__init__() + self._level_to_token: Dict[str, str] = {} + self._level_to_description: Dict[str, str] = level_to_description + self._description_default_value: str = default_value + + def _to_record(self, level: str, description: str) -> VisibilityRecord: + record = VisibilityRecord( + level=level, + description=description, + ) + return record + + def get_token_from_level(self, level: str) -> str: + if level in self._level_to_token: + token = self._level_to_token[level] + else: + description = self._level_to_description.get(level, self._description_default_value) + token = self.insert_into_table(level=level, description=description) + self._level_to_token[level] = token + + return token diff --git a/perception_dataset/t4_dataset/data_validator.py b/perception_dataset/t4_dataset/data_validator.py new file mode 100644 index 00000000..7fb71bb9 --- /dev/null +++ b/perception_dataset/t4_dataset/data_validator.py @@ -0,0 +1,32 @@ + +from loguru import logger +from nuscenes.nuscenes import NuScenes +from perception_dataset.t4_dataset.format_validator import _logger_wrapper + +@_logger_wrapper +def validate_data_hz(nusc: NuScenes): + def get_first_sample_data_list(): + sample_data_list = [] + for sample_data in nusc.sample_data: + if sample_data["prev"] == "": + sample_data_list.append(sample_data) + return sample_data_list + + for sample_data in get_first_sample_data_list(): + if not sample_data["is_valid"]: + continue + first_filename: str = sample_data["filename"] + first_timestamp: int = 
sample_data["timestamp"] + sample_data_counter: int = 0 + while sample_data["next"]: + sample_data_counter += 1 + sample_data = nusc.get("sample_data", sample_data["next"]) + + data_duration_sec: float = float(sample_data["timestamp"] - first_timestamp) * 1e-6 + data_hz: float = sample_data_counter / data_duration_sec + + logger.info(f"{first_filename}") + logger.info(f"Duration: {data_duration_sec} sec") + logger.info(f"Hz: {data_hz}") + + assert data_hz > 9.0 diff --git a/perception_dataset/t4_dataset/format_validator.py b/perception_dataset/t4_dataset/format_validator.py new file mode 100644 index 00000000..d61a6503 --- /dev/null +++ b/perception_dataset/t4_dataset/format_validator.py @@ -0,0 +1,235 @@ +from pathlib import Path + +from loguru import logger +from nuscenes.nuscenes import NuScenes + +from perception_dataset.constants import EXTENSION_ENUM, T4_FORMAT_DIRECTORY_NAME +from perception_dataset.t4_dataset.classes import schema_names + + +def validate_format(nusc: NuScenes, root_path: str): + validate_scene(nusc) + validate_sample(nusc) + validate_sample_data(nusc, Path(root_path)) + validate_ego_pose(nusc) + validate_calibrated_sensor(nusc) + validate_instance(nusc) + validate_sample_annotation(nusc) + + +def find_in_table(nusc: NuScenes, table_name: str, token: str) -> bool: + """This function is a modification of NuScenes `get` that doesn't raise an error. + https://github.com/nutonomy/nuscenes-devkit/blob/28765b8477dbd3331bacd922fada867c2c4db1d7/python-sdk/nuscenes/nuscenes.py#L207-L225 + """ + assert table_name in nusc._token2ind, f"{table_name} not found" + + token_ind = nusc._token2ind[table_name].get(token) + if token_ind is None: + return False + + table = getattr(nusc, table_name) + if token_ind < 0 or token_ind >= len(table): + return False + + return True + + +def _logger_wrapper(func): + def wrapper(*args, **kwargs): + ret = func(*args, **kwargs) + logger.info(f"`{func.__name__}` has passed!") + return ret + + return wrapper + + +@_logger_wrapper +def validate_directory_structure(root_path: str): + root_path = Path(root_path) + anno_path = root_path / T4_FORMAT_DIRECTORY_NAME.ANNOTATION.value + data_path = root_path / T4_FORMAT_DIRECTORY_NAME.DATA.value + + assert anno_path.is_dir(), f"{anno_path} isn't found" + assert data_path.is_dir(), f"{data_path} isn't found" + + for schema_name in schema_names: + json_path = anno_path / (schema_name + EXTENSION_ENUM.JSON.value) + assert json_path.is_file(), f"{schema_name} isn't found" + + +@_logger_wrapper +def validate_scene(nusc: NuScenes): + assert len(nusc.scene) == 1, "T4Dataset must have 1 scene." + + scene = nusc.scene[0] + assert find_in_table(nusc, "log", scene["log_token"]), "scene.log_token isn't found in log" + assert find_in_table( + nusc, "sample", scene["first_sample_token"] + ), "scene.first_sample_token isn't found in sample" + assert find_in_table( + nusc, "sample", scene["last_sample_token"] + ), "scene.last_sample_token isn't found in sample" + + +@_logger_wrapper +def validate_sample(nusc: NuScenes): + assert len(nusc.sample) > 0, "There are no samples." + + no_next_token_count: int = 0 + no_prev_token_count: int = 0 + for sample in nusc.sample: + assert find_in_table( + nusc, "scene", sample["scene_token"] + ), "sample.scene_token isn't found in scene." + + next_token = sample["next"] + if next_token == "": + no_next_token_count += 1 + else: + assert find_in_table(nusc, "sample", next_token), "sample.next isn't found in sample."
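+ # an empty "next"/"prev" marks the tail/head of a scene's sample chain; + # exactly one of each is expected per scene, which is checked after this loop.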
+ + prev_token = sample["prev"] + if prev_token == "": + no_prev_token_count += 1 + else: + assert find_in_table( + nusc, "sample", prev_token + ), "sample.prev isn't found in sample." + + assert no_next_token_count == len( + nusc.scene + ), f"The number of samples with an empty next token must be {len(nusc.scene)}, got {no_next_token_count}." + assert no_prev_token_count == len( + nusc.scene + ), f"The number of samples with an empty prev token must be {len(nusc.scene)}, got {no_prev_token_count}." + + +@_logger_wrapper +def validate_sample_data(nusc: NuScenes, root_path: Path): + assert len(nusc.sample_data) > 0, "There are no sample_data." + + no_next_token_count: int = 0 + no_prev_token_count: int = 0 + for sample_data in nusc.sample_data: + if not sample_data["is_valid"]: + continue + assert find_in_table( + nusc, "sample", sample_data["sample_token"] + ), "sample_data.sample_token isn't found in sample." + assert find_in_table( + nusc, "ego_pose", sample_data["ego_pose_token"] + ), "sample_data.ego_pose_token isn't found in ego_pose." + assert find_in_table( + nusc, "calibrated_sensor", sample_data["calibrated_sensor_token"] + ), "sample_data.calibrated_sensor_token isn't found in calibrated_sensor." + + filename: str = sample_data["filename"] + assert (root_path / filename).is_file(), f"{filename} isn't found." + + next_token = sample_data["next"] + if next_token == "": + no_next_token_count += 1 + else: + assert find_in_table( + nusc, "sample_data", next_token + ), "sample_data.next isn't found in sample_data." + + prev_token = sample_data["prev"] + if prev_token == "": + no_prev_token_count += 1 + else: + assert find_in_table( + nusc, "sample_data", prev_token + ), "sample_data.prev isn't found in sample_data." + + # NOTE(yukke42): There are len(nusc.calibrated_sensor) sensors for each scene. + expected_count = len(nusc.calibrated_sensor) * len(nusc.scene) + assert ( + no_next_token_count == expected_count + ), f"The number of sample_data with an empty next token must be {expected_count}, got {no_next_token_count}." + assert ( + no_prev_token_count == expected_count + ), f"The number of sample_data with an empty prev token must be {expected_count}, got {no_prev_token_count}." + + +@_logger_wrapper +def validate_ego_pose(nusc: NuScenes): + assert len(nusc.ego_pose) > 0, "There are no ego_pose." + + +@_logger_wrapper +def validate_calibrated_sensor(nusc: NuScenes): + assert len(nusc.calibrated_sensor) > 0, "There are no calibrated_sensor." + + for calibrated_sensor in nusc.calibrated_sensor: + assert find_in_table( + nusc, "sensor", calibrated_sensor["sensor_token"] + ), "calibrated_sensor.sensor_token isn't found in sensor." + + +@_logger_wrapper +def validate_instance(nusc: NuScenes): + assert len(nusc.instance) > 0, "There are no instance." + + for instance in nusc.instance: + if instance["nbr_annotations"] == 0: + logger.warning(f"instance:{instance['token']} has no 3D annotation") + continue + assert find_in_table( + nusc, "category", instance["category_token"] + ), "instance.category_token isn't found in category." + assert find_in_table( + nusc, "sample_annotation", instance["first_annotation_token"] + ), "instance.first_annotation_token isn't found in sample_annotation." + assert find_in_table( + nusc, "sample_annotation", instance["last_annotation_token"] + ), "instance.last_annotation_token isn't found in sample_annotation." + + +@_logger_wrapper +def validate_sample_annotation(nusc: NuScenes): + assert len(nusc.sample_annotation) > 0, "There are no sample_annotation."
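+ # sample_annotations are chained per instance: every instance with 3D annotations + # contributes exactly one chain head (empty "prev") and one chain tail (empty "next").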
+ + no_next_token_count: int = 0 + no_prev_token_count: int = 0 + for sample_annotation in nusc.sample_annotation: + assert find_in_table( + nusc, "sample", sample_annotation["sample_token"] + ), "sample_annotation.sample_token isn't found in sample." + assert find_in_table( + nusc, "instance", sample_annotation["instance_token"] + ), "sample_annotation.instance_token isn't found in instance." + + for i, attribute_token in enumerate(sample_annotation["attribute_tokens"]): + assert find_in_table( + nusc, "attribute", attribute_token + ), f"sample_annotation.attribute_tokens[{i}] isn't found in attribute." + assert find_in_table( + nusc, "visibility", sample_annotation["visibility_token"] + ), "sample_annotation.visibility_token isn't found in visibility." + + next_token = sample_annotation["next"] + if next_token == "": + no_next_token_count += 1 + else: + assert find_in_table( + nusc, "sample_annotation", next_token + ), "sample_annotation.next isn't found in sample_annotation." + + prev_token = sample_annotation["prev"] + if prev_token == "": + no_prev_token_count += 1 + else: + assert find_in_table( + nusc, "sample_annotation", prev_token + ), "sample_annotation.prev isn't found in sample_annotation." + + # NOTE(yukke42): There are len(nusc.instance) instances for a scene. + instance_3d = [instance for instance in nusc.instance if instance["nbr_annotations"] != 0] + expected_count = len(instance_3d) * len(nusc.scene) + assert ( + no_next_token_count == expected_count + ), f"The number of sample_annotations with an empty next token must be {expected_count}, got {no_next_token_count}." + assert ( + no_prev_token_count == expected_count + ), f"The number of sample_annotations with an empty prev token must be {expected_count}, got {no_prev_token_count}." diff --git a/perception_dataset/utils/__init__.py b/perception_dataset/utils/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/perception_dataset/utils/box_np_ops.py b/perception_dataset/utils/box_np_ops.py new file mode 100644 index 00000000..102c1c17 --- /dev/null +++ b/perception_dataset/utils/box_np_ops.py @@ -0,0 +1,265 @@ +# Copied from mmdetection3d@v0.18.1: +# https://github.com/open-mmlab/mmdetection3d/blob/v0.18.1/mmdet3d/core/bbox/box_np_ops.py +# +# Copyright (c) OpenMMLab. All rights reserved. +# TODO: clean the functions in this file and move the APIs into box structures +# in the future + + +import numba +import numpy as np + + +def corners_nd(dims, origin=0.5): + """Generate relative box corners based on length per dim and origin point. + + Args: + dims (np.ndarray, shape=[N, ndim]): Array of length per dim + origin (list or array or float, optional): origin point relate to + smallest point. Defaults to 0.5 + + Returns: + np.ndarray, shape=[N, 2 ** ndim, ndim]: Returned corners. + point layout example: (2d) x0y0, x0y1, x1y0, x1y1; + (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 + where x0 < x1, y0 < y1, z0 < z1. + """ + ndim = int(dims.shape[1]) + corners_norm = np.stack(np.unravel_index(np.arange(2**ndim), [2] * ndim), axis=1).astype( + dims.dtype + ) + # now corners_norm has format: (2d) x0y0, x0y1, x1y0, x1y1 + # (3d) x0y0z0, x0y0z1, x0y1z0, x0y1z1, x1y0z0, x1y0z1, x1y1z0, x1y1z1 + # so need to convert to a format which is convenient to do other computing. + # for 2d boxes, format is clockwise start with minimum point + # for 3d boxes, please draw lines by your hand.
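+ # e.g. for ndim == 2, np.unravel_index(np.arange(4), [2, 2]) yields the rows + # (0, 0), (0, 1), (1, 0), (1, 1); the reindexing below reorders them into the + # ring x0y0, x0y1, x1y1, x1y0.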
+ if ndim == 2: + # generate clockwise box corners + corners_norm = corners_norm[[0, 1, 3, 2]] + elif ndim == 3: + corners_norm = corners_norm[[0, 1, 3, 2, 4, 5, 7, 6]] + corners_norm = corners_norm - np.array(origin, dtype=dims.dtype) + corners = dims.reshape([-1, 1, ndim]) * corners_norm.reshape([1, 2**ndim, ndim]) + return corners + + +def rotation_3d_in_axis(points, angles, axis=0): + """Rotate points in specific axis. + + Args: + points (np.ndarray, shape=[N, point_size, 3]]): + angles (np.ndarray, shape=[N]]): + axis (int, optional): Axis to rotate at. Defaults to 0. + + Returns: + np.ndarray: Rotated points. + """ + # points: [N, point_size, 3] + rot_sin = np.sin(angles) + rot_cos = np.cos(angles) + ones = np.ones_like(rot_cos) + zeros = np.zeros_like(rot_cos) + if axis == 1: + rot_mat_T = np.stack( + [[rot_cos, zeros, -rot_sin], [zeros, ones, zeros], [rot_sin, zeros, rot_cos]] + ) + elif axis == 2 or axis == -1: + rot_mat_T = np.stack( + [[rot_cos, -rot_sin, zeros], [rot_sin, rot_cos, zeros], [zeros, zeros, ones]] + ) + elif axis == 0: + rot_mat_T = np.stack( + [[zeros, rot_cos, -rot_sin], [zeros, rot_sin, rot_cos], [ones, zeros, zeros]] + ) + else: + raise ValueError("axis should in range") + + return np.einsum("aij,jka->aik", points, rot_mat_T) + + +def center_to_corner_box3d(centers, dims, angles=None, origin=(0.5, 1.0, 0.5), axis=1): + """Convert kitti locations, dimensions and angles to corners. + + Args: + centers (np.ndarray): Locations in kitti label file with shape (N, 3). + dims (np.ndarray): Dimensions in kitti label file with shape (N, 3). + angles (np.ndarray, optional): Rotation_y in kitti label file with + shape (N). Defaults to None. + origin (list or array or float, optional): Origin point relate to + smallest point. Use (0.5, 1.0, 0.5) in camera and (0.5, 0.5, 0) + in lidar. Defaults to (0.5, 1.0, 0.5). + axis (int, optional): Rotation axis. 1 for camera and 2 for lidar. + Defaults to 1. + + Returns: + np.ndarray: Corners with the shape of (N, 8, 3). + """ + # 'length' in kitti format is in x axis. + # yzx(hwl)(kitti label file)<->xyz(lhw)(camera)<->z(-x)(-y)(wlh)(lidar) + # center in kitti format is [0.5, 1.0, 0.5] in xyz. + corners = corners_nd(dims, origin=origin) + # corners: [N, 8, 3] + if angles is not None: + corners = rotation_3d_in_axis(corners, angles, axis=axis) + corners += centers.reshape([-1, 1, 3]) + return corners + + +@numba.jit(nopython=True) +def corner_to_surfaces_3d_jit(corners): + """Convert 3d box corners from corner function above to surfaces that + normal vectors all direct to internal. + + Args: + corners (np.ndarray): 3d box corners with the shape of (N, 8, 3). + + Returns: + np.ndarray: Surfaces with the shape of (N, 6, 4, 3). + """ + # box_corners: [N, 8, 3], must from corner functions in this module + num_boxes = corners.shape[0] + surfaces = np.zeros((num_boxes, 6, 4, 3), dtype=corners.dtype) + corner_idxes = np.array( + [0, 1, 2, 3, 7, 6, 5, 4, 0, 3, 7, 4, 1, 5, 6, 2, 0, 4, 5, 1, 3, 2, 6, 7] + ).reshape(6, 4) + for i in range(num_boxes): + for j in range(6): + for k in range(4): + surfaces[i, j, k] = corners[i, corner_idxes[j, k]] + return surfaces + + +def corner_to_surfaces_3d(corners): + """convert 3d box corners from corner function above to surfaces that + normal vectors all direct to internal. + + Args: + corners (np.ndarray): 3D box corners with shape of (N, 8, 3). + + Returns: + np.ndarray: Surfaces with the shape of (N, 6, 4, 3). 
+ """ + # box_corners: [N, 8, 3], must from corner functions in this module + surfaces = np.array( + [ + [corners[:, 0], corners[:, 1], corners[:, 2], corners[:, 3]], + [corners[:, 7], corners[:, 6], corners[:, 5], corners[:, 4]], + [corners[:, 0], corners[:, 3], corners[:, 7], corners[:, 4]], + [corners[:, 1], corners[:, 5], corners[:, 6], corners[:, 2]], + [corners[:, 0], corners[:, 4], corners[:, 5], corners[:, 1]], + [corners[:, 3], corners[:, 2], corners[:, 6], corners[:, 7]], + ] + ).transpose([2, 0, 1, 3]) + return surfaces + + +def points_in_rbbox(points, rbbox, z_axis=2, origin=(0.5, 0.5, 0)): + """Check points in rotated bbox and return indicces. + + Args: + points (np.ndarray, shape=[N, 3+dim]): Points to query. + rbbox (np.ndarray, shape=[M, 7]): Boxes3d with rotation. + z_axis (int, optional): Indicate which axis is height. + Defaults to 2. + origin (tuple[int], optional): Indicate the position of + box center. Defaults to (0.5, 0.5, 0). + + Returns: + np.ndarray, shape=[N, M]: Indices of points in each box. + """ + # TODO: this function is different from PointCloud3D, be careful + # when start to use nuscene, check the input + rbbox_corners = center_to_corner_box3d( + rbbox[:, :3], rbbox[:, 3:6], rbbox[:, 6], origin=origin, axis=z_axis + ) + surfaces = corner_to_surfaces_3d(rbbox_corners) + indices = points_in_convex_polygon_3d_jit(points[:, :3], surfaces) + return indices + + +def surface_equ_3d(polygon_surfaces): + """ + + Args: + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + [num_polygon, max_num_surfaces, max_num_points_of_surface, 3]. + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + + Returns: + tuple: normal vector and its direction. + """ + # return [a, b, c], d in ax+by+cz+d=0 + # polygon_surfaces: [num_polygon, num_surfaces, num_points_of_polygon, 3] + surface_vec = polygon_surfaces[:, :, :2, :] - polygon_surfaces[:, :, 1:3, :] + # normal_vec: [..., 3] + normal_vec = np.cross(surface_vec[:, :, 0, :], surface_vec[:, :, 1, :]) + # print(normal_vec.shape, points[..., 0, :].shape) + # d = -np.inner(normal_vec, points[..., 0, :]) + d = np.einsum("aij, aij->ai", normal_vec, polygon_surfaces[:, :, 0, :]) + return normal_vec, -d + + +@numba.njit +def _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, num_surfaces): + """ + Args: + points (np.ndarray): Input points with shape of (num_points, 3). + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + (num_polygon, max_num_surfaces, max_num_points_of_surface, 3). + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + normal_vec (np.ndarray): Normal vector of polygon_surfaces. + d (int): Directions of normal vector. + num_surfaces (np.ndarray): Number of surfaces a polygon contains + shape of (num_polygon). + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. 
+ """ + max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3] + num_points = points.shape[0] + num_polygons = polygon_surfaces.shape[0] + ret = np.ones((num_points, num_polygons), dtype=np.bool_) + sign = 0.0 + for i in range(num_points): + for j in range(num_polygons): + for k in range(max_num_surfaces): + if k > num_surfaces[j]: + break + sign = ( + points[i, 0] * normal_vec[j, k, 0] + + points[i, 1] * normal_vec[j, k, 1] + + points[i, 2] * normal_vec[j, k, 2] + + d[j, k] + ) + if sign >= 0: + ret[i, j] = False + break + return ret + + +def points_in_convex_polygon_3d_jit(points, polygon_surfaces, num_surfaces=None): + """Check whether points are in 3d convex polygons. + + Args: + points (np.ndarray): Input points with shape of (num_points, 3). + polygon_surfaces (np.ndarray): Polygon surfaces with shape of + (num_polygon, max_num_surfaces, max_num_points_of_surface, 3). + All surfaces' normal vector must direct to internal. + Max_num_points_of_surface must at least 3. + num_surfaces (np.ndarray, optional): Number of surfaces a polygon + contains shape of (num_polygon). Defaults to None. + + Returns: + np.ndarray: Result matrix with the shape of [num_points, num_polygon]. + """ + max_num_surfaces, max_num_points_of_surface = polygon_surfaces.shape[1:3] + # num_points = points.shape[0] + num_polygons = polygon_surfaces.shape[0] + if num_surfaces is None: + num_surfaces = np.full((num_polygons,), 9999999, dtype=np.int64) + normal_vec, d = surface_equ_3d(polygon_surfaces[:, :, :3, :]) + # normal_vec: [num_polygon, max_num_surfaces, 3] + # d: [num_polygon, max_num_surfaces] + return _points_in_convex_polygon_3d_jit(points, polygon_surfaces, normal_vec, d, num_surfaces) diff --git a/perception_dataset/utils/calculate_num_points.py b/perception_dataset/utils/calculate_num_points.py new file mode 100644 index 00000000..741fcc49 --- /dev/null +++ b/perception_dataset/utils/calculate_num_points.py @@ -0,0 +1,78 @@ +import numpy as np +from nuscenes import NuScenes + +from perception_dataset.t4_dataset.classes.sample_annotation import ( + SampleAnnotationRecord, + SampleAnnotationTable, +) +from perception_dataset.utils import box_np_ops +from perception_dataset.utils.logger import configure_logger + +logger = configure_logger(modname=__name__) + + +def calculate_num_points( + dataroot: str, lidar_sensor_channel: str, annotation_table: SampleAnnotationTable +): + """Calculate the number of points in each box and overwrite the annotation table""" + nusc = NuScenes(version="annotation", dataroot=dataroot, verbose=False) + for sample in nusc.sample: + lidar_token = sample["data"][lidar_sensor_channel] + lidar_path, boxes, _ = nusc.get_sample_data(lidar_token) + + points = np.fromfile(lidar_path, dtype=np.float32) + points = points.reshape(-1, 5) + + # taken from perception_det3d/dataset_converter/t4dataset_converter.py + locs = np.array([b.center for b in boxes]).reshape(-1, 3) + dims = np.array([b.wlh for b in boxes]).reshape(-1, 3) + rots = np.array([b.orientation.yaw_pitch_roll[0] for b in boxes]).reshape(-1, 1) + + gt_boxes = np.concatenate([locs, dims, -rots - np.pi / 2], axis=1) + + indices = box_np_ops.points_in_rbbox( + points[:, :3], + gt_boxes[:, :7], + ) + num_points = indices.sum(0) + + for box, num in zip(boxes, num_points): + # Create new record with num_lidar_pts and overwrite the original one + record: SampleAnnotationRecord = annotation_table._token_to_record[box.token] + new_record = SampleAnnotationRecord( + sample_token=record._sample_token, +
instance_token=record._instance_token, + attribute_tokens=record._attribute_tokens, + visibility_token=record._visibility_token, + translation=record._translation, + size=record._size, + rotation=record._rotation, + num_lidar_pts=int(num), + num_radar_pts=record._num_radar_pts, + ) + new_record._token = box.token # overwrite record token with old one + annotation_table._token_to_record[box.token] = new_record + + # connect next/prev tokens + for instance in nusc.instance: + try: + prev_sample_data: SampleAnnotationRecord = annotation_table._token_to_record[ + instance["first_annotation_token"] + ] + annotation_data_list = [ + v + for v in annotation_table._token_to_record.values() + if v._instance_token == instance["token"] + ] + annotation_data_list[0].prev_token = "" + for sample_data_i in range(1, len(annotation_data_list)): + cur_sample_data: SampleAnnotationRecord = annotation_data_list[sample_data_i] + if prev_sample_data._instance_token != cur_sample_data._instance_token: + prev_sample_data.next_token = "" + cur_sample_data.prev_token = "" + else: + prev_sample_data.next_token = cur_sample_data.token + cur_sample_data.prev_token = prev_sample_data.token + prev_sample_data = cur_sample_data + except KeyError as e: + logger.error(f"no key {e} in annotation table") diff --git a/perception_dataset/utils/gen_tokens.py b/perception_dataset/utils/gen_tokens.py new file mode 100644 index 00000000..69c73b8b --- /dev/null +++ b/perception_dataset/utils/gen_tokens.py @@ -0,0 +1,15 @@ +from secrets import token_bytes, token_hex, token_urlsafe +from typing import Any + + +def generate_token(nbytes: int = 16, mode: str = "hex") -> Any: + if nbytes < 16: + raise ValueError(f"nbytes {nbytes} is too short. Give >= 16.") + if mode == "bytes": + return token_bytes(nbytes=nbytes) + elif mode == "hex": + return token_hex(nbytes=nbytes) + elif mode == "urlsafe": + return token_urlsafe(nbytes=nbytes) + else: + raise ValueError(f"Invalid argument 'mode'='{mode}'") diff --git a/perception_dataset/utils/label_converter.py b/perception_dataset/utils/label_converter.py new file mode 100644 index 00000000..f73b93d2 --- /dev/null +++ b/perception_dataset/utils/label_converter.py @@ -0,0 +1,156 @@ +from typing import Dict + + +class LabelConverter: + def __init__(self): + self.label_map: Dict[str, str] = LabelConverter._set_label_map() + self.attribute_map: Dict[str, str] = LabelConverter._set_attribute_map() + + @staticmethod + def _set_label_map() -> Dict[str, str]: + label_map: Dict[str, str] = { + "bicycle": "bicycle", + "BICYCLE": "bicycle", + "vehicle.bicycle": "bicycle", + "bus": "bus", + "BUS": "bus", + "vehicle.bus (bendy & rigid)": "bus", + "vehicle.bus": "bus", + "car": "car", + "CAR": "car", + "vehicle.car": "car", + "vehicle.construction": "car", + "vehicle.emergency (ambulance & police)": "car", + "motorbike": "motorcycle", + "MOTORBIKE": "motorcycle", + "vehicle.motorcycle": "motorcycle", + "pedestrian": "pedestrian", + "PEDESTRIAN": "pedestrian", + "pedestrian.adult": "pedestrian", + "pedestrian.child": "pedestrian", + "pedestrian.construction_worker": "construction_worker", + "pedestrian.personal_mobility": "pedestrian", + "pedestrian.police_officer": "police_officer", + "pedestrian.stroller": "stroller", + "pedestrian.wheelchair": "wheelchair", + "truck": "truck", + "TRUCK": "truck", + "vehicle.truck": "truck", + "vehicle.trailer": "trailer", + "vehicle.ambulance": "ambulance", + "vehicle.fire": "fire_truck", + "vehicle.police": "police_car", + "animal": "animal", + "ANIMAL": "animal", + "unknown": "unknown", + "UNKNOWN": "unknown",
+ "movable_object.barrier": "barrier", + "movable_object.debris": "debris", + "movable_object.pushable_pullable": "pushable_pullable", + "movable_object.trafficcone": "cone", + "movable_object.traffic_cone": "cone", + "static_object.bicycle rack": "unknown", + "static_object.bicycle_rack": "unknown", + "static_object.bollard": "bollard", + "trailer": "trailer", + "motorcycle": "motorcycle", + "vehicle": "car", + "construction_worker": "construction_worker", + "stroller": "stroller", + "police_officer": "police_officer", + "wheelchair": "wheelchair", + "police_car": "police_car", + "fire_truck": "fire_truck", + "ambulance": "ambulance", + "forklift": "forklift", + "barrier": "barrier", + "pushable_pullable": "pushable_pullable", + "traffic_cone": "traffic_cone", + "bollard": "bollard", + "protruding_object": "protruding_object", + } + return label_map + + @staticmethod + def _set_attribute_map() -> Dict[str, str]: + attribute_map: Dict[str, str] = { + "pedestrian_state.siting_lying_down": "pedestrian_state.siting", + "pedestrian_state.sitting": "pedestrian_state.siting", + "pedestrian_state.standing": "pedestrian_state.standing", + "pedestrian_state.moving": "pedestrian_state.standing", + "vehicle_state.driving": "vehicle_state.driving", + "vehicle_state.moving": "vehicle_state.driving", + "vehicle_state.stopped": "vehicle_state.driving", + "vehicle_state.parked": "vehicle_state.parked", + "occlusion_state.none": "occlusion_state.none", + "occlusion_state.partial": "occlusion_state.partial", + "occlusion_state.most": "occlusion_state.most", + "occlusion_state.full": "occlusion_state.full", + "cycle_state.without_rider": "cycle_state.without_rider", + "cycle_state.with_rider": "cycle_state.with_rider", + "motorcycle_state.without_rider": "motorcycle_state.without_rider", + "motorcycle_state.with_rider": "motorcycle_state.with_rider", + "extremities_state.none": "extremities_state.none", + "extremities_state.open_door": "extremities_state.protruding_object", + "extremities_state.protruding_object": "extremities_state.protruding_object", + "emergency_vehicle_lights_state.on": "emergency_vehicle_lights_state.on", + "emergency_vehicle_lights_state.off": "emergency_vehicle_lights_state.off", + "emergency_vehicle_lights_state.unknown": "emergency_vehicle_lights_state.unknown", + "object_state.still": "object_state.still", + } + return attribute_map + + def convert_label( + self, + label: str, + ) -> str: + return_label: str = self.label_map[label] + return return_label + + def convert_attribute( + self, + attribute: str, + ) -> str: + return_attribute: str = self.attribute_map[attribute] + return return_attribute + + +class TrafficLightLabelConverter: + def __init__(self): + self.label_map: Dict[str, str] = TrafficLightLabelConverter._set_label_map() + + @staticmethod + def _set_label_map() -> Dict[str, str]: + label_map: Dict[str, str] = { + "unknown": "unknown", + "green": "green", + "green_straight": "green_straight", + "green_left": "green_left", + "green_right": "green_right", + "yellow": "yellow", + "yellow_straight": "yellow_straight", + "yellow_left": "yellow_left", + "yellow_right": "yellow_right", + "yellow_left_straight": "yellow_straight_left", + "yellow_right_straight": "yellow_straight_right", + "yellow_left_right_straight": "yellow_straight_left_right", + "red": "red", + "red_straight": "red_straight", + "red_left": "red_left", + "red_right": "red_right", + "red_left_straight": "red_straight_left", + "red_straight_left":
"red_straight_left", + "red_right_straight": "red_straight_right", + "red_left_right_straight": "red_straight_left_right", + "red_rightdiagonal": "red_rightdiagonal", + "red_leftdiagonal": "red_leftdiagonal", + } + return label_map + + def convert_label( + self, + label: str, + ) -> str: + return_label: str = self.label_map[label] + return return_label diff --git a/perception_dataset/utils/logger.py b/perception_dataset/utils/logger.py new file mode 100644 index 00000000..d2227b69 --- /dev/null +++ b/perception_dataset/utils/logger.py @@ -0,0 +1,116 @@ +import datetime +import logging +from logging import FileHandler, StreamHandler, getLogger +import os +import uuid + +from pythonjsonlogger import jsonlogger + +from perception_dataset.configurations import Configurations + + +def CustomTextFormatter(): + return logging.Formatter( + "[%(asctime)s] [%(levelname)s] [process] %(process)s %(processName)s [thread] %(thread)d %(threadName)s [file] %(pathname)s [func] %(funcName)s [line] %(lineno)d [%(message)s]" + ) + + +class CustomJsonFormatter(jsonlogger.JsonFormatter): + def parse(self): + return [ + "timestamp", + "level", + "process", + "processName", + "thread", + "threadName", + "pathname", + "funcName", + "lineno", + "message", + ] + + def add_fields(self, log_record, record, message_dict): + super(CustomJsonFormatter, self).add_fields(log_record, record, message_dict) + if not log_record.get("timestamp"): + now = datetime.datetime.utcnow().strftime("%Y-%m-%dT%H:%M:%S.%fZ") + log_record["timestamp"] = now + if log_record.get("level"): + log_record["level"] = log_record["level"].upper() + else: + log_record["level"] = record.levelname + + +class SensitiveWordFilter(logging.Filter): + def filter(self, record): # noqa A003 + sensitive_words = [ + "password", + "auth_token", + "token", + "ingest.sentry.io", + "secret", + ] + log_message = record.getMessage() + for word in sensitive_words: + if word in log_message: + return False + return True + + +def configure_logger( + log_file_path=Configurations.log_file_path, + modname=__name__, +): + log_directory = os.path.dirname(log_file_path) + os.makedirs(log_directory, exist_ok=True) + + logger = getLogger(modname) + logger.addFilter(SensitiveWordFilter()) + logger.setLevel(Configurations.log_level) + + sh = StreamHandler() + sh.setLevel(Configurations.log_level) + + if Configurations.log_format == "json": + formatter = CustomJsonFormatter() + elif Configurations.log_format == "text": + formatter = CustomTextFormatter() + else: + formatter = CustomJsonFormatter() + sh.setFormatter(formatter) + logger.addHandler(sh) + + fh = FileHandler(log_file_path) + fh.setLevel(Configurations.log_level) + + if Configurations.log_format == "json": + fh_formatter = CustomJsonFormatter() + elif Configurations.log_format == "text": + fh_formatter = CustomTextFormatter() + else: + fh_formatter = CustomJsonFormatter() + fh.setFormatter(fh_formatter) + logger.addHandler(fh) + + return logger + + +def log_decorator(logger=configure_logger()): + def _log_decorator(func): + def wrapper(*args, **kwargs): + job_id = str(uuid.uuid4())[:8] + logger.debug(f"START {job_id} func:{func.__name__} args:{args} kwargs:{kwargs}") + res = func(*args, **kwargs) + logger.debug(f"RETURN FROM {job_id} return:{res}") + return res + + return wrapper + + return _log_decorator + + +class Configurations(object): + log_format = os.getenv("LOG_FORMAT", "text") + log_level = os.getenv("LOG_LEVEL", "INFO") + log_file_path = os.getenv("LOG_FILE_PATH", f"/tmp/log/{datetime.date.today()}.log") + 
slack_token = os.getenv("SLACK_TOKEN", None) diff --git a/perception_dataset/utils/misc.py b/perception_dataset/utils/misc.py new file mode 100644 index 00000000..70e3efb2 --- /dev/null +++ b/perception_dataset/utils/misc.py @@ -0,0 +1,20 @@ +import os + +from perception_dataset.constants import T4_FORMAT_DIRECTORY_NAME + + +def unix_timestamp_to_nusc_timestamp(timestamp: float) -> int: + return int(timestamp * 1e6) + + +def nusc_timestamp_to_unix_timestamp(timestamp: int) -> float: + return float(timestamp) * 1e-6 + + +def get_sample_data_filename(sensor_channel: str, frame_index: int, fileformat: str): + filename = os.path.join( + T4_FORMAT_DIRECTORY_NAME.DATA.value, + sensor_channel, + f"{frame_index}.{fileformat}", + ) + return filename diff --git a/perception_dataset/utils/rosbag2.py b/perception_dataset/utils/rosbag2.py new file mode 100644 index 00000000..3d609a17 --- /dev/null +++ b/perception_dataset/utils/rosbag2.py @@ -0,0 +1,137 @@ +"""some implementations are from https://github.com/tier4/ros2bag_extensions/blob/main/ros2bag_extensions/ros2bag_extensions/verb/__init__.py""" + +import os.path as osp +from pathlib import Path +from typing import Dict, Optional, Tuple + +import builtin_interfaces.msg +import cv2 +from nptyping import NDArray +import numpy as np +from rclpy.time import Time +from rosbag2_py import ( + ConverterOptions, + Reindexer, + SequentialReader, + SequentialWriter, + StorageOptions, +) +from sensor_msgs.msg import CompressedImage, PointCloud2 +import sensor_msgs_py.point_cloud2 +import yaml + +from perception_dataset.utils.misc import unix_timestamp_to_nusc_timestamp + + +def get_options( + bag_dir: str, + storage_options: Optional[StorageOptions] = None, + converter_options: Optional[ConverterOptions] = None, +) -> Tuple[StorageOptions, ConverterOptions]: + storage_options = storage_options if storage_options else get_default_storage_options(bag_dir) + converter_options = converter_options if converter_options else get_default_converter_options() + return storage_options, converter_options + + +def create_reader( + bag_dir: str, + storage_options: Optional[StorageOptions] = None, + converter_options: Optional[ConverterOptions] = None, +) -> SequentialReader: + storage_options, converter_options = get_options(bag_dir, storage_options, converter_options) + reader = SequentialReader() + reader.open(storage_options, converter_options) + + return reader + + +def create_writer(bag_dir: str) -> SequentialWriter: + storage_options = StorageOptions(uri=bag_dir, storage_id="sqlite3") + converter_options = ConverterOptions( + input_serialization_format="cdr", output_serialization_format="cdr" + ) + writer = SequentialWriter() + writer.open(storage_options, converter_options) + + return writer + + +def reindex(bag_dir: str): + storage_options = get_default_storage_options(bag_dir) + Reindexer().reindex(storage_options) + + +def get_topic_type_dict(bag_dir: str) -> Dict[str, str]: + reader = create_reader(bag_dir) + + topic_name_to_topic_type: Dict[str, str] = {} + for topic in reader.get_all_topics_and_types(): + topic_name_to_topic_type[topic.name] = topic.type + + return topic_name_to_topic_type + + +def get_topic_count(bag_dir: str) -> Dict[str, int]: + with open(osp.join(bag_dir, "metadata.yaml")) as f: + bagfile_metadata = yaml.safe_load(f)["rosbag2_bagfile_information"] + topic_name_to_topic_count: Dict[str, int] = {} + for topic in bagfile_metadata["topics_with_message_count"]: + topic_name_to_topic_count[topic["topic_metadata"]["name"]] = topic["message_count"] 
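+ # metadata.yaml stores entries of the form (topic name is illustrative): + # topics_with_message_count: + # - topic_metadata: + # name: /sensing/lidar/concatenated/pointcloud + # type: sensor_msgs/msg/PointCloud2 + # message_count: 100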
+ return topic_name_to_topic_count + + +def get_default_converter_options() -> ConverterOptions: + return ConverterOptions( + input_serialization_format="cdr", + output_serialization_format="cdr", + ) + + +def infer_storage_id(bag_dir: str, storage_ids={".db3": "sqlite3", ".mcap": "mcap"}) -> str: + bag_dir_path = Path(bag_dir) + data_file = next(p for p in bag_dir_path.glob("*") if p.suffix in storage_ids) + if data_file.suffix not in storage_ids: + raise ValueError(f"Unsupported storage id: {data_file.suffix}") + return storage_ids[data_file.suffix] + + +def get_default_storage_options(bag_dir: str) -> StorageOptions: + storage_id = infer_storage_id(bag_dir) + return StorageOptions(uri=bag_dir, storage_id=storage_id) + + +def pointcloud_msg_to_numpy( + pointcloud_msg: PointCloud2, +) -> NDArray: + """numpy ver. of https://github.com/ros2/common_interfaces/blob/master/sensor_msgs_py/sensor_msgs_py/point_cloud2.py#L119""" + points_arr = np.array( + [tuple(p) for p in sensor_msgs_py.point_cloud2.read_points(pointcloud_msg)], + dtype=np.float32, + ) + if len(points_arr[0]) > 5: + points_arr = np.delete(points_arr, np.s_[5:], axis=1) + while len(points_arr[0]) < 5: + points_arr = np.insert(points_arr, len(points_arr[0]), -1, axis=1) + return points_arr + + +def compressed_msg_to_numpy(compressed_image_msg: CompressedImage) -> NDArray: + image_buf = np.ndarray( + shape=(1, len(compressed_image_msg.data)), dtype=np.uint8, buffer=compressed_image_msg.data + ) + image = cv2.imdecode(image_buf, cv2.IMREAD_ANYCOLOR) + return image + + +def stamp_to_unix_timestamp(stamp: builtin_interfaces.msg.Time) -> float: + return stamp.sec + stamp.nanosec * 1e-9 + + +def unix_timestamp_to_stamp(timestamp: float) -> builtin_interfaces.msg.Time: + sec_int = int(timestamp) + nano_sec_int = (timestamp - sec_int) * 1e9 + return Time(seconds=sec_int, nanoseconds=nano_sec_int).to_msg() + + +def stamp_to_nusc_timestamp(stamp: builtin_interfaces.msg.Time) -> int: + return unix_timestamp_to_nusc_timestamp(stamp_to_unix_timestamp(stamp)) diff --git a/perception_dataset/validate_t4_dataset.py b/perception_dataset/validate_t4_dataset.py new file mode 100644 index 00000000..c21955ae --- /dev/null +++ b/perception_dataset/validate_t4_dataset.py @@ -0,0 +1,38 @@ +"""This tool validates that every schema token can be resolved in its target schema, and that the camera and lidar data rates are higher than 9 Hz. +The 2D annotation format isn't supported yet. +""" +import argparse + +from loguru import logger +from nuscenes.nuscenes import NuScenes + +from perception_dataset.constants import T4_FORMAT_DIRECTORY_NAME +from perception_dataset.t4_dataset.data_validator import validate_data_hz +from perception_dataset.t4_dataset.format_validator import ( + validate_directory_structure, + validate_format, +) + + +@logger.catch +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--root-path", type=str, help="path to T4Dataset") + args = parser.parse_args() + + logger.info(f"Load {args.root_path}") + + validate_directory_structure(args.root_path) + + nusc = NuScenes( + version=T4_FORMAT_DIRECTORY_NAME.ANNOTATION.value, + dataroot=args.root_path, + verbose=False, + ) + + validate_format(nusc, args.root_path) + validate_data_hz(nusc) + + +if __name__ == "__main__": + main() diff --git a/poetry.lock b/poetry.lock new file mode 100644 index 00000000..ce72f522 --- /dev/null +++ b/poetry.lock @@ -0,0 +1,3206 @@ +# This file is automatically @generated by Poetry 1.5.1 and should not be changed by hand.
+ +[[package]] +name = "addict" +version = "2.4.0" +description = "Addict is a dictionary whose items can be set using both attribute and item syntax." +optional = false +python-versions = "*" +files = [ + {file = "addict-2.4.0-py3-none-any.whl", hash = "sha256:249bb56bbfd3cdc2a004ea0ff4c2b6ddc84d53bc2194761636eb314d5cfa5dfc"}, + {file = "addict-2.4.0.tar.gz", hash = "sha256:b3b2210e0e067a281f5646c8c5db92e99b7231ea8b0eb5f74dbdf9e259d4e494"}, +] + +[[package]] +name = "anyio" +version = "3.7.0" +description = "High level compatibility layer for multiple asynchronous event loop implementations" +optional = false +python-versions = ">=3.7" +files = [ + {file = "anyio-3.7.0-py3-none-any.whl", hash = "sha256:eddca883c4175f14df8aedce21054bfca3adb70ffe76a9f607aef9d7fa2ea7f0"}, + {file = "anyio-3.7.0.tar.gz", hash = "sha256:275d9973793619a5374e1c89a4f4ad3f4b0a5510a2b5b939444bee8f4c4d37ce"}, +] + +[package.dependencies] +exceptiongroup = {version = "*", markers = "python_version < \"3.11\""} +idna = ">=2.8" +sniffio = ">=1.1" + +[package.extras] +doc = ["Sphinx (>=6.1.0)", "packaging", "sphinx-autodoc-typehints (>=1.2.0)", "sphinx-rtd-theme", "sphinxcontrib-jquery"] +test = ["anyio[trio]", "coverage[toml] (>=4.5)", "hypothesis (>=4.0)", "mock (>=4)", "psutil (>=5.9)", "pytest (>=7.0)", "pytest-mock (>=3.6.1)", "trustme", "uvloop (>=0.17)"] +trio = ["trio (<0.22)"] + +[[package]] +name = "appnope" +version = "0.1.3" +description = "Disable App Nap on macOS >= 10.9" +optional = false +python-versions = "*" +files = [ + {file = "appnope-0.1.3-py2.py3-none-any.whl", hash = "sha256:265a455292d0bd8a72453494fa24df5a11eb18373a60c7c0430889f22548605e"}, + {file = "appnope-0.1.3.tar.gz", hash = "sha256:02bd91c4de869fbb1e1c50aafc4098827a7a54ab2f39d9dcba6c9547ed920e24"}, +] + +[[package]] +name = "argon2-cffi" +version = "21.3.0" +description = "The secure Argon2 password hashing algorithm." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "argon2-cffi-21.3.0.tar.gz", hash = "sha256:d384164d944190a7dd7ef22c6aa3ff197da12962bd04b17f64d4e93d934dba5b"}, + {file = "argon2_cffi-21.3.0-py3-none-any.whl", hash = "sha256:8c976986f2c5c0e5000919e6de187906cfd81fb1c72bf9d88c01177e77da7f80"}, +] + +[package.dependencies] +argon2-cffi-bindings = "*" + +[package.extras] +dev = ["cogapp", "coverage[toml] (>=5.0.2)", "furo", "hypothesis", "pre-commit", "pytest", "sphinx", "sphinx-notfound-page", "tomli"] +docs = ["furo", "sphinx", "sphinx-notfound-page"] +tests = ["coverage[toml] (>=5.0.2)", "hypothesis", "pytest"] + +[[package]] +name = "argon2-cffi-bindings" +version = "21.2.0" +description = "Low-level CFFI bindings for Argon2" +optional = false +python-versions = ">=3.6" +files = [ + {file = "argon2-cffi-bindings-21.2.0.tar.gz", hash = "sha256:bb89ceffa6c791807d1305ceb77dbfacc5aa499891d2c55661c6459651fc39e3"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:ccb949252cb2ab3a08c02024acb77cfb179492d5701c7cbdbfd776124d4d2367"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9524464572e12979364b7d600abf96181d3541da11e23ddf565a32e70bd4dc0d"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b746dba803a79238e925d9046a63aa26bf86ab2a2fe74ce6b009a1c3f5c8f2ae"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:58ed19212051f49a523abb1dbe954337dc82d947fb6e5a0da60f7c8471a8476c"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:bd46088725ef7f58b5a1ef7ca06647ebaf0eb4baff7d1d0d177c6cc8744abd86"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_i686.whl", hash = "sha256:8cd69c07dd875537a824deec19f978e0f2078fdda07fd5c42ac29668dda5f40f"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:f1152ac548bd5b8bcecfb0b0371f082037e47128653df2e8ba6e914d384f3c3e"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win32.whl", hash = "sha256:603ca0aba86b1349b147cab91ae970c63118a0f30444d4bc80355937c950c082"}, + {file = "argon2_cffi_bindings-21.2.0-cp36-abi3-win_amd64.whl", hash = "sha256:b2ef1c30440dbbcba7a5dc3e319408b59676e2e039e2ae11a8775ecf482b192f"}, + {file = "argon2_cffi_bindings-21.2.0-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:e415e3f62c8d124ee16018e491a009937f8cf7ebf5eb430ffc5de21b900dad93"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3e385d1c39c520c08b53d63300c3ecc28622f076f4c2b0e6d7e796e9f6502194"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2c3e3cc67fdb7d82c4718f19b4e7a87123caf8a93fde7e23cf66ac0337d3cb3f"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6a22ad9800121b71099d0fb0a65323810a15f2e292f2ba450810a7316e128ee5"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:f9f8b450ed0547e3d473fdc8612083fd08dd2120d6ac8f73828df9b7d45bb351"}, + {file = "argon2_cffi_bindings-21.2.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:93f9bf70084f97245ba10ee36575f0c3f1e7d7724d67d8e5b08e61787c320ed7"}, + {file = 
"argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:3b9ef65804859d335dc6b31582cad2c5166f0c3e7975f324d9ffaa34ee7e6583"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d4966ef5848d820776f5f562a7d45fdd70c2f330c961d0d745b784034bd9f48d"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:20ef543a89dee4db46a1a6e206cd015360e5a75822f76df533845c3cbaf72670"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ed2937d286e2ad0cc79a7087d3c272832865f779430e0cc2b4f3718d3159b0cb"}, + {file = "argon2_cffi_bindings-21.2.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:5e00316dabdaea0b2dd82d141cc66889ced0cdcbfa599e8b471cf22c620c329a"}, +] + +[package.dependencies] +cffi = ">=1.0.1" + +[package.extras] +dev = ["cogapp", "pre-commit", "pytest", "wheel"] +tests = ["pytest"] + +[[package]] +name = "arrow" +version = "1.2.3" +description = "Better dates & times for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "arrow-1.2.3-py3-none-any.whl", hash = "sha256:5a49ab92e3b7b71d96cd6bfcc4df14efefc9dfa96ea19045815914a6ab6b1fe2"}, + {file = "arrow-1.2.3.tar.gz", hash = "sha256:3934b30ca1b9f292376d9db15b19446088d12ec58629bc3f0da28fd55fb633a1"}, +] + +[package.dependencies] +python-dateutil = ">=2.7.0" + +[[package]] +name = "asttokens" +version = "2.2.1" +description = "Annotate AST trees with source code positions" +optional = false +python-versions = "*" +files = [ + {file = "asttokens-2.2.1-py2.py3-none-any.whl", hash = "sha256:6b0ac9e93fb0335014d382b8fa9b3afa7df546984258005da0b9e7095b3deb1c"}, + {file = "asttokens-2.2.1.tar.gz", hash = "sha256:4622110b2a6f30b77e1473affaa97e711bc2f07d3f10848420ff1898edbe94f3"}, +] + +[package.dependencies] +six = "*" + +[package.extras] +test = ["astroid", "pytest"] + +[[package]] +name = "attrs" +version = "23.1.0" +description = "Classes Without Boilerplate" +optional = false +python-versions = ">=3.7" +files = [ + {file = "attrs-23.1.0-py3-none-any.whl", hash = "sha256:1f28b4522cdc2fb4256ac1a020c78acf9cba2c6b461ccd2c126f3aa8e8335d04"}, + {file = "attrs-23.1.0.tar.gz", hash = "sha256:6279836d581513a26f1bf235f9acd333bc9115683f14f7e8fae46c98fc50e015"}, +] + +[package.extras] +cov = ["attrs[tests]", "coverage[toml] (>=5.3)"] +dev = ["attrs[docs,tests]", "pre-commit"] +docs = ["furo", "myst-parser", "sphinx", "sphinx-notfound-page", "sphinxcontrib-towncrier", "towncrier", "zope-interface"] +tests = ["attrs[tests-no-zope]", "zope-interface"] +tests-no-zope = ["cloudpickle", "hypothesis", "mypy (>=1.1.1)", "pympler", "pytest (>=4.3.0)", "pytest-mypy-plugins", "pytest-xdist[psutil]"] + +[[package]] +name = "backcall" +version = "0.2.0" +description = "Specifications for callback functions passed in to an API" +optional = false +python-versions = "*" +files = [ + {file = "backcall-0.2.0-py2.py3-none-any.whl", hash = "sha256:fbbce6a29f263178a1f7915c1940bde0ec2b2a967566fe1c65c1dfb7422bd255"}, + {file = "backcall-0.2.0.tar.gz", hash = "sha256:5cbdbf27be5e7cfadb448baf0aa95508f91f2bbc6c6437cd9cd06e2a4c215e1e"}, +] + +[[package]] +name = "beautifulsoup4" +version = "4.12.2" +description = "Screen-scraping library" +optional = false +python-versions = ">=3.6.0" +files = [ + {file = "beautifulsoup4-4.12.2-py3-none-any.whl", hash = 
"sha256:bd2520ca0d9d7d12694a53d44ac482d181b4ec1888909b035a3dbf40d0f57d4a"}, + {file = "beautifulsoup4-4.12.2.tar.gz", hash = "sha256:492bbc69dca35d12daac71c4db1bfff0c876c00ef4a2ffacce226d4638eb72da"}, +] + +[package.dependencies] +soupsieve = ">1.2" + +[package.extras] +html5lib = ["html5lib"] +lxml = ["lxml"] + +[[package]] +name = "bleach" +version = "6.0.0" +description = "An easy safelist-based HTML-sanitizing tool." +optional = false +python-versions = ">=3.7" +files = [ + {file = "bleach-6.0.0-py3-none-any.whl", hash = "sha256:33c16e3353dbd13028ab4799a0f89a83f113405c766e9c122df8a06f5b85b3f4"}, + {file = "bleach-6.0.0.tar.gz", hash = "sha256:1a1a85c1595e07d8db14c5f09f09e6433502c51c595970edc090551f0db99414"}, +] + +[package.dependencies] +six = ">=1.9.0" +webencodings = "*" + +[package.extras] +css = ["tinycss2 (>=1.1.0,<1.2)"] + +[[package]] +name = "cachetools" +version = "5.3.1" +description = "Extensible memoizing collections and decorators" +optional = false +python-versions = ">=3.7" +files = [ + {file = "cachetools-5.3.1-py3-none-any.whl", hash = "sha256:95ef631eeaea14ba2e36f06437f36463aac3a096799e876ee55e5cdccb102590"}, + {file = "cachetools-5.3.1.tar.gz", hash = "sha256:dce83f2d9b4e1f732a8cd44af8e8fab2dbe46201467fc98b3ef8f269092bf62b"}, +] + +[[package]] +name = "certifi" +version = "2023.5.7" +description = "Python package for providing Mozilla's CA Bundle." +optional = false +python-versions = ">=3.6" +files = [ + {file = "certifi-2023.5.7-py3-none-any.whl", hash = "sha256:c6c2e98f5c7869efca1f8916fed228dd91539f9f1b444c314c06eef02980c716"}, + {file = "certifi-2023.5.7.tar.gz", hash = "sha256:0f0d56dc5a6ad56fd4ba36484d6cc34451e1c6548c61daad8c320169f91eddc7"}, +] + +[[package]] +name = "cffi" +version = "1.15.1" +description = "Foreign Function Interface for Python calling C code." 
+optional = false +python-versions = "*" +files = [ + {file = "cffi-1.15.1-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:a66d3508133af6e8548451b25058d5812812ec3798c886bf38ed24a98216fab2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_i686.whl", hash = "sha256:470c103ae716238bbe698d67ad020e1db9d9dba34fa5a899b5e21577e6d52ed2"}, + {file = "cffi-1.15.1-cp27-cp27m-manylinux1_x86_64.whl", hash = "sha256:9ad5db27f9cabae298d151c85cf2bad1d359a1b9c686a275df03385758e2f914"}, + {file = "cffi-1.15.1-cp27-cp27m-win32.whl", hash = "sha256:b3bbeb01c2b273cca1e1e0c5df57f12dce9a4dd331b4fa1635b8bec26350bde3"}, + {file = "cffi-1.15.1-cp27-cp27m-win_amd64.whl", hash = "sha256:e00b098126fd45523dd056d2efba6c5a63b71ffe9f2bbe1a4fe1716e1d0c331e"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_i686.whl", hash = "sha256:d61f4695e6c866a23a21acab0509af1cdfd2c013cf256bbf5b6b5e2695827162"}, + {file = "cffi-1.15.1-cp27-cp27mu-manylinux1_x86_64.whl", hash = "sha256:ed9cb427ba5504c1dc15ede7d516b84757c3e3d7868ccc85121d9310d27eed0b"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:39d39875251ca8f612b6f33e6b1195af86d1b3e60086068be9cc053aa4376e21"}, + {file = "cffi-1.15.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:285d29981935eb726a4399badae8f0ffdff4f5050eaa6d0cfc3f64b857b77185"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3eb6971dcff08619f8d91607cfc726518b6fa2a9eba42856be181c6d0d9515fd"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:21157295583fe8943475029ed5abdcf71eb3911894724e360acff1d61c1d54bc"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5635bd9cb9731e6d4a1132a498dd34f764034a8ce60cef4f5319c0541159392f"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:2012c72d854c2d03e45d06ae57f40d78e5770d252f195b93f581acf3ba44496e"}, + {file = "cffi-1.15.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd86c085fae2efd48ac91dd7ccffcfc0571387fe1193d33b6394db7ef31fe2a4"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:fa6693661a4c91757f4412306191b6dc88c1703f780c8234035eac011922bc01"}, + {file = "cffi-1.15.1-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:59c0b02d0a6c384d453fece7566d1c7e6b7bae4fc5874ef2ef46d56776d61c9e"}, + {file = "cffi-1.15.1-cp310-cp310-win32.whl", hash = "sha256:cba9d6b9a7d64d4bd46167096fc9d2f835e25d7e4c121fb2ddfc6528fb0413b2"}, + {file = "cffi-1.15.1-cp310-cp310-win_amd64.whl", hash = "sha256:ce4bcc037df4fc5e3d184794f27bdaab018943698f4ca31630bc7f84a7b69c6d"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3d08afd128ddaa624a48cf2b859afef385b720bb4b43df214f85616922e6a5ac"}, + {file = "cffi-1.15.1-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:3799aecf2e17cf585d977b780ce79ff0dc9b78d799fc694221ce814c2c19db83"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a591fe9e525846e4d154205572a029f653ada1a78b93697f3b5a8f1f2bc055b9"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3548db281cd7d2561c9ad9984681c95f7b0e38881201e157833a2342c30d5e8c"}, + {file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:91fc98adde3d7881af9b59ed0294046f3806221863722ba7d8d120c575314325"}, + 
{file = "cffi-1.15.1-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:94411f22c3985acaec6f83c6df553f2dbe17b698cc7f8ae751ff2237d96b9e3c"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:03425bdae262c76aad70202debd780501fabeaca237cdfddc008987c0e0f59ef"}, + {file = "cffi-1.15.1-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cc4d65aeeaa04136a12677d3dd0b1c0c94dc43abac5860ab33cceb42b801c1e8"}, + {file = "cffi-1.15.1-cp311-cp311-win32.whl", hash = "sha256:a0f100c8912c114ff53e1202d0078b425bee3649ae34d7b070e9697f93c5d52d"}, + {file = "cffi-1.15.1-cp311-cp311-win_amd64.whl", hash = "sha256:04ed324bda3cda42b9b695d51bb7d54b680b9719cfab04227cdd1e04e5de3104"}, + {file = "cffi-1.15.1-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:50a74364d85fd319352182ef59c5c790484a336f6db772c1a9231f1c3ed0cbd7"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e263d77ee3dd201c3a142934a086a4450861778baaeeb45db4591ef65550b0a6"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:cec7d9412a9102bdc577382c3929b337320c4c4c4849f2c5cdd14d7368c5562d"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:4289fc34b2f5316fbb762d75362931e351941fa95fa18789191b33fc4cf9504a"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:173379135477dc8cac4bc58f45db08ab45d228b3363adb7af79436135d028405"}, + {file = "cffi-1.15.1-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6975a3fac6bc83c4a65c9f9fcab9e47019a11d3d2cf7f3c0d03431bf145a941e"}, + {file = "cffi-1.15.1-cp36-cp36m-win32.whl", hash = "sha256:2470043b93ff09bf8fb1d46d1cb756ce6132c54826661a32d4e4d132e1977adf"}, + {file = "cffi-1.15.1-cp36-cp36m-win_amd64.whl", hash = "sha256:30d78fbc8ebf9c92c9b7823ee18eb92f2e6ef79b45ac84db507f52fbe3ec4497"}, + {file = "cffi-1.15.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:198caafb44239b60e252492445da556afafc7d1e3ab7a1fb3f0584ef6d742375"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:5ef34d190326c3b1f822a5b7a45f6c4535e2f47ed06fec77d3d799c450b2651e"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8102eaf27e1e448db915d08afa8b41d6c7ca7a04b7d73af6514df10a3e74bd82"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:5df2768244d19ab7f60546d0c7c63ce1581f7af8b5de3eb3004b9b6fc8a9f84b"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a8c4917bd7ad33e8eb21e9a5bbba979b49d9a97acb3a803092cbc1133e20343c"}, + {file = "cffi-1.15.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0e2642fe3142e4cc4af0799748233ad6da94c62a8bec3a6648bf8ee68b1c7426"}, + {file = "cffi-1.15.1-cp37-cp37m-win32.whl", hash = "sha256:e229a521186c75c8ad9490854fd8bbdd9a0c9aa3a524326b55be83b54d4e0ad9"}, + {file = "cffi-1.15.1-cp37-cp37m-win_amd64.whl", hash = "sha256:a0b71b1b8fbf2b96e41c4d990244165e2c9be83d54962a9a1d118fd8657d2045"}, + {file = "cffi-1.15.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:320dab6e7cb2eacdf0e658569d2575c4dad258c0fcc794f46215e1e39f90f2c3"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1e74c6b51a9ed6589199c787bf5f9875612ca4a8a0785fb2d4a84429badaf22a"}, + 
{file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a5c84c68147988265e60416b57fc83425a78058853509c1b0629c180094904a5"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3b926aa83d1edb5aa5b427b4053dc420ec295a08e40911296b9eb1b6170f6cca"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:87c450779d0914f2861b8526e035c5e6da0a3199d8f1add1a665e1cbc6fc6d02"}, + {file = "cffi-1.15.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4f2c9f67e9821cad2e5f480bc8d83b8742896f1242dba247911072d4fa94c192"}, + {file = "cffi-1.15.1-cp38-cp38-win32.whl", hash = "sha256:8b7ee99e510d7b66cdb6c593f21c043c248537a32e0bedf02e01e9553a172314"}, + {file = "cffi-1.15.1-cp38-cp38-win_amd64.whl", hash = "sha256:00a9ed42e88df81ffae7a8ab6d9356b371399b91dbdf0c3cb1e84c03a13aceb5"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:54a2db7b78338edd780e7ef7f9f6c442500fb0d41a5a4ea24fff1c929d5af585"}, + {file = "cffi-1.15.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:fcd131dd944808b5bdb38e6f5b53013c5aa4f334c5cad0c72742f6eba4b73db0"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7473e861101c9e72452f9bf8acb984947aa1661a7704553a9f6e4baa5ba64415"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6c9a799e985904922a4d207a94eae35c78ebae90e128f0c4e521ce339396be9d"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3bcde07039e586f91b45c88f8583ea7cf7a0770df3a1649627bf598332cb6984"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:33ab79603146aace82c2427da5ca6e58f2b3f2fb5da893ceac0c42218a40be35"}, + {file = "cffi-1.15.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5d598b938678ebf3c67377cdd45e09d431369c3b1a5b331058c338e201f12b27"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:db0fbb9c62743ce59a9ff687eb5f4afbe77e5e8403d6697f7446e5f609976f76"}, + {file = "cffi-1.15.1-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:98d85c6a2bef81588d9227dde12db8a7f47f639f4a17c9ae08e773aa9c697bf3"}, + {file = "cffi-1.15.1-cp39-cp39-win32.whl", hash = "sha256:40f4774f5a9d4f5e344f31a32b5096977b5d48560c5592e2f3d2c4374bd543ee"}, + {file = "cffi-1.15.1-cp39-cp39-win_amd64.whl", hash = "sha256:70df4e3b545a17496c9b3f41f5115e69a4f2e77e94e1d2a8e1070bc0c38c8a3c"}, + {file = "cffi-1.15.1.tar.gz", hash = "sha256:d400bfb9a37b1351253cb402671cea7e89bdecc294e8016a707f6d1d8ac934f9"}, +] + +[package.dependencies] +pycparser = "*" + +[[package]] +name = "charset-normalizer" +version = "3.1.0" +description = "The Real First Universal Charset Detector. Open, modern and actively maintained alternative to Chardet." 
+optional = false +python-versions = ">=3.7.0" +files = [ + {file = "charset-normalizer-3.1.0.tar.gz", hash = "sha256:34e0a2f9c370eb95597aae63bf85eb5e96826d81e3dcf88b8886012906f509b5"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:e0ac8959c929593fee38da1c2b64ee9778733cdf03c482c9ff1d508b6b593b2b"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d7fc3fca01da18fbabe4625d64bb612b533533ed10045a2ac3dd194bfa656b60"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:04eefcee095f58eaabe6dc3cc2262f3bcd776d2c67005880894f447b3f2cb9c1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:20064ead0717cf9a73a6d1e779b23d149b53daf971169289ed2ed43a71e8d3b0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1435ae15108b1cb6fffbcea2af3d468683b7afed0169ad718451f8db5d1aff6f"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c84132a54c750fda57729d1e2599bb598f5fa0344085dbde5003ba429a4798c0"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:75f2568b4189dda1c567339b48cba4ac7384accb9c2a7ed655cd86b04055c795"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:11d3bcb7be35e7b1bba2c23beedac81ee893ac9871d0ba79effc7fc01167db6c"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:891cf9b48776b5c61c700b55a598621fdb7b1e301a550365571e9624f270c203"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:5f008525e02908b20e04707a4f704cd286d94718f48bb33edddc7d7b584dddc1"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_ppc64le.whl", hash = "sha256:b06f0d3bf045158d2fb8837c5785fe9ff9b8c93358be64461a1089f5da983137"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_s390x.whl", hash = "sha256:49919f8400b5e49e961f320c735388ee686a62327e773fa5b3ce6721f7e785ce"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:22908891a380d50738e1f978667536f6c6b526a2064156203d418f4856d6e86a"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win32.whl", hash = "sha256:12d1a39aa6b8c6f6248bb54550efcc1c38ce0d8096a146638fd4738e42284448"}, + {file = "charset_normalizer-3.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:65ed923f84a6844de5fd29726b888e58c62820e0769b76565480e1fdc3d062f8"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:9a3267620866c9d17b959a84dd0bd2d45719b817245e49371ead79ed4f710d19"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:6734e606355834f13445b6adc38b53c0fd45f1a56a9ba06c2058f86893ae8017"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:f8303414c7b03f794347ad062c0516cee0e15f7a612abd0ce1e25caf6ceb47df"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aaf53a6cebad0eae578f062c7d462155eada9c172bd8c4d250b8c1d8eb7f916a"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:3dc5b6a8ecfdc5748a7e429782598e4f17ef378e3e272eeb1340ea57c9109f41"}, + {file = 
"charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:e1b25e3ad6c909f398df8921780d6a3d120d8c09466720226fc621605b6f92b1"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:0ca564606d2caafb0abe6d1b5311c2649e8071eb241b2d64e75a0d0065107e62"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b82fab78e0b1329e183a65260581de4375f619167478dddab510c6c6fb04d9b6"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:bd7163182133c0c7701b25e604cf1611c0d87712e56e88e7ee5d72deab3e76b5"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:11d117e6c63e8f495412d37e7dc2e2fff09c34b2d09dbe2bee3c6229577818be"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:cf6511efa4801b9b38dc5546d7547d5b5c6ef4b081c60b23e4d941d0eba9cbeb"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:abc1185d79f47c0a7aaf7e2412a0eb2c03b724581139193d2d82b3ad8cbb00ac"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:cb7b2ab0188829593b9de646545175547a70d9a6e2b63bf2cd87a0a391599324"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win32.whl", hash = "sha256:c36bcbc0d5174a80d6cccf43a0ecaca44e81d25be4b7f90f0ed7bcfbb5a00909"}, + {file = "charset_normalizer-3.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:cca4def576f47a09a943666b8f829606bcb17e2bc2d5911a46c8f8da45f56755"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:0c95f12b74681e9ae127728f7e5409cbbef9cd914d5896ef238cc779b8152373"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:fca62a8301b605b954ad2e9c3666f9d97f63872aa4efcae5492baca2056b74ab"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:ac0aa6cd53ab9a31d397f8303f92c42f534693528fafbdb997c82bae6e477ad9"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:c3af8e0f07399d3176b179f2e2634c3ce9c1301379a6b8c9c9aeecd481da494f"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:3a5fc78f9e3f501a1614a98f7c54d3969f3ad9bba8ba3d9b438c3bc5d047dd28"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:628c985afb2c7d27a4800bfb609e03985aaecb42f955049957814e0491d4006d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:74db0052d985cf37fa111828d0dd230776ac99c740e1a758ad99094be4f1803d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:1e8fcdd8f672a1c4fc8d0bd3a2b576b152d2a349782d1eb0f6b8e52e9954731d"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_ppc64le.whl", hash = "sha256:04afa6387e2b282cf78ff3dbce20f0cc071c12dc8f685bd40960cc68644cfea6"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_s390x.whl", hash = "sha256:dd5653e67b149503c68c4018bf07e42eeed6b4e956b24c00ccdf93ac79cdff84"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:d2686f91611f9e17f4548dbf050e75b079bbc2a82be565832bc8ea9047b61c8c"}, + {file = 
"charset_normalizer-3.1.0-cp37-cp37m-win32.whl", hash = "sha256:4155b51ae05ed47199dc5b2a4e62abccb274cee6b01da5b895099b61b1982974"}, + {file = "charset_normalizer-3.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:322102cdf1ab682ecc7d9b1c5eed4ec59657a65e1c146a0da342b78f4112db23"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:e633940f28c1e913615fd624fcdd72fdba807bf53ea6925d6a588e84e1151531"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:3a06f32c9634a8705f4ca9946d667609f52cf130d5548881401f1eb2c39b1e2c"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:7381c66e0561c5757ffe616af869b916c8b4e42b367ab29fedc98481d1e74e14"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3573d376454d956553c356df45bb824262c397c6e26ce43e8203c4c540ee0acb"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:e89df2958e5159b811af9ff0f92614dabf4ff617c03a4c1c6ff53bf1c399e0e1"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:78cacd03e79d009d95635e7d6ff12c21eb89b894c354bd2b2ed0b4763373693b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de5695a6f1d8340b12a5d6d4484290ee74d61e467c39ff03b39e30df62cf83a0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1c60b9c202d00052183c9be85e5eaf18a4ada0a47d188a83c8f5c5b23252f649"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:f645caaf0008bacf349875a974220f1f1da349c5dbe7c4ec93048cdc785a3326"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ea9f9c6034ea2d93d9147818f17c2a0860d41b71c38b9ce4d55f21b6f9165a11"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_ppc64le.whl", hash = "sha256:80d1543d58bd3d6c271b66abf454d437a438dff01c3e62fdbcd68f2a11310d4b"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_s390x.whl", hash = "sha256:73dc03a6a7e30b7edc5b01b601e53e7fc924b04e1835e8e407c12c037e81adbd"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:6f5c2e7bc8a4bf7c426599765b1bd33217ec84023033672c1e9a8b35eaeaaaf8"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win32.whl", hash = "sha256:12a2b561af122e3d94cdb97fe6fb2bb2b82cef0cdca131646fdb940a1eda04f0"}, + {file = "charset_normalizer-3.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:3160a0fd9754aab7d47f95a6b63ab355388d890163eb03b2d2b87ab0a30cfa59"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:38e812a197bf8e71a59fe55b757a84c1f946d0ac114acafaafaf21667a7e169e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6baf0baf0d5d265fa7944feb9f7451cc316bfe30e8df1a61b1bb08577c554f31"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:8f25e17ab3039b05f762b0a55ae0b3632b2e073d9c8fc88e89aca31a6198e88f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3747443b6a904001473370d7810aa19c3a180ccd52a7157aacc264a5ac79265e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = 
"sha256:b116502087ce8a6b7a5f1814568ccbd0e9f6cfd99948aa59b0e241dc57cf739f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d16fd5252f883eb074ca55cb622bc0bee49b979ae4e8639fff6ca3ff44f9f854"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:21fa558996782fc226b529fdd2ed7866c2c6ec91cee82735c98a197fae39f706"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:6f6c7a8a57e9405cad7485f4c9d3172ae486cfef1344b5ddd8e5239582d7355e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ac3775e3311661d4adace3697a52ac0bab17edd166087d493b52d4f4f553f9f0"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:10c93628d7497c81686e8e5e557aafa78f230cd9e77dd0c40032ef90c18f2230"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_ppc64le.whl", hash = "sha256:6f4f4668e1831850ebcc2fd0b1cd11721947b6dc7c00bf1c6bd3c929ae14f2c7"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_s390x.whl", hash = "sha256:0be65ccf618c1e7ac9b849c315cc2e8a8751d9cfdaa43027d4f6624bd587ab7e"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:53d0a3fa5f8af98a1e261de6a3943ca631c526635eb5817a87a59d9a57ebf48f"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win32.whl", hash = "sha256:a04f86f41a8916fe45ac5024ec477f41f886b3c435da2d4e3d2709b22ab02af1"}, + {file = "charset_normalizer-3.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:830d2948a5ec37c386d3170c483063798d7879037492540f10a475e3fd6f244b"}, + {file = "charset_normalizer-3.1.0-py3-none-any.whl", hash = "sha256:3d9098b479e78c85080c98e1e35ff40b4a31d8953102bb0fd7d1b6f8a2111a3d"}, +] + +[[package]] +name = "click" +version = "8.1.3" +description = "Composable command line interface toolkit" +optional = false +python-versions = ">=3.7" +files = [ + {file = "click-8.1.3-py3-none-any.whl", hash = "sha256:bb4d8133cb15a609f44e8213d9b391b0809795062913b383c62be0ee95b1db48"}, + {file = "click-8.1.3.tar.gz", hash = "sha256:7682dc8afb30297001674575ea00d1814d808d6a36af415a82bd481d37ba7b8e"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[[package]] +name = "colorama" +version = "0.4.6" +description = "Cross-platform colored terminal text." +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,!=3.5.*,!=3.6.*,>=2.7" +files = [ + {file = "colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6"}, + {file = "colorama-0.4.6.tar.gz", hash = "sha256:08695f5cb7ed6e0531a20572697297273c47b8cae5a63ffc6d6ed5c201be6e44"}, +] + +[[package]] +name = "comm" +version = "0.1.3" +description = "Jupyter Python Comm implementation, for usage in ipykernel, xeus-python etc." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "comm-0.1.3-py3-none-any.whl", hash = "sha256:16613c6211e20223f215fc6d3b266a247b6e2641bf4e0a3ad34cb1aff2aa3f37"}, + {file = "comm-0.1.3.tar.gz", hash = "sha256:a61efa9daffcfbe66fd643ba966f846a624e4e6d6767eda9cf6e993aadaab93e"}, +] + +[package.dependencies] +traitlets = ">=5.3" + +[package.extras] +lint = ["black (>=22.6.0)", "mdformat (>0.7)", "mdformat-gfm (>=0.3.5)", "ruff (>=0.0.156)"] +test = ["pytest"] +typing = ["mypy (>=0.990)"] + +[[package]] +name = "configargparse" +version = "1.5.3" +description = "A drop-in replacement for argparse that allows options to also be set via config files and/or environment variables." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "ConfigArgParse-1.5.3-py3-none-any.whl", hash = "sha256:18f6535a2db9f6e02bd5626cc7455eac3e96b9ab3d969d366f9aafd5c5c00fe7"}, + {file = "ConfigArgParse-1.5.3.tar.gz", hash = "sha256:1b0b3cbf664ab59dada57123c81eff3d9737e0d11d8cf79e3d6eb10823f1739f"}, +] + +[package.extras] +test = ["PyYAML", "mock", "pytest"] +yaml = ["PyYAML"] + +[[package]] +name = "cycler" +version = "0.11.0" +description = "Composable style cycles" +optional = false +python-versions = ">=3.6" +files = [ + {file = "cycler-0.11.0-py3-none-any.whl", hash = "sha256:3a27e95f763a428a739d2add979fa7494c912a32c17c4c38c4d5f082cad165a3"}, + {file = "cycler-0.11.0.tar.gz", hash = "sha256:9c87405839a19696e837b3b818fed3f5f69f16f1eec1a1ad77e043dcea9c772f"}, +] + +[[package]] +name = "dash" +version = "2.10.2" +description = "A Python framework for building reactive web-apps. Developed by Plotly." +optional = false +python-versions = ">=3.6" +files = [ + {file = "dash-2.10.2-py3-none-any.whl", hash = "sha256:f1b7132558bd2341e6cf7ca1e83e216d46bbb27a255ca962a0fa5561c6776953"}, + {file = "dash-2.10.2.tar.gz", hash = "sha256:b99839890b44171da0e3668c2e607ef4b4f6948a6074c034693534ba52aa1d2b"}, +] + +[package.dependencies] +dash-core-components = "2.0.0" +dash-html-components = "2.0.0" +dash-table = "5.0.0" +Flask = ">=1.0.4,<2.3.0" +plotly = ">=5.0.0" +Werkzeug = "<2.3.0" + +[package.extras] +celery = ["celery[redis] (>=5.1.2)", "importlib-metadata (<5)", "redis (>=3.5.3)"] +ci = ["black (==21.6b0)", "black (==22.3.0)", "dash-dangerously-set-inner-html", "dash-flow-example (==0.0.5)", "flake8 (==3.9.2)", "flaky (==3.7.0)", "flask-talisman (==1.0.0)", "isort (==4.3.21)", "mimesis", "mock (==4.0.3)", "numpy", "openpyxl", "orjson (==3.5.4)", "orjson (==3.6.7)", "pandas (==1.1.5)", "pandas (>=1.4.0)", "preconditions", "pyarrow", "pyarrow (<3)", "pylint (==2.13.5)", "pytest-mock", "pytest-rerunfailures", "pytest-sugar (==0.9.6)", "xlrd (<2)", "xlrd (>=2.0.1)"] +compress = ["flask-compress"] +dev = ["PyYAML (>=5.4.1)", "coloredlogs (>=15.0.1)", "fire (>=0.4.0)"] +diskcache = ["diskcache (>=5.2.1)", "multiprocess (>=0.70.12)", "psutil (>=5.8.0)"] +testing = ["beautifulsoup4 (>=4.8.2)", "cryptography (<3.4)", "dash-testing-stub (>=0.0.2)", "lxml (>=4.6.2)", "multiprocess (>=0.70.12)", "percy (>=2.0.2)", "psutil (>=5.8.0)", "pytest (>=6.0.2)", "requests[security] (>=2.21.0)", "selenium (>=3.141.0,<=4.2.0)", "waitress (>=1.4.4)"] + +[[package]] +name = "dash-core-components" +version = "2.0.0" +description = "Core component suite for Dash" +optional = false +python-versions = "*" +files = [ + {file = "dash_core_components-2.0.0-py3-none-any.whl", hash = "sha256:52b8e8cce13b18d0802ee3acbc5e888cb1248a04968f962d63d070400af2e346"}, + {file = 
"dash_core_components-2.0.0.tar.gz", hash = "sha256:c6733874af975e552f95a1398a16c2ee7df14ce43fa60bb3718a3c6e0b63ffee"}, +] + +[[package]] +name = "dash-html-components" +version = "2.0.0" +description = "Vanilla HTML components for Dash" +optional = false +python-versions = "*" +files = [ + {file = "dash_html_components-2.0.0-py3-none-any.whl", hash = "sha256:b42cc903713c9706af03b3f2548bda4be7307a7cf89b7d6eae3da872717d1b63"}, + {file = "dash_html_components-2.0.0.tar.gz", hash = "sha256:8703a601080f02619a6390998e0b3da4a5daabe97a1fd7a9cebc09d015f26e50"}, +] + +[[package]] +name = "dash-table" +version = "5.0.0" +description = "Dash table" +optional = false +python-versions = "*" +files = [ + {file = "dash_table-5.0.0-py3-none-any.whl", hash = "sha256:19036fa352bb1c11baf38068ec62d172f0515f73ca3276c79dee49b95ddc16c9"}, + {file = "dash_table-5.0.0.tar.gz", hash = "sha256:18624d693d4c8ef2ddec99a6f167593437a7ea0bf153aa20f318c170c5bc7308"}, +] + +[[package]] +name = "debugpy" +version = "1.6.7" +description = "An implementation of the Debug Adapter Protocol for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "debugpy-1.6.7-cp310-cp310-macosx_11_0_x86_64.whl", hash = "sha256:b3e7ac809b991006ad7f857f016fa92014445085711ef111fdc3f74f66144096"}, + {file = "debugpy-1.6.7-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:e3876611d114a18aafef6383695dfc3f1217c98a9168c1aaf1a02b01ec7d8d1e"}, + {file = "debugpy-1.6.7-cp310-cp310-win32.whl", hash = "sha256:33edb4afa85c098c24cc361d72ba7c21bb92f501104514d4ffec1fb36e09c01a"}, + {file = "debugpy-1.6.7-cp310-cp310-win_amd64.whl", hash = "sha256:ed6d5413474e209ba50b1a75b2d9eecf64d41e6e4501977991cdc755dc83ab0f"}, + {file = "debugpy-1.6.7-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:38ed626353e7c63f4b11efad659be04c23de2b0d15efff77b60e4740ea685d07"}, + {file = "debugpy-1.6.7-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:279d64c408c60431c8ee832dfd9ace7c396984fd7341fa3116aee414e7dcd88d"}, + {file = "debugpy-1.6.7-cp37-cp37m-win32.whl", hash = "sha256:dbe04e7568aa69361a5b4c47b4493d5680bfa3a911d1e105fbea1b1f23f3eb45"}, + {file = "debugpy-1.6.7-cp37-cp37m-win_amd64.whl", hash = "sha256:f90a2d4ad9a035cee7331c06a4cf2245e38bd7c89554fe3b616d90ab8aab89cc"}, + {file = "debugpy-1.6.7-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:5224eabbbeddcf1943d4e2821876f3e5d7d383f27390b82da5d9558fd4eb30a9"}, + {file = "debugpy-1.6.7-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bae1123dff5bfe548ba1683eb972329ba6d646c3a80e6b4c06cd1b1dd0205e9b"}, + {file = "debugpy-1.6.7-cp38-cp38-win32.whl", hash = "sha256:9cd10cf338e0907fdcf9eac9087faa30f150ef5445af5a545d307055141dd7a4"}, + {file = "debugpy-1.6.7-cp38-cp38-win_amd64.whl", hash = "sha256:aaf6da50377ff4056c8ed470da24632b42e4087bc826845daad7af211e00faad"}, + {file = "debugpy-1.6.7-cp39-cp39-macosx_11_0_x86_64.whl", hash = "sha256:0679b7e1e3523bd7d7869447ec67b59728675aadfc038550a63a362b63029d2c"}, + {file = "debugpy-1.6.7-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:de86029696e1b3b4d0d49076b9eba606c226e33ae312a57a46dca14ff370894d"}, + {file = "debugpy-1.6.7-cp39-cp39-win32.whl", hash = "sha256:d71b31117779d9a90b745720c0eab54ae1da76d5b38c8026c654f4a066b0130a"}, + {file = "debugpy-1.6.7-cp39-cp39-win_amd64.whl", hash = "sha256:c0ff93ae90a03b06d85b2c529eca51ab15457868a377c4cc40a23ab0e4e552a3"}, + {file = "debugpy-1.6.7-py2.py3-none-any.whl", hash = 
"sha256:53f7a456bc50706a0eaabecf2d3ce44c4d5010e46dfc65b6b81a518b42866267"}, + {file = "debugpy-1.6.7.zip", hash = "sha256:c4c2f0810fa25323abfdfa36cbbbb24e5c3b1a42cb762782de64439c575d67f2"}, +] + +[[package]] +name = "decorator" +version = "5.1.1" +description = "Decorators for Humans" +optional = false +python-versions = ">=3.5" +files = [ + {file = "decorator-5.1.1-py3-none-any.whl", hash = "sha256:b8c3f85900b9dc423225913c5aace94729fe1fa9763b38939a95226f02d37186"}, + {file = "decorator-5.1.1.tar.gz", hash = "sha256:637996211036b6385ef91435e4fae22989472f9d571faba8927ba8253acbc330"}, +] + +[[package]] +name = "defusedxml" +version = "0.7.1" +description = "XML bomb protection for Python stdlib modules" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "defusedxml-0.7.1-py2.py3-none-any.whl", hash = "sha256:a352e7e428770286cc899e2542b6cdaedb2b4953ff269a210103ec58f6198a61"}, + {file = "defusedxml-0.7.1.tar.gz", hash = "sha256:1bb3032db185915b62d7c6209c5a8792be6a32ab2fedacc84e01b52c51aa3e69"}, +] + +[[package]] +name = "descartes" +version = "1.1.0" +description = "Use geometric objects as matplotlib paths and patches" +optional = false +python-versions = "*" +files = [ + {file = "descartes-1.1.0-py2-none-any.whl", hash = "sha256:b7e412e7e6e294412f1d0f661f187babc970088c2456089e6801eebb043c2e1b"}, + {file = "descartes-1.1.0-py3-none-any.whl", hash = "sha256:4c62dc41109689d03e4b35de0a2bcbdeeb81047badc607c4415d5c753bd683af"}, + {file = "descartes-1.1.0.tar.gz", hash = "sha256:135a502146af5ed6ff359975e2ebc5fa4b71b5432c355c2cafdc6dea1337035b"}, +] + +[package.dependencies] +matplotlib = "*" + +[[package]] +name = "exceptiongroup" +version = "1.1.1" +description = "Backport of PEP 654 (exception groups)" +optional = false +python-versions = ">=3.7" +files = [ + {file = "exceptiongroup-1.1.1-py3-none-any.whl", hash = "sha256:232c37c63e4f682982c8b6459f33a8981039e5fb8756b2074364e5055c498c9e"}, + {file = "exceptiongroup-1.1.1.tar.gz", hash = "sha256:d484c3090ba2889ae2928419117447a14daf3c1231d5e30d0aae34f354f01785"}, +] + +[package.extras] +test = ["pytest (>=6)"] + +[[package]] +name = "executing" +version = "1.2.0" +description = "Get the currently executing AST node of a frame, and other information" +optional = false +python-versions = "*" +files = [ + {file = "executing-1.2.0-py2.py3-none-any.whl", hash = "sha256:0314a69e37426e3608aada02473b4161d4caf5a4b244d1d0c48072b8fee7bacc"}, + {file = "executing-1.2.0.tar.gz", hash = "sha256:19da64c18d2d851112f09c287f8d3dbbdf725ab0e569077efb6cdcbd3497c107"}, +] + +[package.extras] +tests = ["asttokens", "littleutils", "pytest", "rich"] + +[[package]] +name = "fastjsonschema" +version = "2.17.1" +description = "Fastest Python implementation of JSON schema" +optional = false +python-versions = "*" +files = [ + {file = "fastjsonschema-2.17.1-py3-none-any.whl", hash = "sha256:4b90b252628ca695280924d863fe37234eebadc29c5360d322571233dc9746e0"}, + {file = "fastjsonschema-2.17.1.tar.gz", hash = "sha256:f4eeb8a77cef54861dbf7424ac8ce71306f12cbb086c45131bcba2c6a4f726e3"}, +] + +[package.extras] +devel = ["colorama", "json-spec", "jsonschema", "pylint", "pytest", "pytest-benchmark", "pytest-cache", "validictory"] + +[[package]] +name = "fire" +version = "0.5.0" +description = "A library for automatically generating command line interfaces." 
+optional = false +python-versions = "*" +files = [ + {file = "fire-0.5.0.tar.gz", hash = "sha256:a6b0d49e98c8963910021f92bba66f65ab440da2982b78eb1bbf95a0a34aacc6"}, +] + +[package.dependencies] +six = "*" +termcolor = "*" + +[[package]] +name = "flask" +version = "2.2.5" +description = "A simple framework for building complex web applications." +optional = false +python-versions = ">=3.7" +files = [ + {file = "Flask-2.2.5-py3-none-any.whl", hash = "sha256:58107ed83443e86067e41eff4631b058178191a355886f8e479e347fa1285fdf"}, + {file = "Flask-2.2.5.tar.gz", hash = "sha256:edee9b0a7ff26621bd5a8c10ff484ae28737a2410d99b0bb9a6850c7fb977aa0"}, +] + +[package.dependencies] +click = ">=8.0" +importlib-metadata = {version = ">=3.6.0", markers = "python_version < \"3.10\""} +itsdangerous = ">=2.0" +Jinja2 = ">=3.0" +Werkzeug = ">=2.2.2" + +[package.extras] +async = ["asgiref (>=3.2)"] +dotenv = ["python-dotenv"] + +[[package]] +name = "fonttools" +version = "4.40.0" +description = "Tools to manipulate font files" +optional = false +python-versions = ">=3.8" +files = [ + {file = "fonttools-4.40.0-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:b802dcbf9bcff74672f292b2466f6589ab8736ce4dcf36f48eb994c2847c4b30"}, + {file = "fonttools-4.40.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:7f6e3fa3da923063c286320e728ba2270e49c73386e3a711aa680f4b0747d692"}, + {file = "fonttools-4.40.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5fdf60f8a5c6bcce7d024a33f7e4bc7921f5b74e8ea13bccd204f2c8b86f3470"}, + {file = "fonttools-4.40.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:91784e21a1a085fac07c6a407564f4a77feb471b5954c9ee55a4f9165151f6c1"}, + {file = "fonttools-4.40.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:05171f3c546f64d78569f10adc0de72561882352cac39ec7439af12304d8d8c0"}, + {file = "fonttools-4.40.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:7449e5e306f3a930a8944c85d0cbc8429cba13503372a1a40f23124d6fb09b58"}, + {file = "fonttools-4.40.0-cp310-cp310-win32.whl", hash = "sha256:bae8c13abbc2511e9a855d2142c0ab01178dd66b1a665798f357da0d06253e0d"}, + {file = "fonttools-4.40.0-cp310-cp310-win_amd64.whl", hash = "sha256:425b74a608427499b0e45e433c34ddc350820b6f25b7c8761963a08145157a66"}, + {file = "fonttools-4.40.0-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:00ab569b2a3e591e00425023ade87e8fef90380c1dde61be7691cb524ca5f743"}, + {file = "fonttools-4.40.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:18ea64ac43e94c9e0c23d7a9475f1026be0e25b10dda8f236fc956188761df97"}, + {file = "fonttools-4.40.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:022c4a16b412293e7f1ce21b8bab7a6f9d12c4ffdf171fdc67122baddb973069"}, + {file = "fonttools-4.40.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:530c5d35109f3e0cea2535742d6a3bc99c0786cf0cbd7bb2dc9212387f0d908c"}, + {file = "fonttools-4.40.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:5e00334c66f4e83535384cb5339526d01d02d77f142c23b2f97bd6a4f585497a"}, + {file = "fonttools-4.40.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:eb52c10fda31159c22c7ed85074e05f8b97da8773ea461706c273e31bcbea836"}, + {file = "fonttools-4.40.0-cp311-cp311-win32.whl", hash = "sha256:6a8d71b9a5c884c72741868e845c0e563c5d83dcaf10bb0ceeec3b4b2eb14c67"}, + {file = "fonttools-4.40.0-cp311-cp311-win_amd64.whl", hash = "sha256:15abb3d055c1b2dff9ce376b6c3db10777cb74b37b52b78f61657634fd348a0d"}, + {file = 
"fonttools-4.40.0-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:14037c31138fbd21847ad5e5441dfdde003e0a8f3feb5812a1a21fd1c255ffbd"}, + {file = "fonttools-4.40.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:94c915f6716589f78bc00fbc14c5b8de65cfd11ee335d32504f1ef234524cb24"}, + {file = "fonttools-4.40.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:37467cee0f32cada2ec08bc16c9c31f9b53ea54b2f5604bf25a1246b5f50593a"}, + {file = "fonttools-4.40.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:56d4d85f5374b45b08d2f928517d1e313ea71b4847240398decd0ab3ebbca885"}, + {file = "fonttools-4.40.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:8c4305b171b61040b1ee75d18f9baafe58bd3b798d1670078efe2c92436bfb63"}, + {file = "fonttools-4.40.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:a954b90d1473c85a22ecf305761d9fd89da93bbd31dae86e7dea436ad2cb5dc9"}, + {file = "fonttools-4.40.0-cp38-cp38-win32.whl", hash = "sha256:1bc4c5b147be8dbc5df9cc8ac5e93ee914ad030fe2a201cc8f02f499db71011d"}, + {file = "fonttools-4.40.0-cp38-cp38-win_amd64.whl", hash = "sha256:8a917828dbfdb1cbe50cf40eeae6fbf9c41aef9e535649ed8f4982b2ef65c091"}, + {file = "fonttools-4.40.0-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:882983279bf39afe4e945109772c2ffad2be2c90983d6559af8b75c19845a80a"}, + {file = "fonttools-4.40.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:c55f1b4109dbc3aeb496677b3e636d55ef46dc078c2a5e3f3db4e90f1c6d2907"}, + {file = "fonttools-4.40.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ec468c022d09f1817c691cf884feb1030ef6f1e93e3ea6831b0d8144c06480d1"}, + {file = "fonttools-4.40.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:6d5adf4ba114f028fc3f5317a221fd8b0f4ef7a2e5524a2b1e0fd891b093791a"}, + {file = "fonttools-4.40.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:aa83b3f151bc63970f39b2b42a06097c5a22fd7ed9f7ba008e618de4503d3895"}, + {file = "fonttools-4.40.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:97d95b8301b62bdece1af943b88bcb3680fd385f88346a4a899ee145913b414a"}, + {file = "fonttools-4.40.0-cp39-cp39-win32.whl", hash = "sha256:1a003608400dd1cca3e089e8c94973c6b51a4fb1ef00ff6d7641617b9242e637"}, + {file = "fonttools-4.40.0-cp39-cp39-win_amd64.whl", hash = "sha256:7961575221e3da0841c75da53833272c520000d76f7f71274dbf43370f8a1065"}, + {file = "fonttools-4.40.0-py3-none-any.whl", hash = "sha256:200729d12461e2038700d31f0d49ad5a7b55855dec7525074979a06b46f88505"}, + {file = "fonttools-4.40.0.tar.gz", hash = "sha256:337b6e83d7ee73c40ea62407f2ce03b07c3459e213b6f332b94a69923b9e1cb9"}, +] + +[package.extras] +all = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "fs (>=2.2.0,<3)", "lxml (>=4.0,<5)", "lz4 (>=1.7.4.2)", "matplotlib", "munkres", "scipy", "skia-pathops (>=0.5.0)", "sympy", "uharfbuzz (>=0.23.0)", "unicodedata2 (>=15.0.0)", "xattr", "zopfli (>=0.1.4)"] +graphite = ["lz4 (>=1.7.4.2)"] +interpolatable = ["munkres", "scipy"] +lxml = ["lxml (>=4.0,<5)"] +pathops = ["skia-pathops (>=0.5.0)"] +plot = ["matplotlib"] +repacker = ["uharfbuzz (>=0.23.0)"] +symfont = ["sympy"] +type1 = ["xattr"] +ufo = ["fs (>=2.2.0,<3)"] +unicode = ["unicodedata2 (>=15.0.0)"] +woff = ["brotli (>=1.0.1)", "brotlicffi (>=0.8.0)", "zopfli (>=0.1.4)"] + +[[package]] +name = "fqdn" +version = "1.5.1" +description = "Validates fully-qualified domain names against RFC 1123, so that they are acceptable to modern bowsers" +optional = false +python-versions = ">=2.7, !=3.0, !=3.1, !=3.2, !=3.3, 
!=3.4, <4" +files = [ + {file = "fqdn-1.5.1-py3-none-any.whl", hash = "sha256:3a179af3761e4df6eb2e026ff9e1a3033d3587bf980a0b1b2e1e5d08d7358014"}, + {file = "fqdn-1.5.1.tar.gz", hash = "sha256:105ed3677e767fb5ca086a0c1f4bb66ebc3c100be518f0e0d755d9eae164d89f"}, +] + +[[package]] +name = "idna" +version = "3.4" +description = "Internationalized Domain Names in Applications (IDNA)" +optional = false +python-versions = ">=3.5" +files = [ + {file = "idna-3.4-py3-none-any.whl", hash = "sha256:90b77e79eaa3eba6de819a0c442c0b4ceefc341a7a2ab77d7562bf49f425c5c2"}, + {file = "idna-3.4.tar.gz", hash = "sha256:814f528e8dead7d329833b91c5faa87d60bf71824cd12a7530b5526063d02cb4"}, +] + +[[package]] +name = "importlib-metadata" +version = "6.7.0" +description = "Read metadata from Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_metadata-6.7.0-py3-none-any.whl", hash = "sha256:cb52082e659e97afc5dac71e79de97d8681de3aa07ff18578330904a9d18e5b5"}, + {file = "importlib_metadata-6.7.0.tar.gz", hash = "sha256:1aaf550d4f73e5d6783e7acb77aec43d49da8017410afae93822cc9cca98c4d4"}, +] + +[package.dependencies] +zipp = ">=0.5" + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +perf = ["ipython"] +testing = ["flufl.flake8", "importlib-resources (>=1.3)", "packaging", "pyfakefs", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf (>=0.9.2)", "pytest-ruff"] + +[[package]] +name = "importlib-resources" +version = "5.12.0" +description = "Read resources from Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "importlib_resources-5.12.0-py3-none-any.whl", hash = "sha256:7b1deeebbf351c7578e09bf2f63fa2ce8b5ffec296e0d349139d43cca061a81a"}, + {file = "importlib_resources-5.12.0.tar.gz", hash = "sha256:4be82589bf5c1d7999aedf2a45159d10cb3ca4f19b2271f8792bc8e6da7b22f6"}, +] + +[package.dependencies] +zipp = {version = ">=3.1.0", markers = "python_version < \"3.10\""} + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-lint"] +testing = ["flake8 (<5)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[[package]] +name = "iniconfig" +version = "2.0.0" +description = "brain-dead simple config-ini parsing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "iniconfig-2.0.0-py3-none-any.whl", hash = "sha256:b6a85871a79d2e3b22d2d1b94ac2824226a63c6b741c88f7ae975f18b6778374"}, + {file = "iniconfig-2.0.0.tar.gz", hash = "sha256:2d91e135bf72d31a410b17c16da610a82cb55f6b0477d1a902134b24a455b8b3"}, +] + +[[package]] +name = "ipykernel" +version = "6.23.2" +description = "IPython Kernel for Jupyter" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipykernel-6.23.2-py3-none-any.whl", hash = "sha256:7ccb6e2d32fd958c21453db494c914f3474908a2fdefd99ab548a5375b548d1f"}, + {file = "ipykernel-6.23.2.tar.gz", hash = "sha256:fcfb67c5b504aa1bfcda1c5b3716636239e0f7b9290958f1c558c79b4c0e7ed5"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "platform_system == \"Darwin\""} +comm = ">=0.1.1" +debugpy = ">=1.6.5" +ipython = ">=7.23.1" +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +matplotlib-inline = ">=0.1" +nest-asyncio = "*" 
+packaging = "*" +psutil = "*" +pyzmq = ">=20" +tornado = ">=6.1" +traitlets = ">=5.4.0" + +[package.extras] +cov = ["coverage[toml]", "curio", "matplotlib", "pytest-cov", "trio"] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "trio"] +pyqt5 = ["pyqt5"] +pyside6 = ["pyside6"] +test = ["flaky", "ipyparallel", "pre-commit", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "ipython" +version = "8.12.2" +description = "IPython: Productive Interactive Computing" +optional = false +python-versions = ">=3.8" +files = [ + {file = "ipython-8.12.2-py3-none-any.whl", hash = "sha256:ea8801f15dfe4ffb76dea1b09b847430ffd70d827b41735c64a0638a04103bfc"}, + {file = "ipython-8.12.2.tar.gz", hash = "sha256:c7b80eb7f5a855a88efc971fda506ff7a91c280b42cdae26643e0f601ea281ea"}, +] + +[package.dependencies] +appnope = {version = "*", markers = "sys_platform == \"darwin\""} +backcall = "*" +colorama = {version = "*", markers = "sys_platform == \"win32\""} +decorator = "*" +jedi = ">=0.16" +matplotlib-inline = "*" +pexpect = {version = ">4.3", markers = "sys_platform != \"win32\""} +pickleshare = "*" +prompt-toolkit = ">=3.0.30,<3.0.37 || >3.0.37,<3.1.0" +pygments = ">=2.4.0" +stack-data = "*" +traitlets = ">=5" +typing-extensions = {version = "*", markers = "python_version < \"3.10\""} + +[package.extras] +all = ["black", "curio", "docrepr", "ipykernel", "ipyparallel", "ipywidgets", "matplotlib", "matplotlib (!=3.2.0)", "nbconvert", "nbformat", "notebook", "numpy (>=1.21)", "pandas", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "qtconsole", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "trio", "typing-extensions"] +black = ["black"] +doc = ["docrepr", "ipykernel", "matplotlib", "pytest (<7)", "pytest (<7.1)", "pytest-asyncio", "setuptools (>=18.5)", "sphinx (>=1.3)", "sphinx-rtd-theme", "stack-data", "testpath", "typing-extensions"] +kernel = ["ipykernel"] +nbconvert = ["nbconvert"] +nbformat = ["nbformat"] +notebook = ["ipywidgets", "notebook"] +parallel = ["ipyparallel"] +qtconsole = ["qtconsole"] +test = ["pytest (<7.1)", "pytest-asyncio", "testpath"] +test-extra = ["curio", "matplotlib (!=3.2.0)", "nbformat", "numpy (>=1.21)", "pandas", "pytest (<7.1)", "pytest-asyncio", "testpath", "trio"] + +[[package]] +name = "ipython-genutils" +version = "0.2.0" +description = "Vestigial utilities from IPython" +optional = false +python-versions = "*" +files = [ + {file = "ipython_genutils-0.2.0-py2.py3-none-any.whl", hash = "sha256:72dd37233799e619666c9f639a9da83c34013a73e8bbc79a7a6348d93c61fab8"}, + {file = "ipython_genutils-0.2.0.tar.gz", hash = "sha256:eb2e116e75ecef9d4d228fdc66af54269afa26ab4463042e33785b887c628ba8"}, +] + +[[package]] +name = "ipywidgets" +version = "8.0.6" +description = "Jupyter interactive widgets" +optional = false +python-versions = ">=3.7" +files = [ + {file = "ipywidgets-8.0.6-py3-none-any.whl", hash = "sha256:a60bf8d2528997e05ac83fd19ea2fbe65f2e79fbe1b2b35779bdfc46c2941dcc"}, + {file = "ipywidgets-8.0.6.tar.gz", hash = "sha256:de7d779f2045d60de9f6c25f653fdae2dba57898e6a1284494b3ba20b6893bb8"}, +] + +[package.dependencies] +ipykernel = ">=4.5.1" +ipython = ">=6.1.0" +jupyterlab-widgets = ">=3.0.7,<3.1.0" +traitlets = ">=4.3.1" +widgetsnbextension = ">=4.0.7,<4.1.0" + +[package.extras] +test = ["ipykernel", "jsonschema", "pytest (>=3.6.0)", "pytest-cov", "pytz"] + +[[package]] +name = "isoduration" +version = "20.11.0" 
+description = "Operations with ISO 8601 durations" +optional = false +python-versions = ">=3.7" +files = [ + {file = "isoduration-20.11.0-py3-none-any.whl", hash = "sha256:b2904c2a4228c3d44f409c8ae8e2370eb21a26f7ac2ec5446df141dde3452042"}, + {file = "isoduration-20.11.0.tar.gz", hash = "sha256:ac2f9015137935279eac671f94f89eb00584f940f5dc49462a0c4ee692ba1bd9"}, +] + +[package.dependencies] +arrow = ">=0.15.0" + +[[package]] +name = "itsdangerous" +version = "2.1.2" +description = "Safely pass data to untrusted environments and back." +optional = false +python-versions = ">=3.7" +files = [ + {file = "itsdangerous-2.1.2-py3-none-any.whl", hash = "sha256:2c2349112351b88699d8d4b6b075022c0808887cb7ad10069318a8b0bc88db44"}, + {file = "itsdangerous-2.1.2.tar.gz", hash = "sha256:5dbbc68b317e5e42f327f9021763545dc3fc3bfe22e6deb96aaf1fc38874156a"}, +] + +[[package]] +name = "jedi" +version = "0.18.2" +description = "An autocompletion tool for Python that can be used for text editors." +optional = false +python-versions = ">=3.6" +files = [ + {file = "jedi-0.18.2-py2.py3-none-any.whl", hash = "sha256:203c1fd9d969ab8f2119ec0a3342e0b49910045abe6af0a3ae83a5764d54639e"}, + {file = "jedi-0.18.2.tar.gz", hash = "sha256:bae794c30d07f6d910d32a7048af09b5a39ed740918da923c6b780790ebac612"}, +] + +[package.dependencies] +parso = ">=0.8.0,<0.9.0" + +[package.extras] +docs = ["Jinja2 (==2.11.3)", "MarkupSafe (==1.1.1)", "Pygments (==2.8.1)", "alabaster (==0.7.12)", "babel (==2.9.1)", "chardet (==4.0.0)", "commonmark (==0.8.1)", "docutils (==0.17.1)", "future (==0.18.2)", "idna (==2.10)", "imagesize (==1.2.0)", "mock (==1.0.1)", "packaging (==20.9)", "pyparsing (==2.4.7)", "pytz (==2021.1)", "readthedocs-sphinx-ext (==2.1.4)", "recommonmark (==0.5.0)", "requests (==2.25.1)", "six (==1.15.0)", "snowballstemmer (==2.1.0)", "sphinx (==1.8.5)", "sphinx-rtd-theme (==0.4.3)", "sphinxcontrib-serializinghtml (==1.1.4)", "sphinxcontrib-websupport (==1.2.4)", "urllib3 (==1.26.4)"] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["Django (<3.1)", "attrs", "colorama", "docopt", "pytest (<7.0.0)"] + +[[package]] +name = "jinja2" +version = "3.1.2" +description = "A very fast and expressive template engine." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "Jinja2-3.1.2-py3-none-any.whl", hash = "sha256:6088930bfe239f0e6710546ab9c19c9ef35e29792895fed6e6e31a023a182a61"}, + {file = "Jinja2-3.1.2.tar.gz", hash = "sha256:31351a702a408a9e7595a8fc6150fc3f43bb6bf7e319770cbc0db9df9437e852"}, +] + +[package.dependencies] +MarkupSafe = ">=2.0" + +[package.extras] +i18n = ["Babel (>=2.7)"] + +[[package]] +name = "joblib" +version = "1.2.0" +description = "Lightweight pipelining with Python functions" +optional = false +python-versions = ">=3.7" +files = [ + {file = "joblib-1.2.0-py3-none-any.whl", hash = "sha256:091138ed78f800342968c523bdde947e7a305b8594b910a0fea2ab83c3c6d385"}, + {file = "joblib-1.2.0.tar.gz", hash = "sha256:e1cee4a79e4af22881164f218d4311f60074197fb707e082e803b61f6d137018"}, +] + +[[package]] +name = "jsonpointer" +version = "2.4" +description = "Identify specific nodes in a JSON document (RFC 6901)" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*, !=3.5.*, !=3.6.*" +files = [ + {file = "jsonpointer-2.4-py2.py3-none-any.whl", hash = "sha256:15d51bba20eea3165644553647711d150376234112651b4f1811022aecad7d7a"}, +] + +[[package]] +name = "jsonschema" +version = "4.17.3" +description = "An implementation of JSON Schema validation for Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jsonschema-4.17.3-py3-none-any.whl", hash = "sha256:a870ad254da1a8ca84b6a2905cac29d265f805acc57af304784962a2aa6508f6"}, + {file = "jsonschema-4.17.3.tar.gz", hash = "sha256:0f864437ab8b6076ba6707453ef8f98a6a0d512a80e93f8abdb676f737ecb60d"}, +] + +[package.dependencies] +attrs = ">=17.4.0" +fqdn = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +idna = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +importlib-resources = {version = ">=1.4.0", markers = "python_version < \"3.9\""} +isoduration = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +jsonpointer = {version = ">1.13", optional = true, markers = "extra == \"format-nongpl\""} +pkgutil-resolve-name = {version = ">=1.3.10", markers = "python_version < \"3.9\""} +pyrsistent = ">=0.14.0,<0.17.0 || >0.17.0,<0.17.1 || >0.17.1,<0.17.2 || >0.17.2" +rfc3339-validator = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +rfc3986-validator = {version = ">0.1.0", optional = true, markers = "extra == \"format-nongpl\""} +uri-template = {version = "*", optional = true, markers = "extra == \"format-nongpl\""} +webcolors = {version = ">=1.11", optional = true, markers = "extra == \"format-nongpl\""} + +[package.extras] +format = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3987", "uri-template", "webcolors (>=1.11)"] +format-nongpl = ["fqdn", "idna", "isoduration", "jsonpointer (>1.13)", "rfc3339-validator", "rfc3986-validator (>0.1.0)", "uri-template", "webcolors (>=1.11)"] + +[[package]] +name = "jupyter" +version = "1.0.0" +description = "Jupyter metapackage. Install all the Jupyter components in one go." 
+optional = false +python-versions = "*" +files = [ + {file = "jupyter-1.0.0-py2.py3-none-any.whl", hash = "sha256:5b290f93b98ffbc21c0c7e749f054b3267782166d72fa5e3ed1ed4eaf34a2b78"}, + {file = "jupyter-1.0.0.tar.gz", hash = "sha256:d9dc4b3318f310e34c82951ea5d6683f67bed7def4b259fafbfe4f1beb1d8e5f"}, + {file = "jupyter-1.0.0.zip", hash = "sha256:3e1f86076bbb7c8c207829390305a2b1fe836d471ed54be66a3b8c41e7f46cc7"}, +] + +[package.dependencies] +ipykernel = "*" +ipywidgets = "*" +jupyter-console = "*" +nbconvert = "*" +notebook = "*" +qtconsole = "*" + +[[package]] +name = "jupyter-client" +version = "8.2.0" +description = "Jupyter protocol implementation and client libraries" +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_client-8.2.0-py3-none-any.whl", hash = "sha256:b18219aa695d39e2ad570533e0d71fb7881d35a873051054a84ee2a17c4b7389"}, + {file = "jupyter_client-8.2.0.tar.gz", hash = "sha256:9fe233834edd0e6c0aa5f05ca2ab4bdea1842bfd2d8a932878212fc5301ddaf0"}, +] + +[package.dependencies] +importlib-metadata = {version = ">=4.8.3", markers = "python_version < \"3.10\""} +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +python-dateutil = ">=2.8.2" +pyzmq = ">=23.0" +tornado = ">=6.2" +traitlets = ">=5.3" + +[package.extras] +docs = ["ipykernel", "myst-parser", "pydata-sphinx-theme", "sphinx (>=4)", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling"] +test = ["coverage", "ipykernel (>=6.14)", "mypy", "paramiko", "pre-commit", "pytest", "pytest-cov", "pytest-jupyter[client] (>=0.4.1)", "pytest-timeout"] + +[[package]] +name = "jupyter-console" +version = "6.6.3" +description = "Jupyter terminal console" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyter_console-6.6.3-py3-none-any.whl", hash = "sha256:309d33409fcc92ffdad25f0bcdf9a4a9daa61b6f341177570fdac03de5352485"}, + {file = "jupyter_console-6.6.3.tar.gz", hash = "sha256:566a4bf31c87adbfadf22cdf846e3069b59a71ed5da71d6ba4d8aaad14a53539"}, +] + +[package.dependencies] +ipykernel = ">=6.14" +ipython = "*" +jupyter-client = ">=7.0.0" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +prompt-toolkit = ">=3.0.30" +pygments = "*" +pyzmq = ">=17" +traitlets = ">=5.4" + +[package.extras] +test = ["flaky", "pexpect", "pytest"] + +[[package]] +name = "jupyter-core" +version = "5.3.1" +description = "Jupyter core package. A base package on which Jupyter projects rely." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_core-5.3.1-py3-none-any.whl", hash = "sha256:ae9036db959a71ec1cac33081eeb040a79e681f08ab68b0883e9a676c7a90dce"}, + {file = "jupyter_core-5.3.1.tar.gz", hash = "sha256:5ba5c7938a7f97a6b0481463f7ff0dbac7c15ba48cf46fa4035ca6e838aa1aba"}, +] + +[package.dependencies] +platformdirs = ">=2.5" +pywin32 = {version = ">=300", markers = "sys_platform == \"win32\" and platform_python_implementation != \"PyPy\""} +traitlets = ">=5.3" + +[package.extras] +docs = ["myst-parser", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-spelling", "traitlets"] +test = ["ipykernel", "pre-commit", "pytest", "pytest-cov", "pytest-timeout"] + +[[package]] +name = "jupyter-events" +version = "0.6.3" +description = "Jupyter Event System library" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyter_events-0.6.3-py3-none-any.whl", hash = "sha256:57a2749f87ba387cd1bfd9b22a0875b889237dbf2edc2121ebb22bde47036c17"}, + {file = "jupyter_events-0.6.3.tar.gz", hash = "sha256:9a6e9995f75d1b7146b436ea24d696ce3a35bfa8bfe45e0c33c334c79464d0b3"}, +] + +[package.dependencies] +jsonschema = {version = ">=3.2.0", extras = ["format-nongpl"]} +python-json-logger = ">=2.0.4" +pyyaml = ">=5.3" +rfc3339-validator = "*" +rfc3986-validator = ">=0.1.1" +traitlets = ">=5.3" + +[package.extras] +cli = ["click", "rich"] +docs = ["jupyterlite-sphinx", "myst-parser", "pydata-sphinx-theme", "sphinxcontrib-spelling"] +test = ["click", "coverage", "pre-commit", "pytest (>=7.0)", "pytest-asyncio (>=0.19.0)", "pytest-console-scripts", "pytest-cov", "rich"] + +[[package]] +name = "jupyter-server" +version = "2.6.0" +description = "The backend—i.e. core services, APIs, and REST endpoints—to Jupyter web applications." +optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_server-2.6.0-py3-none-any.whl", hash = "sha256:19525a1515b5999618a91b3e99ec9f6869aa8c5ba73e0b6279fcda918b54ba36"}, + {file = "jupyter_server-2.6.0.tar.gz", hash = "sha256:ae4af349f030ed08dd78cb7ac1a03a92d886000380c9ea6283f3c542a81f4b06"}, +] + +[package.dependencies] +anyio = ">=3.1.0" +argon2-cffi = "*" +jinja2 = "*" +jupyter-client = ">=7.4.4" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +jupyter-events = ">=0.6.0" +jupyter-server-terminals = "*" +nbconvert = ">=6.4.4" +nbformat = ">=5.3.0" +overrides = "*" +packaging = "*" +prometheus-client = "*" +pywinpty = {version = "*", markers = "os_name == \"nt\""} +pyzmq = ">=24" +send2trash = "*" +terminado = ">=0.8.3" +tornado = ">=6.2.0" +traitlets = ">=5.6.0" +websocket-client = "*" + +[package.extras] +docs = ["ipykernel", "jinja2", "jupyter-client", "jupyter-server", "myst-parser", "nbformat", "prometheus-client", "pydata-sphinx-theme", "send2trash", "sphinx-autodoc-typehints", "sphinxcontrib-github-alt", "sphinxcontrib-openapi (>=0.8.0)", "sphinxcontrib-spelling", "sphinxemoji", "tornado", "typing-extensions"] +test = ["ipykernel", "pre-commit", "pytest (>=7.0)", "pytest-console-scripts", "pytest-jupyter[server] (>=0.4)", "pytest-timeout", "requests"] + +[[package]] +name = "jupyter-server-terminals" +version = "0.4.4" +description = "A Jupyter Server Extension Providing Terminals." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "jupyter_server_terminals-0.4.4-py3-none-any.whl", hash = "sha256:75779164661cec02a8758a5311e18bb8eb70c4e86c6b699403100f1585a12a36"}, + {file = "jupyter_server_terminals-0.4.4.tar.gz", hash = "sha256:57ab779797c25a7ba68e97bcfb5d7740f2b5e8a83b5e8102b10438041a7eac5d"}, +] + +[package.dependencies] +pywinpty = {version = ">=2.0.3", markers = "os_name == \"nt\""} +terminado = ">=0.8.3" + +[package.extras] +docs = ["jinja2", "jupyter-server", "mistune (<3.0)", "myst-parser", "nbformat", "packaging", "pydata-sphinx-theme", "sphinxcontrib-github-alt", "sphinxcontrib-openapi", "sphinxcontrib-spelling", "sphinxemoji", "tornado"] +test = ["coverage", "jupyter-server (>=2.0.0)", "pytest (>=7.0)", "pytest-cov", "pytest-jupyter[server] (>=0.5.3)", "pytest-timeout"] + +[[package]] +name = "jupyterlab-pygments" +version = "0.2.2" +description = "Pygments theme using JupyterLab CSS variables" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyterlab_pygments-0.2.2-py2.py3-none-any.whl", hash = "sha256:2405800db07c9f770863bcf8049a529c3dd4d3e28536638bd7c1c01d2748309f"}, + {file = "jupyterlab_pygments-0.2.2.tar.gz", hash = "sha256:7405d7fde60819d905a9fa8ce89e4cd830e318cdad22a0030f7a901da705585d"}, +] + +[[package]] +name = "jupyterlab-widgets" +version = "3.0.7" +description = "Jupyter interactive widgets for JupyterLab" +optional = false +python-versions = ">=3.7" +files = [ + {file = "jupyterlab_widgets-3.0.7-py3-none-any.whl", hash = "sha256:c73f8370338ec19f1bec47254752d6505b03601cbd5a67e6a0b184532f73a459"}, + {file = "jupyterlab_widgets-3.0.7.tar.gz", hash = "sha256:c3a50ed5bf528a0c7a869096503af54702f86dda1db469aee1c92dc0c01b43ca"}, +] + +[[package]] +name = "kiwisolver" +version = "1.4.4" +description = "A fast implementation of the Cassowary constraint solver" +optional = false +python-versions = ">=3.7" +files = [ + {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:2f5e60fabb7343a836360c4f0919b8cd0d6dbf08ad2ca6b9cf90bf0c76a3c4f6"}, + {file = "kiwisolver-1.4.4-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:10ee06759482c78bdb864f4109886dff7b8a56529bc1609d4f1112b93fe6423c"}, + {file = "kiwisolver-1.4.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c79ebe8f3676a4c6630fd3f777f3cfecf9289666c84e775a67d1d358578dc2e3"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:abbe9fa13da955feb8202e215c4018f4bb57469b1b78c7a4c5c7b93001699938"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7577c1987baa3adc4b3c62c33bd1118c3ef5c8ddef36f0f2c950ae0b199e100d"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f8ad8285b01b0d4695102546b342b493b3ccc6781fc28c8c6a1bb63e95d22f09"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:8ed58b8acf29798b036d347791141767ccf65eee7f26bde03a71c944449e53de"}, + {file = "kiwisolver-1.4.4-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a68b62a02953b9841730db7797422f983935aeefceb1679f0fc85cbfbd311c32"}, + {file = "kiwisolver-1.4.4-cp310-cp310-win32.whl", hash = "sha256:e92a513161077b53447160b9bd8f522edfbed4bd9759e4c18ab05d7ef7e49408"}, + {file = "kiwisolver-1.4.4-cp310-cp310-win_amd64.whl", hash = "sha256:3fe20f63c9ecee44560d0e7f116b3a747a5d7203376abeea292ab3152334d004"}, + {file = 
"kiwisolver-1.4.4-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:e0ea21f66820452a3f5d1655f8704a60d66ba1191359b96541eaf457710a5fc6"}, + {file = "kiwisolver-1.4.4-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:bc9db8a3efb3e403e4ecc6cd9489ea2bac94244f80c78e27c31dcc00d2790ac2"}, + {file = "kiwisolver-1.4.4-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:d5b61785a9ce44e5a4b880272baa7cf6c8f48a5180c3e81c59553ba0cb0821ca"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c2dbb44c3f7e6c4d3487b31037b1bdbf424d97687c1747ce4ff2895795c9bf69"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6295ecd49304dcf3bfbfa45d9a081c96509e95f4b9d0eb7ee4ec0530c4a96514"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4bd472dbe5e136f96a4b18f295d159d7f26fd399136f5b17b08c4e5f498cd494"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:bf7d9fce9bcc4752ca4a1b80aabd38f6d19009ea5cbda0e0856983cf6d0023f5"}, + {file = "kiwisolver-1.4.4-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:78d6601aed50c74e0ef02f4204da1816147a6d3fbdc8b3872d263338a9052c51"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:877272cf6b4b7e94c9614f9b10140e198d2186363728ed0f701c6eee1baec1da"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:db608a6757adabb32f1cfe6066e39b3706d8c3aa69bbc353a5b61edad36a5cb4"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_ppc64le.whl", hash = "sha256:5853eb494c71e267912275e5586fe281444eb5e722de4e131cddf9d442615626"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_s390x.whl", hash = "sha256:f0a1dbdb5ecbef0d34eb77e56fcb3e95bbd7e50835d9782a45df81cc46949750"}, + {file = "kiwisolver-1.4.4-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:283dffbf061a4ec60391d51e6155e372a1f7a4f5b15d59c8505339454f8989e4"}, + {file = "kiwisolver-1.4.4-cp311-cp311-win32.whl", hash = "sha256:d06adcfa62a4431d404c31216f0f8ac97397d799cd53800e9d3efc2fbb3cf14e"}, + {file = "kiwisolver-1.4.4-cp311-cp311-win_amd64.whl", hash = "sha256:e7da3fec7408813a7cebc9e4ec55afed2d0fd65c4754bc376bf03498d4e92686"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:62ac9cc684da4cf1778d07a89bf5f81b35834cb96ca523d3a7fb32509380cbf6"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:41dae968a94b1ef1897cb322b39360a0812661dba7c682aa45098eb8e193dbdf"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:02f79693ec433cb4b5f51694e8477ae83b3205768a6fb48ffba60549080e295b"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:d0611a0a2a518464c05ddd5a3a1a0e856ccc10e67079bb17f265ad19ab3c7597"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:db5283d90da4174865d520e7366801a93777201e91e79bacbac6e6927cbceede"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1041feb4cda8708ce73bb4dcb9ce1ccf49d553bf87c3954bdfa46f0c3f77252c"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-win32.whl", hash = "sha256:a553dadda40fef6bfa1456dc4be49b113aa92c2a9a9e8711e955618cd69622e3"}, + {file = "kiwisolver-1.4.4-cp37-cp37m-win_amd64.whl", hash = 
"sha256:03baab2d6b4a54ddbb43bba1a3a2d1627e82d205c5cf8f4c924dc49284b87166"}, + {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:841293b17ad704d70c578f1f0013c890e219952169ce8a24ebc063eecf775454"}, + {file = "kiwisolver-1.4.4-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f4f270de01dd3e129a72efad823da90cc4d6aafb64c410c9033aba70db9f1ff0"}, + {file = "kiwisolver-1.4.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f9f39e2f049db33a908319cf46624a569b36983c7c78318e9726a4cb8923b26c"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c97528e64cb9ebeff9701e7938653a9951922f2a38bd847787d4a8e498cc83ae"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:1d1573129aa0fd901076e2bfb4275a35f5b7aa60fbfb984499d661ec950320b0"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:ad881edc7ccb9d65b0224f4e4d05a1e85cf62d73aab798943df6d48ab0cd79a1"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b428ef021242344340460fa4c9185d0b1f66fbdbfecc6c63eff4b7c29fad429d"}, + {file = "kiwisolver-1.4.4-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:2e407cb4bd5a13984a6c2c0fe1845e4e41e96f183e5e5cd4d77a857d9693494c"}, + {file = "kiwisolver-1.4.4-cp38-cp38-win32.whl", hash = "sha256:75facbe9606748f43428fc91a43edb46c7ff68889b91fa31f53b58894503a191"}, + {file = "kiwisolver-1.4.4-cp38-cp38-win_amd64.whl", hash = "sha256:5bce61af018b0cb2055e0e72e7d65290d822d3feee430b7b8203d8a855e78766"}, + {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8c808594c88a025d4e322d5bb549282c93c8e1ba71b790f539567932722d7bd8"}, + {file = "kiwisolver-1.4.4-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:f0a71d85ecdd570ded8ac3d1c0f480842f49a40beb423bb8014539a9f32a5897"}, + {file = "kiwisolver-1.4.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:b533558eae785e33e8c148a8d9921692a9fe5aa516efbdff8606e7d87b9d5824"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:efda5fc8cc1c61e4f639b8067d118e742b812c930f708e6667a5ce0d13499e29"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:7c43e1e1206cd421cd92e6b3280d4385d41d7166b3ed577ac20444b6995a445f"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bc8d3bd6c72b2dd9decf16ce70e20abcb3274ba01b4e1c96031e0c4067d1e7cd"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_ppc64le.manylinux2014_ppc64le.whl", hash = "sha256:4ea39b0ccc4f5d803e3337dd46bcce60b702be4d86fd0b3d7531ef10fd99a1ac"}, + {file = "kiwisolver-1.4.4-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:968f44fdbf6dd757d12920d63b566eeb4d5b395fd2d00d29d7ef00a00582aac9"}, + {file = "kiwisolver-1.4.4-cp39-cp39-win32.whl", hash = "sha256:da7e547706e69e45d95e116e6939488d62174e033b763ab1496b4c29b76fabea"}, + {file = "kiwisolver-1.4.4-cp39-cp39-win_amd64.whl", hash = "sha256:ba59c92039ec0a66103b1d5fe588fa546373587a7d68f5c96f743c3396afc04b"}, + {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:91672bacaa030f92fc2f43b620d7b337fd9a5af28b0d6ed3f77afc43c4a64b5a"}, + {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:787518a6789009c159453da4d6b683f468ef7a65bbde796bcea803ccf191058d"}, + {file = 
"kiwisolver-1.4.4-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:da152d8cdcab0e56e4f45eb08b9aea6455845ec83172092f09b0e077ece2cf7a"}, + {file = "kiwisolver-1.4.4-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:ecb1fa0db7bf4cff9dac752abb19505a233c7f16684c5826d1f11ebd9472b871"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:28bc5b299f48150b5f822ce68624e445040595a4ac3d59251703779836eceff9"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:81e38381b782cc7e1e46c4e14cd997ee6040768101aefc8fa3c24a4cc58e98f8"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:2a66fdfb34e05b705620dd567f5a03f239a088d5a3f321e7b6ac3239d22aa286"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:872b8ca05c40d309ed13eb2e582cab0c5a05e81e987ab9c521bf05ad1d5cf5cb"}, + {file = "kiwisolver-1.4.4-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:70e7c2e7b750585569564e2e5ca9845acfaa5da56ac46df68414f29fea97be9f"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9f85003f5dfa867e86d53fac6f7e6f30c045673fa27b603c397753bebadc3008"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2e307eb9bd99801f82789b44bb45e9f541961831c7311521b13a6c85afc09767"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b1792d939ec70abe76f5054d3f36ed5656021dcad1322d1cc996d4e54165cef9"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f6cb459eea32a4e2cf18ba5fcece2dbdf496384413bc1bae15583f19e567f3b2"}, + {file = "kiwisolver-1.4.4-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:36dafec3d6d6088d34e2de6b85f9d8e2324eb734162fba59d2ba9ed7a2043d5b"}, + {file = "kiwisolver-1.4.4.tar.gz", hash = "sha256:d41997519fcba4a1e46eb4a2fe31bc12f0ff957b2b81bac28db24744f333e955"}, +] + +[[package]] +name = "lark" +version = "1.1.5" +description = "a modern parsing library" +optional = false +python-versions = "*" +files = [ + {file = "lark-1.1.5-py3-none-any.whl", hash = "sha256:8476f9903e93fbde4f6c327f74d79e9b4bd0ed9294c5dfa3164ab8c581b5de2a"}, + {file = "lark-1.1.5.tar.gz", hash = "sha256:4b534eae1f9af5b4ea000bea95776350befe1981658eea3820a01c37e504bb4d"}, +] + +[package.extras] +atomic-cache = ["atomicwrites"] +nearley = ["js2py"] +regex = ["regex"] + +[[package]] +name = "llvmlite" +version = "0.39.1" +description = "lightweight wrapper around basic LLVM functionality" +optional = false +python-versions = ">=3.7" +files = [ + {file = "llvmlite-0.39.1-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:6717c7a6e93c9d2c3d07c07113ec80ae24af45cde536b34363d4bcd9188091d9"}, + {file = "llvmlite-0.39.1-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:ddab526c5a2c4ccb8c9ec4821fcea7606933dc53f510e2a6eebb45a418d3488a"}, + {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a3f331a323d0f0ada6b10d60182ef06c20a2f01be21699999d204c5750ffd0b4"}, + {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e2c00ff204afa721b0bb9835b5bf1ba7fba210eefcec5552a9e05a63219ba0dc"}, + {file = "llvmlite-0.39.1-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = 
"sha256:16f56eb1eec3cda3a5c526bc3f63594fc24e0c8d219375afeb336f289764c6c7"}, + {file = "llvmlite-0.39.1-cp310-cp310-win32.whl", hash = "sha256:d0bfd18c324549c0fec2c5dc610fd024689de6f27c6cc67e4e24a07541d6e49b"}, + {file = "llvmlite-0.39.1-cp310-cp310-win_amd64.whl", hash = "sha256:7ebf1eb9badc2a397d4f6a6c8717447c81ac011db00064a00408bc83c923c0e4"}, + {file = "llvmlite-0.39.1-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:6546bed4e02a1c3d53a22a0bced254b3b6894693318b16c16c8e43e29d6befb6"}, + {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1578f5000fdce513712e99543c50e93758a954297575610f48cb1fd71b27c08a"}, + {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3803f11ad5f6f6c3d2b545a303d68d9fabb1d50e06a8d6418e6fcd2d0df00959"}, + {file = "llvmlite-0.39.1-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50aea09a2b933dab7c9df92361b1844ad3145bfb8dd2deb9cd8b8917d59306fb"}, + {file = "llvmlite-0.39.1-cp37-cp37m-win32.whl", hash = "sha256:b1a0bbdb274fb683f993198775b957d29a6f07b45d184c571ef2a721ce4388cf"}, + {file = "llvmlite-0.39.1-cp37-cp37m-win_amd64.whl", hash = "sha256:e172c73fccf7d6db4bd6f7de963dedded900d1a5c6778733241d878ba613980e"}, + {file = "llvmlite-0.39.1-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:e31f4b799d530255aaf0566e3da2df5bfc35d3cd9d6d5a3dcc251663656c27b1"}, + {file = "llvmlite-0.39.1-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:62c0ea22e0b9dffb020601bb65cb11dd967a095a488be73f07d8867f4e327ca5"}, + {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9ffc84ade195abd4abcf0bd3b827b9140ae9ef90999429b9ea84d5df69c9058c"}, + {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c0f158e4708dda6367d21cf15afc58de4ebce979c7a1aa2f6b977aae737e2a54"}, + {file = "llvmlite-0.39.1-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:22d36591cd5d02038912321d9ab8e4668e53ae2211da5523f454e992b5e13c36"}, + {file = "llvmlite-0.39.1-cp38-cp38-win32.whl", hash = "sha256:4c6ebace910410daf0bebda09c1859504fc2f33d122e9a971c4c349c89cca630"}, + {file = "llvmlite-0.39.1-cp38-cp38-win_amd64.whl", hash = "sha256:fb62fc7016b592435d3e3a8f680e3ea8897c3c9e62e6e6cc58011e7a4801439e"}, + {file = "llvmlite-0.39.1-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:fa9b26939ae553bf30a9f5c4c754db0fb2d2677327f2511e674aa2f5df941789"}, + {file = "llvmlite-0.39.1-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e4f212c018db951da3e1dc25c2651abc688221934739721f2dad5ff1dd5f90e7"}, + {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:39dc2160aed36e989610fc403487f11b8764b6650017ff367e45384dff88ffbf"}, + {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1ec3d70b3e507515936e475d9811305f52d049281eaa6c8273448a61c9b5b7e2"}, + {file = "llvmlite-0.39.1-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:60f8dd1e76f47b3dbdee4b38d9189f3e020d22a173c00f930b52131001d801f9"}, + {file = "llvmlite-0.39.1-cp39-cp39-win32.whl", hash = "sha256:03aee0ccd81735696474dc4f8b6be60774892a2929d6c05d093d17392c237f32"}, + {file = "llvmlite-0.39.1-cp39-cp39-win_amd64.whl", hash = "sha256:3fc14e757bc07a919221f0cbaacb512704ce5774d7fcada793f1996d6bc75f2a"}, + {file = "llvmlite-0.39.1.tar.gz", hash = "sha256:b43abd7c82e805261c425d50335be9a6c4f84264e34d6d6e475207300005d572"}, +] + +[[package]] +name = "loguru" 
+version = "0.6.0" +description = "Python logging made (stupidly) simple" +optional = false +python-versions = ">=3.5" +files = [ + {file = "loguru-0.6.0-py3-none-any.whl", hash = "sha256:4e2414d534a2ab57573365b3e6d0234dfb1d84b68b7f3b948e6fb743860a77c3"}, + {file = "loguru-0.6.0.tar.gz", hash = "sha256:066bd06758d0a513e9836fd9c6b5a75bfb3fd36841f4b996bc60b547a309d41c"}, +] + +[package.dependencies] +colorama = {version = ">=0.3.4", markers = "sys_platform == \"win32\""} +win32-setctime = {version = ">=1.0.0", markers = "sys_platform == \"win32\""} + +[package.extras] +dev = ["Sphinx (>=4.1.1)", "black (>=19.10b0)", "colorama (>=0.3.4)", "docutils (==0.16)", "flake8 (>=3.7.7)", "isort (>=5.1.1)", "pytest (>=4.6.2)", "pytest-cov (>=2.7.1)", "sphinx-autobuild (>=0.7.1)", "sphinx-rtd-theme (>=0.4.3)", "tox (>=3.9.0)"] + +[[package]] +name = "markupsafe" +version = "2.1.3" +description = "Safely add untrusted strings to HTML/XML markup." +optional = false +python-versions = ">=3.7" +files = [ + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:cd0f502fe016460680cd20aaa5a76d241d6f35a1c3350c474bac1273803893fa"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e09031c87a1e51556fdcb46e5bd4f59dfb743061cf93c4d6831bf894f125eb57"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:68e78619a61ecf91e76aa3e6e8e33fc4894a2bebe93410754bd28fce0a8a4f9f"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:65c1a9bcdadc6c28eecee2c119465aebff8f7a584dd719facdd9e825ec61ab52"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:525808b8019e36eb524b8c68acdd63a37e75714eac50e988180b169d64480a00"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:962f82a3086483f5e5f64dbad880d31038b698494799b097bc59c2edf392fce6"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:aa7bd130efab1c280bed0f45501b7c8795f9fdbeb02e965371bbef3523627779"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:c9c804664ebe8f83a211cace637506669e7890fec1b4195b505c214e50dd4eb7"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win32.whl", hash = "sha256:10bbfe99883db80bdbaff2dcf681dfc6533a614f700da1287707e8a5d78a8431"}, + {file = "MarkupSafe-2.1.3-cp310-cp310-win_amd64.whl", hash = "sha256:1577735524cdad32f9f694208aa75e422adba74f1baee7551620e43a3141f559"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:ad9e82fb8f09ade1c3e1b996a6337afac2b8b9e365f926f5a61aacc71adc5b3c"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:3c0fae6c3be832a0a0473ac912810b2877c8cb9d76ca48de1ed31e1c68386575"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:b076b6226fb84157e3f7c971a47ff3a679d837cf338547532ab866c57930dbee"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bfce63a9e7834b12b87c64d6b155fdd9b3b96191b6bd334bf37db7ff1fe457f2"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:338ae27d6b8745585f87218a3f23f1512dbf52c26c28e322dbe54bcede54ccb9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_aarch64.whl", hash = 
"sha256:e4dd52d80b8c83fdce44e12478ad2e85c64ea965e75d66dbeafb0a3e77308fcc"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:df0be2b576a7abbf737b1575f048c23fb1d769f267ec4358296f31c2479db8f9"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:5bbe06f8eeafd38e5d0a4894ffec89378b6c6a625ff57e3028921f8ff59318ac"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win32.whl", hash = "sha256:dd15ff04ffd7e05ffcb7fe79f1b98041b8ea30ae9234aed2a9168b5797c3effb"}, + {file = "MarkupSafe-2.1.3-cp311-cp311-win_amd64.whl", hash = "sha256:134da1eca9ec0ae528110ccc9e48041e0828d79f24121a1a146161103c76e686"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:8e254ae696c88d98da6555f5ace2279cf7cd5b3f52be2b5cf97feafe883b58d2"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cb0932dc158471523c9637e807d9bfb93e06a95cbf010f1a38b98623b929ef2b"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:9402b03f1a1b4dc4c19845e5c749e3ab82d5078d16a2a4c2cd2df62d57bb0707"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:ca379055a47383d02a5400cb0d110cef0a776fc644cda797db0c5696cfd7e18e"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:b7ff0f54cb4ff66dd38bebd335a38e2c22c41a8ee45aa608efc890ac3e3931bc"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:c011a4149cfbcf9f03994ec2edffcb8b1dc2d2aede7ca243746df97a5d41ce48"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:56d9f2ecac662ca1611d183feb03a3fa4406469dafe241673d521dd5ae92a155"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win32.whl", hash = "sha256:8758846a7e80910096950b67071243da3e5a20ed2546e6392603c096778d48e0"}, + {file = "MarkupSafe-2.1.3-cp37-cp37m-win_amd64.whl", hash = "sha256:787003c0ddb00500e49a10f2844fac87aa6ce977b90b0feaaf9de23c22508b24"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:2ef12179d3a291be237280175b542c07a36e7f60718296278d8593d21ca937d4"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:2c1b19b3aaacc6e57b7e25710ff571c24d6c3613a45e905b1fde04d691b98ee0"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8afafd99945ead6e075b973fefa56379c5b5c53fd8937dad92c662da5d8fd5ee"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:8c41976a29d078bb235fea9b2ecd3da465df42a562910f9022f1a03107bd02be"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d080e0a5eb2529460b30190fcfcc4199bd7f827663f858a226a81bc27beaa97e"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:69c0f17e9f5a7afdf2cc9fb2d1ce6aabdb3bafb7f38017c0b77862bcec2bbad8"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:504b320cd4b7eff6f968eddf81127112db685e81f7e36e75f9f84f0df46041c3"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:42de32b22b6b804f42c5d98be4f7e5e977ecdd9ee9b660fda1a3edf03b11792d"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win32.whl", hash = "sha256:ceb01949af7121f9fc39f7d27f91be8546f3fb112c608bc4029aef0bab86a2a5"}, + {file = "MarkupSafe-2.1.3-cp38-cp38-win_amd64.whl", hash = 
"sha256:1b40069d487e7edb2676d3fbdb2b0829ffa2cd63a2ec26c4938b2d34391b4ecc"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:8023faf4e01efadfa183e863fefde0046de576c6f14659e8782065bcece22198"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:6b2b56950d93e41f33b4223ead100ea0fe11f8e6ee5f641eb753ce4b77a7042b"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:9dcdfd0eaf283af041973bff14a2e143b8bd64e069f4c383416ecd79a81aab58"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:05fb21170423db021895e1ea1e1f3ab3adb85d1c2333cbc2310f2a26bc77272e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:282c2cb35b5b673bbcadb33a585408104df04f14b2d9b01d4c345a3b92861c2c"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:ab4a0df41e7c16a1392727727e7998a467472d0ad65f3ad5e6e765015df08636"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:7ef3cb2ebbf91e330e3bb937efada0edd9003683db6b57bb108c4001f37a02ea"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:0a4e4a1aff6c7ac4cd55792abf96c915634c2b97e3cc1c7129578aa68ebd754e"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win32.whl", hash = "sha256:fec21693218efe39aa7f8599346e90c705afa52c5b31ae019b2e57e8f6542bb2"}, + {file = "MarkupSafe-2.1.3-cp39-cp39-win_amd64.whl", hash = "sha256:3fd4abcb888d15a94f32b75d8fd18ee162ca0c064f35b11134be77050296d6ba"}, + {file = "MarkupSafe-2.1.3.tar.gz", hash = "sha256:af598ed32d6ae86f1b747b82783958b1a4ab8f617b06fe68795c7f026abbdcad"}, +] + +[[package]] +name = "matplotlib" +version = "3.5.2" +description = "Python plotting package" +optional = false +python-versions = ">=3.7" +files = [ + {file = "matplotlib-3.5.2-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:03bbb3f5f78836855e127b5dab228d99551ad0642918ccbf3067fcd52ac7ac5e"}, + {file = "matplotlib-3.5.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:49a5938ed6ef9dda560f26ea930a2baae11ea99e1c2080c8714341ecfda72a89"}, + {file = "matplotlib-3.5.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:77157be0fc4469cbfb901270c205e7d8adb3607af23cef8bd11419600647ceed"}, + {file = "matplotlib-3.5.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5844cea45d804174bf0fac219b4ab50774e504bef477fc10f8f730ce2d623441"}, + {file = "matplotlib-3.5.2-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:c87973ddec10812bddc6c286b88fdd654a666080fbe846a1f7a3b4ba7b11ab78"}, + {file = "matplotlib-3.5.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:4a05f2b37222319753a5d43c0a4fd97ed4ff15ab502113e3f2625c26728040cf"}, + {file = "matplotlib-3.5.2-cp310-cp310-win32.whl", hash = "sha256:9776e1a10636ee5f06ca8efe0122c6de57ffe7e8c843e0fb6e001e9d9256ec95"}, + {file = "matplotlib-3.5.2-cp310-cp310-win_amd64.whl", hash = "sha256:b4fedaa5a9aa9ce14001541812849ed1713112651295fdddd640ea6620e6cf98"}, + {file = "matplotlib-3.5.2-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:ee175a571e692fc8ae8e41ac353c0e07259113f4cb063b0ec769eff9717e84bb"}, + {file = "matplotlib-3.5.2-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:2e8bda1088b941ead50caabd682601bece983cadb2283cafff56e8fcddbf7d7f"}, + {file = "matplotlib-3.5.2-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = 
"sha256:9480842d5aadb6e754f0b8f4ebeb73065ac8be1855baa93cd082e46e770591e9"}, + {file = "matplotlib-3.5.2-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:6c623b355d605a81c661546af7f24414165a8a2022cddbe7380a31a4170fa2e9"}, + {file = "matplotlib-3.5.2-cp37-cp37m-win32.whl", hash = "sha256:a91426ae910819383d337ba0dc7971c7cefdaa38599868476d94389a329e599b"}, + {file = "matplotlib-3.5.2-cp37-cp37m-win_amd64.whl", hash = "sha256:c4b82c2ae6d305fcbeb0eb9c93df2602ebd2f174f6e8c8a5d92f9445baa0c1d3"}, + {file = "matplotlib-3.5.2-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:ebc27ad11df3c1661f4677a7762e57a8a91dd41b466c3605e90717c9a5f90c82"}, + {file = "matplotlib-3.5.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a32ea6e12e80dedaca2d4795d9ed40f97bfa56e6011e14f31502fdd528b9c89"}, + {file = "matplotlib-3.5.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:2a0967d4156adbd0d46db06bc1a877f0370bce28d10206a5071f9ecd6dc60b79"}, + {file = "matplotlib-3.5.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e2b696699386766ef171a259d72b203a3c75d99d03ec383b97fc2054f52e15cf"}, + {file = "matplotlib-3.5.2-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:7f409716119fa39b03da3d9602bd9b41142fab7a0568758cd136cd80b1bf36c8"}, + {file = "matplotlib-3.5.2-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:b8d3f4e71e26307e8c120b72c16671d70c5cd08ae412355c11254aa8254fb87f"}, + {file = "matplotlib-3.5.2-cp38-cp38-win32.whl", hash = "sha256:b6c63cd01cad0ea8704f1fd586e9dc5777ccedcd42f63cbbaa3eae8dd41172a1"}, + {file = "matplotlib-3.5.2-cp38-cp38-win_amd64.whl", hash = "sha256:75c406c527a3aa07638689586343f4b344fcc7ab1f79c396699eb550cd2b91f7"}, + {file = "matplotlib-3.5.2-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:4a44cdfdb9d1b2f18b1e7d315eb3843abb097869cd1ef89cfce6a488cd1b5182"}, + {file = "matplotlib-3.5.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:3d8e129af95b156b41cb3be0d9a7512cc6d73e2b2109f82108f566dbabdbf377"}, + {file = "matplotlib-3.5.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:364e6bca34edc10a96aa3b1d7cd76eb2eea19a4097198c1b19e89bee47ed5781"}, + {file = "matplotlib-3.5.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:ea75df8e567743207e2b479ba3d8843537be1c146d4b1e3e395319a4e1a77fe9"}, + {file = "matplotlib-3.5.2-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:44c6436868186564450df8fd2fc20ed9daaef5caad699aa04069e87099f9b5a8"}, + {file = "matplotlib-3.5.2-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:7d7705022df2c42bb02937a2a824f4ec3cca915700dd80dc23916af47ff05f1a"}, + {file = "matplotlib-3.5.2-cp39-cp39-win32.whl", hash = "sha256:ee0b8e586ac07f83bb2950717e66cb305e2859baf6f00a9c39cc576e0ce9629c"}, + {file = "matplotlib-3.5.2-cp39-cp39-win_amd64.whl", hash = "sha256:c772264631e5ae61f0bd41313bbe48e1b9bcc95b974033e1118c9caa1a84d5c6"}, + {file = "matplotlib-3.5.2-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:751d3815b555dcd6187ad35b21736dc12ce6925fc3fa363bbc6dc0f86f16484f"}, + {file = "matplotlib-3.5.2-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:31fbc2af27ebb820763f077ec7adc79b5a031c2f3f7af446bd7909674cd59460"}, + {file = "matplotlib-3.5.2-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4fa28ca76ac5c2b2d54bc058b3dad8e22ee85d26d1ee1b116a6fd4d2277b6a04"}, + {file = "matplotlib-3.5.2-pp37-pypy37_pp73-win_amd64.whl", hash = 
"sha256:24173c23d1bcbaed5bf47b8785d27933a1ac26a5d772200a0f3e0e38f471b001"}, + {file = "matplotlib-3.5.2.tar.gz", hash = "sha256:48cf850ce14fa18067f2d9e0d646763681948487a8080ec0af2686468b4607a2"}, +] + +[package.dependencies] +cycler = ">=0.10" +fonttools = ">=4.22.0" +kiwisolver = ">=1.0.1" +numpy = ">=1.17" +packaging = ">=20.0" +pillow = ">=6.2.0" +pyparsing = ">=2.2.1" +python-dateutil = ">=2.7" + +[[package]] +name = "matplotlib-inline" +version = "0.1.6" +description = "Inline Matplotlib backend for Jupyter" +optional = false +python-versions = ">=3.5" +files = [ + {file = "matplotlib-inline-0.1.6.tar.gz", hash = "sha256:f887e5f10ba98e8d2b150ddcf4702c1e5f8b3a20005eb0f74bfdbd360ee6f304"}, + {file = "matplotlib_inline-0.1.6-py3-none-any.whl", hash = "sha256:f1f41aab5328aa5aaea9b16d083b128102f8712542f819fe7e6a420ff581b311"}, +] + +[package.dependencies] +traitlets = "*" + +[[package]] +name = "mistune" +version = "2.0.5" +description = "A sane Markdown parser with useful plugins and renderers" +optional = false +python-versions = "*" +files = [ + {file = "mistune-2.0.5-py2.py3-none-any.whl", hash = "sha256:bad7f5d431886fcbaf5f758118ecff70d31f75231b34024a1341120340a65ce8"}, + {file = "mistune-2.0.5.tar.gz", hash = "sha256:0246113cb2492db875c6be56974a7c893333bf26cd92891c85f63151cee09d34"}, +] + +[[package]] +name = "nbclassic" +version = "1.0.0" +description = "Jupyter Notebook as a Jupyter Server extension." +optional = false +python-versions = ">=3.7" +files = [ + {file = "nbclassic-1.0.0-py3-none-any.whl", hash = "sha256:f99e4769b4750076cd4235c044b61232110733322384a94a63791d2e7beacc66"}, + {file = "nbclassic-1.0.0.tar.gz", hash = "sha256:0ae11eb2319455d805596bf320336cda9554b41d99ab9a3c31bf8180bffa30e3"}, +] + +[package.dependencies] +argon2-cffi = "*" +ipykernel = "*" +ipython-genutils = "*" +jinja2 = "*" +jupyter-client = ">=6.1.1" +jupyter-core = ">=4.6.1" +jupyter-server = ">=1.8" +nbconvert = ">=5" +nbformat = "*" +nest-asyncio = ">=1.5" +notebook-shim = ">=0.2.3" +prometheus-client = "*" +pyzmq = ">=17" +Send2Trash = ">=1.8.0" +terminado = ">=0.8.3" +tornado = ">=6.1" +traitlets = ">=4.2.1" + +[package.extras] +docs = ["myst-parser", "nbsphinx", "sphinx", "sphinx-rtd-theme", "sphinxcontrib-github-alt"] +json-logging = ["json-logging"] +test = ["coverage", "nbval", "pytest", "pytest-cov", "pytest-jupyter", "pytest-playwright", "pytest-tornasync", "requests", "requests-unixsocket", "testpath"] + +[[package]] +name = "nbclient" +version = "0.8.0" +description = "A client library for executing notebooks. Formerly nbconvert's ExecutePreprocessor." 
+optional = false +python-versions = ">=3.8.0" +files = [ + {file = "nbclient-0.8.0-py3-none-any.whl", hash = "sha256:25e861299e5303a0477568557c4045eccc7a34c17fc08e7959558707b9ebe548"}, + {file = "nbclient-0.8.0.tar.gz", hash = "sha256:f9b179cd4b2d7bca965f900a2ebf0db4a12ebff2f36a711cb66861e4ae158e55"}, +] + +[package.dependencies] +jupyter-client = ">=6.1.12" +jupyter-core = ">=4.12,<5.0.dev0 || >=5.1.dev0" +nbformat = ">=5.1" +traitlets = ">=5.4" + +[package.extras] +dev = ["pre-commit"] +docs = ["autodoc-traits", "mock", "moto", "myst-parser", "nbclient[test]", "sphinx (>=1.7)", "sphinx-book-theme", "sphinxcontrib-spelling"] +test = ["flaky", "ipykernel (>=6.19.3)", "ipython", "ipywidgets", "nbconvert (>=7.0.0)", "pytest (>=7.0)", "pytest-asyncio", "pytest-cov (>=4.0)", "testpath", "xmltodict"] + +[[package]] +name = "nbconvert" +version = "7.4.0" +description = "Converting Jupyter Notebooks" +optional = false +python-versions = ">=3.7" +files = [ + {file = "nbconvert-7.4.0-py3-none-any.whl", hash = "sha256:af5064a9db524f9f12f4e8be7f0799524bd5b14c1adea37e34e83c95127cc818"}, + {file = "nbconvert-7.4.0.tar.gz", hash = "sha256:51b6c77b507b177b73f6729dba15676e42c4e92bcb00edc8cc982ee72e7d89d7"}, +] + +[package.dependencies] +beautifulsoup4 = "*" +bleach = "*" +defusedxml = "*" +importlib-metadata = {version = ">=3.6", markers = "python_version < \"3.10\""} +jinja2 = ">=3.0" +jupyter-core = ">=4.7" +jupyterlab-pygments = "*" +markupsafe = ">=2.0" +mistune = ">=2.0.3,<3" +nbclient = ">=0.5.0" +nbformat = ">=5.1" +packaging = "*" +pandocfilters = ">=1.4.1" +pygments = ">=2.4.1" +tinycss2 = "*" +traitlets = ">=5.0" + +[package.extras] +all = ["nbconvert[docs,qtpdf,serve,test,webpdf]"] +docs = ["ipykernel", "ipython", "myst-parser", "nbsphinx (>=0.2.12)", "pydata-sphinx-theme", "sphinx (==5.0.2)", "sphinxcontrib-spelling"] +qtpdf = ["nbconvert[qtpng]"] +qtpng = ["pyqtwebengine (>=5.15)"] +serve = ["tornado (>=6.1)"] +test = ["ipykernel", "ipywidgets (>=7)", "pre-commit", "pytest", "pytest-dependency"] +webpdf = ["pyppeteer (>=1,<1.1)"] + +[[package]] +name = "nbformat" +version = "5.5.0" +description = "The Jupyter Notebook format" +optional = false +python-versions = ">=3.7" +files = [ + {file = "nbformat-5.5.0-py3-none-any.whl", hash = "sha256:eb21018bbcdb29e7a4b8b29068d4b6794cdad685db8fcd569b97a09a048dc2e4"}, + {file = "nbformat-5.5.0.tar.gz", hash = "sha256:9ebe30e6c3b3e5b47d39ff0a3897a1acf523d2bfafcb4e2d04cdb70f8a66c507"}, +] + +[package.dependencies] +fastjsonschema = "*" +jsonschema = ">=2.6" +jupyter_core = "*" +traitlets = ">=5.1" + +[package.extras] +test = ["check-manifest", "pep440", "pre-commit", "pytest", "testpath"] + +[[package]] +name = "nest-asyncio" +version = "1.5.6" +description = "Patch asyncio to allow nested event loops" +optional = false +python-versions = ">=3.5" +files = [ + {file = "nest_asyncio-1.5.6-py3-none-any.whl", hash = "sha256:b9a953fb40dceaa587d109609098db21900182b16440652454a146cffb06e8b8"}, + {file = "nest_asyncio-1.5.6.tar.gz", hash = "sha256:d267cc1ff794403f7df692964d1d2a3fa9418ffea2a3f6859a439ff482fef290"}, +] + +[[package]] +name = "notebook" +version = "6.5.4" +description = "A web-based notebook environment for interactive computing" +optional = false +python-versions = ">=3.7" +files = [ + {file = "notebook-6.5.4-py3-none-any.whl", hash = "sha256:dd17e78aefe64c768737b32bf171c1c766666a21cc79a44d37a1700771cab56f"}, + {file = "notebook-6.5.4.tar.gz", hash = "sha256:517209568bd47261e2def27a140e97d49070602eea0d226a696f42a7f16c9a4e"}, +] + 
+[package.dependencies] +argon2-cffi = "*" +ipykernel = "*" +ipython-genutils = "*" +jinja2 = "*" +jupyter-client = ">=5.3.4" +jupyter-core = ">=4.6.1" +nbclassic = ">=0.4.7" +nbconvert = ">=5" +nbformat = "*" +nest-asyncio = ">=1.5" +prometheus-client = "*" +pyzmq = ">=17" +Send2Trash = ">=1.8.0" +terminado = ">=0.8.3" +tornado = ">=6.1" +traitlets = ">=4.2.1" + +[package.extras] +docs = ["myst-parser", "nbsphinx", "sphinx", "sphinx-rtd-theme", "sphinxcontrib-github-alt"] +json-logging = ["json-logging"] +test = ["coverage", "nbval", "pytest", "pytest-cov", "requests", "requests-unixsocket", "selenium (==4.1.5)", "testpath"] + +[[package]] +name = "notebook-shim" +version = "0.2.3" +description = "A shim layer for notebook traits and config" +optional = false +python-versions = ">=3.7" +files = [ + {file = "notebook_shim-0.2.3-py3-none-any.whl", hash = "sha256:a83496a43341c1674b093bfcebf0fe8e74cbe7eda5fd2bbc56f8e39e1486c0c7"}, + {file = "notebook_shim-0.2.3.tar.gz", hash = "sha256:f69388ac283ae008cd506dda10d0288b09a017d822d5e8c7129a152cbd3ce7e9"}, +] + +[package.dependencies] +jupyter-server = ">=1.8,<3" + +[package.extras] +test = ["pytest", "pytest-console-scripts", "pytest-jupyter", "pytest-tornasync"] + +[[package]] +name = "nptyping" +version = "2.5.0" +description = "Type hints for NumPy." +optional = false +python-versions = ">=3.7" +files = [ + {file = "nptyping-2.5.0-py3-none-any.whl", hash = "sha256:764e51836faae33a7ae2e928af574cfb701355647accadcc89f2ad793630b7c8"}, + {file = "nptyping-2.5.0.tar.gz", hash = "sha256:e3d35b53af967e6fb407c3016ff9abae954d3a0568f7cc13a461084224e8e20a"}, +] + +[package.dependencies] +numpy = {version = ">=1.20.0,<2.0.0", markers = "python_version >= \"3.8\""} +typing-extensions = {version = ">=4.0.0,<5.0.0", markers = "python_version < \"3.10\""} + +[package.extras] +build = ["invoke (>=1.6.0)", "pip-tools (>=6.5.0)"] +complete = ["pandas", "pandas-stubs-fork"] +dev = ["autoflake", "beartype (<0.10.0)", "beartype (>=0.10.0)", "black", "codecov (>=2.1.0)", "coverage", "feedparser", "invoke (>=1.6.0)", "isort", "mypy", "pandas", "pandas-stubs-fork", "pip-tools (>=6.5.0)", "pylint", "pyright", "setuptools", "typeguard", "wheel"] +pandas = ["pandas", "pandas-stubs-fork"] +qa = ["autoflake", "beartype (<0.10.0)", "beartype (>=0.10.0)", "black", "codecov (>=2.1.0)", "coverage", "feedparser", "isort", "mypy", "pylint", "pyright", "setuptools", "typeguard", "wheel"] + +[[package]] +name = "numba" +version = "0.56.4" +description = "compiling Python code using LLVM" +optional = false +python-versions = ">=3.7" +files = [ + {file = "numba-0.56.4-cp310-cp310-macosx_10_14_x86_64.whl", hash = "sha256:9f62672145f8669ec08762895fe85f4cf0ead08ce3164667f2b94b2f62ab23c3"}, + {file = "numba-0.56.4-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:c602d015478b7958408d788ba00a50272649c5186ea8baa6cf71d4a1c761bba1"}, + {file = "numba-0.56.4-cp310-cp310-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:85dbaed7a05ff96492b69a8900c5ba605551afb9b27774f7f10511095451137c"}, + {file = "numba-0.56.4-cp310-cp310-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:f4cfc3a19d1e26448032049c79fc60331b104f694cf570a9e94f4e2c9d0932bb"}, + {file = "numba-0.56.4-cp310-cp310-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:4e08e203b163ace08bad500b0c16f6092b1eb34fd1fce4feaf31a67a3a5ecf3b"}, + {file = "numba-0.56.4-cp310-cp310-win32.whl", hash = "sha256:0611e6d3eebe4cb903f1a836ffdb2bda8d18482bcd0a0dcc56e79e2aa3fefef5"}, + {file = 
"numba-0.56.4-cp310-cp310-win_amd64.whl", hash = "sha256:fbfb45e7b297749029cb28694abf437a78695a100e7c2033983d69f0ba2698d4"}, + {file = "numba-0.56.4-cp37-cp37m-macosx_10_14_x86_64.whl", hash = "sha256:3cb1a07a082a61df80a468f232e452d818f5ae254b40c26390054e4e868556e0"}, + {file = "numba-0.56.4-cp37-cp37m-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:d69ad934e13c15684e7887100a8f5f0f61d7a8e57e0fd29d9993210089a5b531"}, + {file = "numba-0.56.4-cp37-cp37m-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:dbcc847bac2d225265d054993a7f910fda66e73d6662fe7156452cac0325b073"}, + {file = "numba-0.56.4-cp37-cp37m-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:8a95ca9cc77ea4571081f6594e08bd272b66060634b8324e99cd1843020364f9"}, + {file = "numba-0.56.4-cp37-cp37m-win32.whl", hash = "sha256:fcdf84ba3ed8124eb7234adfbb8792f311991cbf8aed1cad4b1b1a7ee08380c1"}, + {file = "numba-0.56.4-cp37-cp37m-win_amd64.whl", hash = "sha256:42f9e1be942b215df7e6cc9948cf9c15bb8170acc8286c063a9e57994ef82fd1"}, + {file = "numba-0.56.4-cp38-cp38-macosx_10_14_x86_64.whl", hash = "sha256:553da2ce74e8862e18a72a209ed3b6d2924403bdd0fb341fa891c6455545ba7c"}, + {file = "numba-0.56.4-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:4373da9757049db7c90591e9ec55a2e97b2b36ba7ae3bf9c956a513374077470"}, + {file = "numba-0.56.4-cp38-cp38-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:3a993349b90569518739009d8f4b523dfedd7e0049e6838c0e17435c3e70dcc4"}, + {file = "numba-0.56.4-cp38-cp38-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:720886b852a2d62619ae3900fe71f1852c62db4f287d0c275a60219e1643fc04"}, + {file = "numba-0.56.4-cp38-cp38-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:e64d338b504c9394a4a34942df4627e1e6cb07396ee3b49fe7b8d6420aa5104f"}, + {file = "numba-0.56.4-cp38-cp38-win32.whl", hash = "sha256:03fe94cd31e96185cce2fae005334a8cc712fc2ba7756e52dff8c9400718173f"}, + {file = "numba-0.56.4-cp38-cp38-win_amd64.whl", hash = "sha256:91f021145a8081f881996818474ef737800bcc613ffb1e618a655725a0f9e246"}, + {file = "numba-0.56.4-cp39-cp39-macosx_10_14_x86_64.whl", hash = "sha256:d0ae9270a7a5cc0ede63cd234b4ff1ce166c7a749b91dbbf45e0000c56d3eade"}, + {file = "numba-0.56.4-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:c75e8a5f810ce80a0cfad6e74ee94f9fde9b40c81312949bf356b7304ef20740"}, + {file = "numba-0.56.4-cp39-cp39-manylinux2014_aarch64.manylinux_2_17_aarch64.whl", hash = "sha256:a12ef323c0f2101529d455cfde7f4135eaa147bad17afe10b48634f796d96abd"}, + {file = "numba-0.56.4-cp39-cp39-manylinux2014_i686.manylinux_2_17_i686.whl", hash = "sha256:03634579d10a6129181129de293dd6b5eaabee86881369d24d63f8fe352dd6cb"}, + {file = "numba-0.56.4-cp39-cp39-manylinux2014_x86_64.manylinux_2_17_x86_64.whl", hash = "sha256:0240f9026b015e336069329839208ebd70ec34ae5bfbf402e4fcc8e06197528e"}, + {file = "numba-0.56.4-cp39-cp39-win32.whl", hash = "sha256:14dbbabf6ffcd96ee2ac827389afa59a70ffa9f089576500434c34abf9b054a4"}, + {file = "numba-0.56.4-cp39-cp39-win_amd64.whl", hash = "sha256:0da583c532cd72feefd8e551435747e0e0fbb3c0530357e6845fcc11e38d6aea"}, + {file = "numba-0.56.4.tar.gz", hash = "sha256:32d9fef412c81483d7efe0ceb6cf4d3310fde8b624a9cecca00f790573ac96ee"}, +] + +[package.dependencies] +importlib-metadata = {version = "*", markers = "python_version < \"3.9\""} +llvmlite = "==0.39.*" +numpy = ">=1.18,<1.24" +setuptools = "*" + +[[package]] +name = "numpy" +version = "1.23.5" +description = "NumPy is the fundamental package for array computing with Python." 
+optional = false +python-versions = ">=3.8" +files = [ + {file = "numpy-1.23.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9c88793f78fca17da0145455f0d7826bcb9f37da4764af27ac945488116efe63"}, + {file = "numpy-1.23.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:e9f4c4e51567b616be64e05d517c79a8a22f3606499941d97bb76f2ca59f982d"}, + {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7903ba8ab592b82014713c491f6c5d3a1cde5b4a3bf116404e08f5b52f6daf43"}, + {file = "numpy-1.23.5-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5e05b1c973a9f858c74367553e236f287e749465f773328c8ef31abe18f691e1"}, + {file = "numpy-1.23.5-cp310-cp310-win32.whl", hash = "sha256:522e26bbf6377e4d76403826ed689c295b0b238f46c28a7251ab94716da0b280"}, + {file = "numpy-1.23.5-cp310-cp310-win_amd64.whl", hash = "sha256:dbee87b469018961d1ad79b1a5d50c0ae850000b639bcb1b694e9981083243b6"}, + {file = "numpy-1.23.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:ce571367b6dfe60af04e04a1834ca2dc5f46004ac1cc756fb95319f64c095a96"}, + {file = "numpy-1.23.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:56e454c7833e94ec9769fa0f86e6ff8e42ee38ce0ce1fa4cbb747ea7e06d56aa"}, + {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5039f55555e1eab31124a5768898c9e22c25a65c1e0037f4d7c495a45778c9f2"}, + {file = "numpy-1.23.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58f545efd1108e647604a1b5aa809591ccd2540f468a880bedb97247e72db387"}, + {file = "numpy-1.23.5-cp311-cp311-win32.whl", hash = "sha256:b2a9ab7c279c91974f756c84c365a669a887efa287365a8e2c418f8b3ba73fb0"}, + {file = "numpy-1.23.5-cp311-cp311-win_amd64.whl", hash = "sha256:0cbe9848fad08baf71de1a39e12d1b6310f1d5b2d0ea4de051058e6e1076852d"}, + {file = "numpy-1.23.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:f063b69b090c9d918f9df0a12116029e274daf0181df392839661c4c7ec9018a"}, + {file = "numpy-1.23.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0aaee12d8883552fadfc41e96b4c82ee7d794949e2a7c3b3a7201e968c7ecab9"}, + {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:92c8c1e89a1f5028a4c6d9e3ccbe311b6ba53694811269b992c0b224269e2398"}, + {file = "numpy-1.23.5-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d208a0f8729f3fb790ed18a003f3a57895b989b40ea4dce4717e9cf4af62c6bb"}, + {file = "numpy-1.23.5-cp38-cp38-win32.whl", hash = "sha256:06005a2ef6014e9956c09ba07654f9837d9e26696a0470e42beedadb78c11b07"}, + {file = "numpy-1.23.5-cp38-cp38-win_amd64.whl", hash = "sha256:ca51fcfcc5f9354c45f400059e88bc09215fb71a48d3768fb80e357f3b457e1e"}, + {file = "numpy-1.23.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8969bfd28e85c81f3f94eb4a66bc2cf1dbdc5c18efc320af34bffc54d6b1e38f"}, + {file = "numpy-1.23.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:a7ac231a08bb37f852849bbb387a20a57574a97cfc7b6cabb488a4fc8be176de"}, + {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:bf837dc63ba5c06dc8797c398db1e223a466c7ece27a1f7b5232ba3466aafe3d"}, + {file = "numpy-1.23.5-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:33161613d2269025873025b33e879825ec7b1d831317e68f4f2f0f84ed14c719"}, + {file = "numpy-1.23.5-cp39-cp39-win32.whl", hash = "sha256:af1da88f6bc3d2338ebbf0e22fe487821ea4d8e89053e25fa59d1d79786e7481"}, + {file = "numpy-1.23.5-cp39-cp39-win_amd64.whl", hash = 
"sha256:09b7847f7e83ca37c6e627682f145856de331049013853f344f37b0c9690e3df"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:abdde9f795cf292fb9651ed48185503a2ff29be87770c3b8e2a14b0cd7aa16f8"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:f9a909a8bae284d46bbfdefbdd4a262ba19d3bc9921b1e76126b1d21c3c34135"}, + {file = "numpy-1.23.5-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:01dd17cbb340bf0fc23981e52e1d18a9d4050792e8fb8363cecbf066a84b827d"}, + {file = "numpy-1.23.5.tar.gz", hash = "sha256:1b1766d6f397c18153d40015ddfc79ddb715cabadc04d2d228d4e5a8bc4ded1a"}, +] + +[[package]] +name = "nuscenes-devkit" +version = "1.1.10" +description = "The official devkit of the nuScenes dataset (www.nuscenes.org)." +optional = false +python-versions = ">=3.6" +files = [ + {file = "nuscenes_devkit-1.1.10-py3-none-any.whl", hash = "sha256:7b19cd3b2d305dbacce9ea5d06401e1d06106dbc2bdf86260f0d8b18d21869f8"}, +] + +[package.dependencies] +cachetools = "*" +descartes = "*" +fire = "*" +jupyter = "*" +matplotlib = "<=3.5.2" +numpy = "*" +opencv-python = "*" +Pillow = ">6.2.1" +pycocotools = ">=2.0.1" +pyquaternion = ">=0.9.5" +scikit-learn = "*" +scipy = "*" +Shapely = "<=1.8.5" +tqdm = "*" + +[[package]] +name = "open3d" +version = "0.16.0" +description = "" +optional = false +python-versions = ">=3.6" +files = [ + {file = "open3d-0.16.0-cp310-cp310-macosx_10_15_x86_64.whl", hash = "sha256:ee108b9f88d36bc35f6da298a51aea0287c31de027ff657e4fd5b51aa2b384e8"}, + {file = "open3d-0.16.0-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:0a76da8eb507016f8d2aea0d493d23baa44828a6baad84a68c7de479ae56314f"}, + {file = "open3d-0.16.0-cp310-cp310-manylinux2014_aarch64.whl", hash = "sha256:872631f1bd0a3e04aec5d20dbaaf9c9aaea20b200933094f51a81864b6e5ae29"}, + {file = "open3d-0.16.0-cp310-cp310-manylinux_2_27_x86_64.whl", hash = "sha256:fbe45e72c591bd88b2b5b202e1c5607fd17799b97cb3d4ccf5e27a666caa5761"}, + {file = "open3d-0.16.0-cp310-cp310-win_amd64.whl", hash = "sha256:9ea3b7b2efc902c0611e94073c013ad9850ed91e4de6bc0cce653a0b629760c2"}, + {file = "open3d-0.16.0-cp37-cp37m-macosx_10_15_x86_64.whl", hash = "sha256:7a24cf117349d9313db2f22c4de9cebf1f50ed7601af6f362fc246d323f04c63"}, + {file = "open3d-0.16.0-cp37-cp37m-manylinux2014_aarch64.whl", hash = "sha256:edec04c2d2acbac198c80818fed4ccb8d4f16b1b9012a92ffccf29da029388ba"}, + {file = "open3d-0.16.0-cp37-cp37m-manylinux_2_27_x86_64.whl", hash = "sha256:ed00668bd6a976a5c2a4c84a610a3bd70ffe86f4c09a395249c29e26deadcaec"}, + {file = "open3d-0.16.0-cp37-cp37m-win_amd64.whl", hash = "sha256:23f4e71d8d5d717cc5c49b68538667b6c8cac40a180a45029d9833bb126242a5"}, + {file = "open3d-0.16.0-cp38-cp38-macosx_10_15_x86_64.whl", hash = "sha256:f10f9ae54427f6f1ee9f67dfec87b6108f94f29ceeb82ddaabf99efc7d1d9d66"}, + {file = "open3d-0.16.0-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:a7c79f07c1cb254ba7f0df111a1f66700a1a2b11f3a384d01cce5309286a14c5"}, + {file = "open3d-0.16.0-cp38-cp38-manylinux2014_aarch64.whl", hash = "sha256:ea463881576f6b32c491952cd26168b53d9e91b7f35ef5e6933d1eee16e44d0b"}, + {file = "open3d-0.16.0-cp38-cp38-manylinux_2_27_x86_64.whl", hash = "sha256:ae94498e869a48a0798a7f47a5caf42409601dc9f18dc6569954961e1a2a86ad"}, + {file = "open3d-0.16.0-cp38-cp38-win_amd64.whl", hash = "sha256:a4d4385986bad86c1fd2b3b689626be645f1150895633d16906064e0e1c333c0"}, + {file = "open3d-0.16.0-cp39-cp39-macosx_10_15_x86_64.whl", hash = "sha256:d5d7d784f1678afff04fb7b78360e3e7e2bd04c3b2ae1e5a064651b76d0421b5"}, 
+ {file = "open3d-0.16.0-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:142afd19ba546e675cfe81f1647756a2502e93eb5315906a63fcf7ef4d552877"}, + {file = "open3d-0.16.0-cp39-cp39-manylinux2014_aarch64.whl", hash = "sha256:fe59a56433007ef9cab2548d9fe628d89c184b4b58ec60d794b11b068ba4d0bf"}, + {file = "open3d-0.16.0-cp39-cp39-manylinux_2_27_x86_64.whl", hash = "sha256:291aea92f43311b0f4bc20080f6431f52d185d46eb3d2ae79b67400f70d81efb"}, + {file = "open3d-0.16.0-cp39-cp39-win_amd64.whl", hash = "sha256:10c1c25213da9ec5af789cd2a3aba854cbeb0438b64f6fde0e6e5beb86632f41"}, +] + +[package.dependencies] +addict = "*" +configargparse = "*" +dash = ">=2.6.0" +matplotlib = ">=3" +nbformat = "5.5.0" +numpy = ">=1.18.0" +pandas = ">=1.0" +pillow = ">=8.2.0" +pyquaternion = "*" +pyyaml = ">=5.4.1" +scikit-learn = ">=0.21" +tqdm = "*" + +[[package]] +name = "opencv-python" +version = "4.7.0.72" +description = "Wrapper package for OpenCV python bindings." +optional = false +python-versions = ">=3.6" +files = [ + {file = "opencv-python-4.7.0.72.tar.gz", hash = "sha256:3424794a711f33284581f3c1e4b071cfc827d02b99d6fd9a35391f517c453306"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-macosx_10_16_x86_64.whl", hash = "sha256:d4f8880440c433a0025d78804dda6901d1e8e541a561dda66892d90290aef881"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-macosx_11_0_arm64.whl", hash = "sha256:7a297e7651e22eb17c265ddbbc80e2ba2a8ff4f4a1696a67c45e5f5798245842"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cd08343654c6b88c5a8c25bf425f8025aed2e3189b4d7306b5861d32affaf737"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ebfc0a3a2f57716e709028b992e4de7fd8752105d7a768531c4f434043c6f9ff"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-win32.whl", hash = "sha256:eda115797b114fc16ca6f182b91c5d984f0015c19bec3145e55d33d708e9bae1"}, + {file = "opencv_python-4.7.0.72-cp37-abi3-win_amd64.whl", hash = "sha256:812af57553ec1c6709060c63f6b7e9ad07ddc0f592f3ccc6d00c71e0fe0e6376"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.21.0", markers = "python_version <= \"3.9\" and platform_system == \"Darwin\" and platform_machine == \"arm64\""}, + {version = ">=1.21.2", markers = "python_version >= \"3.10\""}, + {version = ">=1.21.4", markers = "python_version >= \"3.10\" and platform_system == \"Darwin\""}, + {version = ">=1.22.0", markers = "python_version >= \"3.11\""}, + {version = ">=1.19.3", markers = "python_version >= \"3.6\" and platform_system == \"Linux\" and platform_machine == \"aarch64\" or python_version >= \"3.9\""}, + {version = ">=1.17.0", markers = "python_version >= \"3.7\""}, + {version = ">=1.17.3", markers = "python_version >= \"3.8\""}, +] + +[[package]] +name = "overrides" +version = "7.3.1" +description = "A decorator to automatically detect mismatch when overriding a method." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "overrides-7.3.1-py3-none-any.whl", hash = "sha256:6187d8710a935d09b0bcef8238301d6ee2569d2ac1ae0ec39a8c7924e27f58ca"}, + {file = "overrides-7.3.1.tar.gz", hash = "sha256:8b97c6c1e1681b78cbc9424b138d880f0803c2254c5ebaabdde57bb6c62093f2"}, +] + +[[package]] +name = "packaging" +version = "23.1" +description = "Core utilities for Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "packaging-23.1-py3-none-any.whl", hash = "sha256:994793af429502c4ea2ebf6bf664629d07c1a9fe974af92966e4b8d2df7edc61"}, + {file = "packaging-23.1.tar.gz", hash = "sha256:a392980d2b6cffa644431898be54b0045151319d1e7ec34f0cfed48767dd334f"}, +] + +[[package]] +name = "pandas" +version = "2.0.2" +description = "Powerful data structures for data analysis, time series, and statistics" +optional = false +python-versions = ">=3.8" +files = [ + {file = "pandas-2.0.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:9ebb9f1c22ddb828e7fd017ea265a59d80461d5a79154b49a4207bd17514d122"}, + {file = "pandas-2.0.2-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:1eb09a242184092f424b2edd06eb2b99d06dc07eeddff9929e8667d4ed44e181"}, + {file = "pandas-2.0.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c7319b6e68de14e6209460f72a8d1ef13c09fb3d3ef6c37c1e65b35d50b5c145"}, + {file = "pandas-2.0.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:dd46bde7309088481b1cf9c58e3f0e204b9ff9e3244f441accd220dd3365ce7c"}, + {file = "pandas-2.0.2-cp310-cp310-win32.whl", hash = "sha256:51a93d422fbb1bd04b67639ba4b5368dffc26923f3ea32a275d2cc450f1d1c86"}, + {file = "pandas-2.0.2-cp310-cp310-win_amd64.whl", hash = "sha256:66d00300f188fa5de73f92d5725ced162488f6dc6ad4cecfe4144ca29debe3b8"}, + {file = "pandas-2.0.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:02755de164da6827764ceb3bbc5f64b35cb12394b1024fdf88704d0fa06e0e2f"}, + {file = "pandas-2.0.2-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:0a1e0576611641acde15c2322228d138258f236d14b749ad9af498ab69089e2d"}, + {file = "pandas-2.0.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a6b5f14cd24a2ed06e14255ff40fe2ea0cfaef79a8dd68069b7ace74bd6acbba"}, + {file = "pandas-2.0.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:50e451932b3011b61d2961b4185382c92cc8c6ee4658dcd4f320687bb2d000ee"}, + {file = "pandas-2.0.2-cp311-cp311-win32.whl", hash = "sha256:7b21cb72958fc49ad757685db1919021d99650d7aaba676576c9e88d3889d456"}, + {file = "pandas-2.0.2-cp311-cp311-win_amd64.whl", hash = "sha256:c4af689352c4fe3d75b2834933ee9d0ccdbf5d7a8a7264f0ce9524e877820c08"}, + {file = "pandas-2.0.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:69167693cb8f9b3fc060956a5d0a0a8dbfed5f980d9fd2c306fb5b9c855c814c"}, + {file = "pandas-2.0.2-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:30a89d0fec4263ccbf96f68592fd668939481854d2ff9da709d32a047689393b"}, + {file = "pandas-2.0.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:a18e5c72b989ff0f7197707ceddc99828320d0ca22ab50dd1b9e37db45b010c0"}, + {file = "pandas-2.0.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:7376e13d28eb16752c398ca1d36ccfe52bf7e887067af9a0474de6331dd948d2"}, + {file = "pandas-2.0.2-cp38-cp38-win32.whl", hash = "sha256:6d6d10c2142d11d40d6e6c0a190b1f89f525bcf85564707e31b0a39e3b398e08"}, + {file = "pandas-2.0.2-cp38-cp38-win_amd64.whl", hash = 
"sha256:e69140bc2d29a8556f55445c15f5794490852af3de0f609a24003ef174528b79"}, + {file = "pandas-2.0.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:b42b120458636a981077cfcfa8568c031b3e8709701315e2bfa866324a83efa8"}, + {file = "pandas-2.0.2-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:f908a77cbeef9bbd646bd4b81214cbef9ac3dda4181d5092a4aa9797d1bc7774"}, + {file = "pandas-2.0.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:713f2f70abcdade1ddd68fc91577cb090b3544b07ceba78a12f799355a13ee44"}, + {file = "pandas-2.0.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cf3f0c361a4270185baa89ec7ab92ecaa355fe783791457077473f974f654df5"}, + {file = "pandas-2.0.2-cp39-cp39-win32.whl", hash = "sha256:598e9020d85a8cdbaa1815eb325a91cfff2bb2b23c1442549b8a3668e36f0f77"}, + {file = "pandas-2.0.2-cp39-cp39-win_amd64.whl", hash = "sha256:77550c8909ebc23e56a89f91b40ad01b50c42cfbfab49b3393694a50549295ea"}, + {file = "pandas-2.0.2.tar.gz", hash = "sha256:dd5476b6c3fe410ee95926873f377b856dbc4e81a9c605a0dc05aaccc6a7c6c6"}, +] + +[package.dependencies] +numpy = [ + {version = ">=1.20.3", markers = "python_version < \"3.10\""}, + {version = ">=1.21.0", markers = "python_version >= \"3.10\""}, + {version = ">=1.23.2", markers = "python_version >= \"3.11\""}, +] +python-dateutil = ">=2.8.2" +pytz = ">=2020.1" +tzdata = ">=2022.1" + +[package.extras] +all = ["PyQt5 (>=5.15.1)", "SQLAlchemy (>=1.4.16)", "beautifulsoup4 (>=4.9.3)", "bottleneck (>=1.3.2)", "brotlipy (>=0.7.0)", "fastparquet (>=0.6.3)", "fsspec (>=2021.07.0)", "gcsfs (>=2021.07.0)", "html5lib (>=1.1)", "hypothesis (>=6.34.2)", "jinja2 (>=3.0.0)", "lxml (>=4.6.3)", "matplotlib (>=3.6.1)", "numba (>=0.53.1)", "numexpr (>=2.7.3)", "odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pandas-gbq (>=0.15.0)", "psycopg2 (>=2.8.6)", "pyarrow (>=7.0.0)", "pymysql (>=1.0.2)", "pyreadstat (>=1.1.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)", "python-snappy (>=0.6.0)", "pyxlsb (>=1.0.8)", "qtpy (>=2.2.0)", "s3fs (>=2021.08.0)", "scipy (>=1.7.1)", "tables (>=3.6.1)", "tabulate (>=0.8.9)", "xarray (>=0.21.0)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)", "zstandard (>=0.15.2)"] +aws = ["s3fs (>=2021.08.0)"] +clipboard = ["PyQt5 (>=5.15.1)", "qtpy (>=2.2.0)"] +compression = ["brotlipy (>=0.7.0)", "python-snappy (>=0.6.0)", "zstandard (>=0.15.2)"] +computation = ["scipy (>=1.7.1)", "xarray (>=0.21.0)"] +excel = ["odfpy (>=1.4.1)", "openpyxl (>=3.0.7)", "pyxlsb (>=1.0.8)", "xlrd (>=2.0.1)", "xlsxwriter (>=1.4.3)"] +feather = ["pyarrow (>=7.0.0)"] +fss = ["fsspec (>=2021.07.0)"] +gcp = ["gcsfs (>=2021.07.0)", "pandas-gbq (>=0.15.0)"] +hdf5 = ["tables (>=3.6.1)"] +html = ["beautifulsoup4 (>=4.9.3)", "html5lib (>=1.1)", "lxml (>=4.6.3)"] +mysql = ["SQLAlchemy (>=1.4.16)", "pymysql (>=1.0.2)"] +output-formatting = ["jinja2 (>=3.0.0)", "tabulate (>=0.8.9)"] +parquet = ["pyarrow (>=7.0.0)"] +performance = ["bottleneck (>=1.3.2)", "numba (>=0.53.1)", "numexpr (>=2.7.1)"] +plot = ["matplotlib (>=3.6.1)"] +postgresql = ["SQLAlchemy (>=1.4.16)", "psycopg2 (>=2.8.6)"] +spss = ["pyreadstat (>=1.1.2)"] +sql-other = ["SQLAlchemy (>=1.4.16)"] +test = ["hypothesis (>=6.34.2)", "pytest (>=7.0.0)", "pytest-asyncio (>=0.17.0)", "pytest-xdist (>=2.2.0)"] +xml = ["lxml (>=4.6.3)"] + +[[package]] +name = "pandocfilters" +version = "1.5.0" +description = "Utilities for writing pandoc filters in python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = 
"pandocfilters-1.5.0-py2.py3-none-any.whl", hash = "sha256:33aae3f25fd1a026079f5d27bdd52496f0e0803b3469282162bafdcbdf6ef14f"}, + {file = "pandocfilters-1.5.0.tar.gz", hash = "sha256:0b679503337d233b4339a817bfc8c50064e2eff681314376a47cb582305a7a38"}, +] + +[[package]] +name = "parso" +version = "0.8.3" +description = "A Python Parser" +optional = false +python-versions = ">=3.6" +files = [ + {file = "parso-0.8.3-py2.py3-none-any.whl", hash = "sha256:c001d4636cd3aecdaf33cbb40aebb59b094be2a74c556778ef5576c175e19e75"}, + {file = "parso-0.8.3.tar.gz", hash = "sha256:8c07be290bb59f03588915921e29e8a50002acaf2cdc5fa0e0114f91709fafa0"}, +] + +[package.extras] +qa = ["flake8 (==3.8.3)", "mypy (==0.782)"] +testing = ["docopt", "pytest (<6.0.0)"] + +[[package]] +name = "pexpect" +version = "4.8.0" +description = "Pexpect allows easy control of interactive console applications." +optional = false +python-versions = "*" +files = [ + {file = "pexpect-4.8.0-py2.py3-none-any.whl", hash = "sha256:0b48a55dcb3c05f3329815901ea4fc1537514d6ba867a152b581d69ae3710937"}, + {file = "pexpect-4.8.0.tar.gz", hash = "sha256:fc65a43959d153d0114afe13997d439c22823a27cefceb5ff35c2178c6784c0c"}, +] + +[package.dependencies] +ptyprocess = ">=0.5" + +[[package]] +name = "pickleshare" +version = "0.7.5" +description = "Tiny 'shelve'-like database with concurrency support" +optional = false +python-versions = "*" +files = [ + {file = "pickleshare-0.7.5-py2.py3-none-any.whl", hash = "sha256:9649af414d74d4df115d5d718f82acb59c9d418196b7b4290ed47a12ce62df56"}, + {file = "pickleshare-0.7.5.tar.gz", hash = "sha256:87683d47965c1da65cdacaf31c8441d12b8044cdec9aca500cd78fc2c683afca"}, +] + +[[package]] +name = "pillow" +version = "8.4.0" +description = "Python Imaging Library (Fork)" +optional = false +python-versions = ">=3.6" +files = [ + {file = "Pillow-8.4.0-cp310-cp310-macosx_10_10_universal2.whl", hash = "sha256:81f8d5c81e483a9442d72d182e1fb6dcb9723f289a57e8030811bac9ea3fef8d"}, + {file = "Pillow-8.4.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3f97cfb1e5a392d75dd8b9fd274d205404729923840ca94ca45a0af57e13dbe6"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:eb9fc393f3c61f9054e1ed26e6fe912c7321af2f41ff49d3f83d05bacf22cc78"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:d82cdb63100ef5eedb8391732375e6d05993b765f72cb34311fab92103314649"}, + {file = "Pillow-8.4.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:62cc1afda735a8d109007164714e73771b499768b9bb5afcbbee9d0ff374b43f"}, + {file = "Pillow-8.4.0-cp310-cp310-win32.whl", hash = "sha256:e3dacecfbeec9a33e932f00c6cd7996e62f53ad46fbe677577394aaa90ee419a"}, + {file = "Pillow-8.4.0-cp310-cp310-win_amd64.whl", hash = "sha256:620582db2a85b2df5f8a82ddeb52116560d7e5e6b055095f04ad828d1b0baa39"}, + {file = "Pillow-8.4.0-cp36-cp36m-macosx_10_10_x86_64.whl", hash = "sha256:1bc723b434fbc4ab50bb68e11e93ce5fb69866ad621e3c2c9bdb0cd70e345f55"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:72cbcfd54df6caf85cc35264c77ede902452d6df41166010262374155947460c"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:70ad9e5c6cb9b8487280a02c0ad8a51581dcbbe8484ce058477692a27c151c0a"}, + {file = "Pillow-8.4.0-cp36-cp36m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:25a49dc2e2f74e65efaa32b153527fc5ac98508d502fa46e74fa4fd678ed6645"}, + {file = 
"Pillow-8.4.0-cp36-cp36m-win32.whl", hash = "sha256:93ce9e955cc95959df98505e4608ad98281fff037350d8c2671c9aa86bcf10a9"}, + {file = "Pillow-8.4.0-cp36-cp36m-win_amd64.whl", hash = "sha256:2e4440b8f00f504ee4b53fe30f4e381aae30b0568193be305256b1462216feff"}, + {file = "Pillow-8.4.0-cp37-cp37m-macosx_10_10_x86_64.whl", hash = "sha256:8c803ac3c28bbc53763e6825746f05cc407b20e4a69d0122e526a582e3b5e153"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c8a17b5d948f4ceeceb66384727dde11b240736fddeda54ca740b9b8b1556b29"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:1394a6ad5abc838c5cd8a92c5a07535648cdf6d09e8e2d6df916dfa9ea86ead8"}, + {file = "Pillow-8.4.0-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:792e5c12376594bfcb986ebf3855aa4b7c225754e9a9521298e460e92fb4a488"}, + {file = "Pillow-8.4.0-cp37-cp37m-win32.whl", hash = "sha256:d99ec152570e4196772e7a8e4ba5320d2d27bf22fdf11743dd882936ed64305b"}, + {file = "Pillow-8.4.0-cp37-cp37m-win_amd64.whl", hash = "sha256:7b7017b61bbcdd7f6363aeceb881e23c46583739cb69a3ab39cb384f6ec82e5b"}, + {file = "Pillow-8.4.0-cp38-cp38-macosx_10_10_x86_64.whl", hash = "sha256:d89363f02658e253dbd171f7c3716a5d340a24ee82d38aab9183f7fdf0cdca49"}, + {file = "Pillow-8.4.0-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:0a0956fdc5defc34462bb1c765ee88d933239f9a94bc37d132004775241a7585"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5b7bb9de00197fb4261825c15551adf7605cf14a80badf1761d61e59da347779"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:72b9e656e340447f827885b8d7a15fc8c4e68d410dc2297ef6787eec0f0ea409"}, + {file = "Pillow-8.4.0-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:a5a4532a12314149d8b4e4ad8ff09dde7427731fcfa5917ff16d0291f13609df"}, + {file = "Pillow-8.4.0-cp38-cp38-win32.whl", hash = "sha256:82aafa8d5eb68c8463b6e9baeb4f19043bb31fefc03eb7b216b51e6a9981ae09"}, + {file = "Pillow-8.4.0-cp38-cp38-win_amd64.whl", hash = "sha256:066f3999cb3b070a95c3652712cffa1a748cd02d60ad7b4e485c3748a04d9d76"}, + {file = "Pillow-8.4.0-cp39-cp39-macosx_10_10_x86_64.whl", hash = "sha256:5503c86916d27c2e101b7f71c2ae2cddba01a2cf55b8395b0255fd33fa4d1f1a"}, + {file = "Pillow-8.4.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:4acc0985ddf39d1bc969a9220b51d94ed51695d455c228d8ac29fcdb25810e6e"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0b052a619a8bfcf26bd8b3f48f45283f9e977890263e4571f2393ed8898d331b"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:493cb4e415f44cd601fcec11c99836f707bb714ab03f5ed46ac25713baf0ff20"}, + {file = "Pillow-8.4.0-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:b8831cb7332eda5dc89b21a7bce7ef6ad305548820595033a4b03cf3091235ed"}, + {file = "Pillow-8.4.0-cp39-cp39-win32.whl", hash = "sha256:5e9ac5f66616b87d4da618a20ab0a38324dbe88d8a39b55be8964eb520021e02"}, + {file = "Pillow-8.4.0-cp39-cp39-win_amd64.whl", hash = "sha256:3eb1ce5f65908556c2d8685a8f0a6e989d887ec4057326f6c22b24e8a172c66b"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-macosx_10_10_x86_64.whl", hash = "sha256:ddc4d832a0f0b4c52fff973a0d44b6c99839a9d016fe4e6a1cb8f3eea96479c2"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = 
"sha256:9a3e5ddc44c14042f0844b8cf7d2cd455f6cc80fd7f5eefbe657292cf601d9ad"}, + {file = "Pillow-8.4.0-pp36-pypy36_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c70e94281588ef053ae8998039610dbd71bc509e4acbc77ab59d7d2937b10698"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-macosx_10_10_x86_64.whl", hash = "sha256:3862b7256046fcd950618ed22d1d60b842e3a40a48236a5498746f21189afbbc"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:a4901622493f88b1a29bd30ec1a2f683782e57c3c16a2dbc7f2595ba01f639df"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:84c471a734240653a0ec91dec0996696eea227eafe72a33bd06c92697728046b"}, + {file = "Pillow-8.4.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:244cf3b97802c34c41905d22810846802a3329ddcb93ccc432870243211c79fc"}, + {file = "Pillow-8.4.0.tar.gz", hash = "sha256:b8e2f83c56e141920c39464b852de3719dfbfb6e3c99a2d8da0edf4fb33176ed"}, +] + +[[package]] +name = "pkgutil-resolve-name" +version = "1.3.10" +description = "Resolve a name to an object." +optional = false +python-versions = ">=3.6" +files = [ + {file = "pkgutil_resolve_name-1.3.10-py3-none-any.whl", hash = "sha256:ca27cc078d25c5ad71a9de0a7a330146c4e014c2462d9af19c6b828280649c5e"}, + {file = "pkgutil_resolve_name-1.3.10.tar.gz", hash = "sha256:357d6c9e6a755653cfd78893817c0853af365dd51ec97f3d358a819373bbd174"}, +] + +[[package]] +name = "platformdirs" +version = "3.8.0" +description = "A small Python package for determining appropriate platform-specific dirs, e.g. a \"user data dir\"." +optional = false +python-versions = ">=3.7" +files = [ + {file = "platformdirs-3.8.0-py3-none-any.whl", hash = "sha256:ca9ed98ce73076ba72e092b23d3c93ea6c4e186b3f1c3dad6edd98ff6ffcca2e"}, + {file = "platformdirs-3.8.0.tar.gz", hash = "sha256:b0cabcb11063d21a0b261d557acb0a9d2126350e63b70cdf7db6347baea456dc"}, +] + +[package.extras] +docs = ["furo (>=2023.5.20)", "proselint (>=0.13)", "sphinx (>=7.0.1)", "sphinx-autodoc-typehints (>=1.23,!=1.23.4)"] +test = ["appdirs (==1.4.4)", "covdefaults (>=2.3)", "pytest (>=7.3.1)", "pytest-cov (>=4.1)", "pytest-mock (>=3.10)"] + +[[package]] +name = "plotly" +version = "5.15.0" +description = "An open-source, interactive data visualization library for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "plotly-5.15.0-py2.py3-none-any.whl", hash = "sha256:3508876bbd6aefb8a692c21a7128ca87ce42498dd041efa5c933ee44b55aab24"}, + {file = "plotly-5.15.0.tar.gz", hash = "sha256:822eabe53997d5ebf23c77e1d1fcbf3bb6aa745eb05d532afd4b6f9a2e2ab02f"}, +] + +[package.dependencies] +packaging = "*" +tenacity = ">=6.2.0" + +[[package]] +name = "pluggy" +version = "1.2.0" +description = "plugin and hook calling mechanisms for python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pluggy-1.2.0-py3-none-any.whl", hash = "sha256:c2fd55a7d7a3863cba1a013e4e2414658b1d07b6bc57b3919e0c63c9abb99849"}, + {file = "pluggy-1.2.0.tar.gz", hash = "sha256:d12f0c4b579b15f5e054301bb226ee85eeeba08ffec228092f8defbaa3a4c4b3"}, +] + +[package.extras] +dev = ["pre-commit", "tox"] +testing = ["pytest", "pytest-benchmark"] + +[[package]] +name = "prometheus-client" +version = "0.17.0" +description = "Python client for the Prometheus monitoring system." 
+optional = false +python-versions = ">=3.6" +files = [ + {file = "prometheus_client-0.17.0-py3-none-any.whl", hash = "sha256:a77b708cf083f4d1a3fb3ce5c95b4afa32b9c521ae363354a4a910204ea095ce"}, + {file = "prometheus_client-0.17.0.tar.gz", hash = "sha256:9c3b26f1535945e85b8934fb374678d263137b78ef85f305b1156c7c881cd11b"}, +] + +[package.extras] +twisted = ["twisted"] + +[[package]] +name = "prompt-toolkit" +version = "3.0.38" +description = "Library for building powerful interactive command lines in Python" +optional = false +python-versions = ">=3.7.0" +files = [ + {file = "prompt_toolkit-3.0.38-py3-none-any.whl", hash = "sha256:45ea77a2f7c60418850331366c81cf6b5b9cf4c7fd34616f733c5427e6abbb1f"}, + {file = "prompt_toolkit-3.0.38.tar.gz", hash = "sha256:23ac5d50538a9a38c8bde05fecb47d0b403ecd0662857a86f886f798563d5b9b"}, +] + +[package.dependencies] +wcwidth = "*" + +[[package]] +name = "psutil" +version = "5.9.5" +description = "Cross-platform lib for process and system monitoring in Python." +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "psutil-5.9.5-cp27-cp27m-macosx_10_9_x86_64.whl", hash = "sha256:be8929ce4313f9f8146caad4272f6abb8bf99fc6cf59344a3167ecd74f4f203f"}, + {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_i686.whl", hash = "sha256:ab8ed1a1d77c95453db1ae00a3f9c50227ebd955437bcf2a574ba8adbf6a74d5"}, + {file = "psutil-5.9.5-cp27-cp27m-manylinux2010_x86_64.whl", hash = "sha256:4aef137f3345082a3d3232187aeb4ac4ef959ba3d7c10c33dd73763fbc063da4"}, + {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_i686.whl", hash = "sha256:ea8518d152174e1249c4f2a1c89e3e6065941df2fa13a1ab45327716a23c2b48"}, + {file = "psutil-5.9.5-cp27-cp27mu-manylinux2010_x86_64.whl", hash = "sha256:acf2aef9391710afded549ff602b5887d7a2349831ae4c26be7c807c0a39fac4"}, + {file = "psutil-5.9.5-cp27-none-win32.whl", hash = "sha256:5b9b8cb93f507e8dbaf22af6a2fd0ccbe8244bf30b1baad6b3954e935157ae3f"}, + {file = "psutil-5.9.5-cp27-none-win_amd64.whl", hash = "sha256:8c5f7c5a052d1d567db4ddd231a9d27a74e8e4a9c3f44b1032762bd7b9fdcd42"}, + {file = "psutil-5.9.5-cp36-abi3-macosx_10_9_x86_64.whl", hash = "sha256:3c6f686f4225553615612f6d9bc21f1c0e305f75d7d8454f9b46e901778e7217"}, + {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_i686.manylinux2010_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7a7dd9997128a0d928ed4fb2c2d57e5102bb6089027939f3b722f3a210f9a8da"}, + {file = "psutil-5.9.5-cp36-abi3-manylinux_2_12_x86_64.manylinux2010_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:89518112647f1276b03ca97b65cc7f64ca587b1eb0278383017c2a0dcc26cbe4"}, + {file = "psutil-5.9.5-cp36-abi3-win32.whl", hash = "sha256:104a5cc0e31baa2bcf67900be36acde157756b9c44017b86b2c049f11957887d"}, + {file = "psutil-5.9.5-cp36-abi3-win_amd64.whl", hash = "sha256:b258c0c1c9d145a1d5ceffab1134441c4c5113b2417fafff7315a917a026c3c9"}, + {file = "psutil-5.9.5-cp38-abi3-macosx_11_0_arm64.whl", hash = "sha256:c607bb3b57dc779d55e1554846352b4e358c10fff3abf3514a7a6601beebdb30"}, + {file = "psutil-5.9.5.tar.gz", hash = "sha256:5410638e4df39c54d957fc51ce03048acd8e6d60abc0f5107af51e5fb566eb3c"}, +] + +[package.extras] +test = ["enum34", "ipaddress", "mock", "pywin32", "wmi"] + +[[package]] +name = "ptyprocess" +version = "0.7.0" +description = "Run a subprocess in a pseudo terminal" +optional = false +python-versions = "*" +files = [ + {file = "ptyprocess-0.7.0-py2.py3-none-any.whl", hash = "sha256:4b41f3967fce3af57cc7e94b888626c18bf37a083e3651ca8feeb66d492fef35"}, + {file = 
"ptyprocess-0.7.0.tar.gz", hash = "sha256:5c5d0a3b48ceee0b48485e0c26037c0acd7d29765ca3fbb5cb3831d347423220"}, +] + +[[package]] +name = "pure-eval" +version = "0.2.2" +description = "Safely evaluate AST nodes without side effects" +optional = false +python-versions = "*" +files = [ + {file = "pure_eval-0.2.2-py3-none-any.whl", hash = "sha256:01eaab343580944bc56080ebe0a674b39ec44a945e6d09ba7db3cb8cec289350"}, + {file = "pure_eval-0.2.2.tar.gz", hash = "sha256:2b45320af6dfaa1750f543d714b6d1c520a1688dec6fd24d339063ce0aaa9ac3"}, +] + +[package.extras] +tests = ["pytest"] + +[[package]] +name = "pycocotools" +version = "2.0.6" +description = "Official APIs for the MS-COCO dataset" +optional = false +python-versions = ">=3.5" +files = [ + {file = "pycocotools-2.0.6.tar.gz", hash = "sha256:7fe089b05cc18e806dcf3bd764708d86dab922a100f3734eb77fb77a70a1d18c"}, +] + +[package.dependencies] +matplotlib = ">=2.1.0" +numpy = "*" + +[[package]] +name = "pycparser" +version = "2.21" +description = "C parser in Python" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*" +files = [ + {file = "pycparser-2.21-py2.py3-none-any.whl", hash = "sha256:8ee45429555515e1f6b185e78100aea234072576aa43ab53aefcae078162fca9"}, + {file = "pycparser-2.21.tar.gz", hash = "sha256:e644fdec12f7872f86c58ff790da456218b10f863970249516d60a5eaca77206"}, +] + +[[package]] +name = "pydantic" +version = "1.10.9" +description = "Data validation and settings management using python type hints" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pydantic-1.10.9-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:e692dec4a40bfb40ca530e07805b1208c1de071a18d26af4a2a0d79015b352ca"}, + {file = "pydantic-1.10.9-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:3c52eb595db83e189419bf337b59154bdcca642ee4b2a09e5d7797e41ace783f"}, + {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:939328fd539b8d0edf244327398a667b6b140afd3bf7e347cf9813c736211896"}, + {file = "pydantic-1.10.9-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:b48d3d634bca23b172f47f2335c617d3fcb4b3ba18481c96b7943a4c634f5c8d"}, + {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:f0b7628fb8efe60fe66fd4adadd7ad2304014770cdc1f4934db41fe46cc8825f"}, + {file = "pydantic-1.10.9-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:e1aa5c2410769ca28aa9a7841b80d9d9a1c5f223928ca8bec7e7c9a34d26b1d4"}, + {file = "pydantic-1.10.9-cp310-cp310-win_amd64.whl", hash = "sha256:eec39224b2b2e861259d6f3c8b6290d4e0fbdce147adb797484a42278a1a486f"}, + {file = "pydantic-1.10.9-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d111a21bbbfd85c17248130deac02bbd9b5e20b303338e0dbe0faa78330e37e0"}, + {file = "pydantic-1.10.9-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:2e9aec8627a1a6823fc62fb96480abe3eb10168fd0d859ee3d3b395105ae19a7"}, + {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:07293ab08e7b4d3c9d7de4949a0ea571f11e4557d19ea24dd3ae0c524c0c334d"}, + {file = "pydantic-1.10.9-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:7ee829b86ce984261d99ff2fd6e88f2230068d96c2a582f29583ed602ef3fc2c"}, + {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:4b466a23009ff5cdd7076eb56aca537c745ca491293cc38e72bf1e0e00de5b91"}, + {file = "pydantic-1.10.9-cp311-cp311-musllinux_1_1_x86_64.whl", hash = 
"sha256:7847ca62e581e6088d9000f3c497267868ca2fa89432714e21a4fb33a04d52e8"}, + {file = "pydantic-1.10.9-cp311-cp311-win_amd64.whl", hash = "sha256:7845b31959468bc5b78d7b95ec52fe5be32b55d0d09983a877cca6aedc51068f"}, + {file = "pydantic-1.10.9-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:517a681919bf880ce1dac7e5bc0c3af1e58ba118fd774da2ffcd93c5f96eaece"}, + {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:67195274fd27780f15c4c372f4ba9a5c02dad6d50647b917b6a92bf00b3d301a"}, + {file = "pydantic-1.10.9-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2196c06484da2b3fded1ab6dbe182bdabeb09f6318b7fdc412609ee2b564c49a"}, + {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:6257bb45ad78abacda13f15bde5886efd6bf549dd71085e64b8dcf9919c38b60"}, + {file = "pydantic-1.10.9-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3283b574b01e8dbc982080d8287c968489d25329a463b29a90d4157de4f2baaf"}, + {file = "pydantic-1.10.9-cp37-cp37m-win_amd64.whl", hash = "sha256:5f8bbaf4013b9a50e8100333cc4e3fa2f81214033e05ac5aa44fa24a98670a29"}, + {file = "pydantic-1.10.9-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:b9cd67fb763248cbe38f0593cd8611bfe4b8ad82acb3bdf2b0898c23415a1f82"}, + {file = "pydantic-1.10.9-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:f50e1764ce9353be67267e7fd0da08349397c7db17a562ad036aa7c8f4adfdb6"}, + {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:73ef93e5e1d3c8e83f1ff2e7fdd026d9e063c7e089394869a6e2985696693766"}, + {file = "pydantic-1.10.9-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:128d9453d92e6e81e881dd7e2484e08d8b164da5507f62d06ceecf84bf2e21d3"}, + {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:ad428e92ab68798d9326bb3e5515bc927444a3d71a93b4a2ca02a8a5d795c572"}, + {file = "pydantic-1.10.9-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:fab81a92f42d6d525dd47ced310b0c3e10c416bbfae5d59523e63ea22f82b31e"}, + {file = "pydantic-1.10.9-cp38-cp38-win_amd64.whl", hash = "sha256:963671eda0b6ba6926d8fc759e3e10335e1dc1b71ff2a43ed2efd6996634dafb"}, + {file = "pydantic-1.10.9-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:970b1bdc6243ef663ba5c7e36ac9ab1f2bfecb8ad297c9824b542d41a750b298"}, + {file = "pydantic-1.10.9-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:7e1d5290044f620f80cf1c969c542a5468f3656de47b41aa78100c5baa2b8276"}, + {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:83fcff3c7df7adff880622a98022626f4f6dbce6639a88a15a3ce0f96466cb60"}, + {file = "pydantic-1.10.9-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:0da48717dc9495d3a8f215e0d012599db6b8092db02acac5e0d58a65248ec5bc"}, + {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:0a2aabdc73c2a5960e87c3ffebca6ccde88665616d1fd6d3db3178ef427b267a"}, + {file = "pydantic-1.10.9-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:9863b9420d99dfa9c064042304868e8ba08e89081428a1c471858aa2af6f57c4"}, + {file = "pydantic-1.10.9-cp39-cp39-win_amd64.whl", hash = "sha256:e7c9900b43ac14110efa977be3da28931ffc74c27e96ee89fbcaaf0b0fe338e1"}, + {file = "pydantic-1.10.9-py3-none-any.whl", hash = "sha256:6cafde02f6699ce4ff643417d1a9223716ec25e228ddc3b436fe7e2d25a1f305"}, + {file = "pydantic-1.10.9.tar.gz", hash = 
"sha256:95c70da2cd3b6ddf3b9645ecaa8d98f3d80c606624b6d245558d202cd23ea3be"}, +] + +[package.dependencies] +typing-extensions = ">=4.2.0" + +[package.extras] +dotenv = ["python-dotenv (>=0.10.4)"] +email = ["email-validator (>=1.0.3)"] + +[[package]] +name = "pygments" +version = "2.15.1" +description = "Pygments is a syntax highlighting package written in Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "Pygments-2.15.1-py3-none-any.whl", hash = "sha256:db2db3deb4b4179f399a09054b023b6a586b76499d36965813c71aa8ed7b5fd1"}, + {file = "Pygments-2.15.1.tar.gz", hash = "sha256:8ace4d3c1dd481894b2005f560ead0f9f19ee64fe983366be1a21e171d12775c"}, +] + +[package.extras] +plugins = ["importlib-metadata"] + +[[package]] +name = "pyparsing" +version = "3.1.0" +description = "pyparsing module - Classes and methods to define and execute parsing grammars" +optional = false +python-versions = ">=3.6.8" +files = [ + {file = "pyparsing-3.1.0-py3-none-any.whl", hash = "sha256:d554a96d1a7d3ddaf7183104485bc19fd80543ad6ac5bdb6426719d766fb06c1"}, + {file = "pyparsing-3.1.0.tar.gz", hash = "sha256:edb662d6fe322d6e990b1594b5feaeadf806803359e3d4d42f11e295e588f0ea"}, +] + +[package.extras] +diagrams = ["jinja2", "railroad-diagrams"] + +[[package]] +name = "pyquaternion" +version = "0.9.9" +description = "A fully featured, pythonic library for representing and using quaternions." +optional = false +python-versions = "*" +files = [ + {file = "pyquaternion-0.9.9-py2-none-any.whl", hash = "sha256:d0eb69219ca99bfcbc25c1e2c4f82e58c61dce3e907e929f13c5f3615e4b6518"}, + {file = "pyquaternion-0.9.9-py3-none-any.whl", hash = "sha256:e65f6e3f7b1fdf1a9e23f82434334a1ae84f14223eee835190cd2e841f8172ec"}, + {file = "pyquaternion-0.9.9.tar.gz", hash = "sha256:b1f61af219cb2fe966b5fb79a192124f2e63a3f7a777ac3cadf2957b1a81bea8"}, +] + +[package.dependencies] +numpy = "*" + +[package.extras] +dev = ["mkdocs"] +test = ["nose"] + +[[package]] +name = "pyrsistent" +version = "0.19.3" +description = "Persistent/Functional/Immutable data structures" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pyrsistent-0.19.3-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:20460ac0ea439a3e79caa1dbd560344b64ed75e85d8703943e0b66c2a6150e4a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4c18264cb84b5e68e7085a43723f9e4c1fd1d935ab240ce02c0324a8e01ccb64"}, + {file = "pyrsistent-0.19.3-cp310-cp310-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:4b774f9288dda8d425adb6544e5903f1fb6c273ab3128a355c6b972b7df39dcf"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win32.whl", hash = "sha256:5a474fb80f5e0d6c9394d8db0fc19e90fa540b82ee52dba7d246a7791712f74a"}, + {file = "pyrsistent-0.19.3-cp310-cp310-win_amd64.whl", hash = "sha256:49c32f216c17148695ca0e02a5c521e28a4ee6c5089f97e34fe24163113722da"}, + {file = "pyrsistent-0.19.3-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:f0774bf48631f3a20471dd7c5989657b639fd2d285b861237ea9e82c36a415a9"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:3ab2204234c0ecd8b9368dbd6a53e83c3d4f3cab10ecaf6d0e772f456c442393"}, + {file = "pyrsistent-0.19.3-cp311-cp311-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:e42296a09e83028b3476f7073fcb69ffebac0e66dbbfd1bd847d61f74db30f19"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win32.whl", hash = 
"sha256:64220c429e42a7150f4bfd280f6f4bb2850f95956bde93c6fda1b70507af6ef3"}, + {file = "pyrsistent-0.19.3-cp311-cp311-win_amd64.whl", hash = "sha256:016ad1afadf318eb7911baa24b049909f7f3bb2c5b1ed7b6a8f21db21ea3faa8"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:c4db1bd596fefd66b296a3d5d943c94f4fac5bcd13e99bffe2ba6a759d959a28"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:aeda827381f5e5d65cced3024126529ddc4289d944f75e090572c77ceb19adbf"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:42ac0b2f44607eb92ae88609eda931a4f0dfa03038c44c772e07f43e738bcac9"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win32.whl", hash = "sha256:e8f2b814a3dc6225964fa03d8582c6e0b6650d68a232df41e3cc1b66a5d2f8d1"}, + {file = "pyrsistent-0.19.3-cp37-cp37m-win_amd64.whl", hash = "sha256:c9bb60a40a0ab9aba40a59f68214eed5a29c6274c83b2cc206a359c4a89fa41b"}, + {file = "pyrsistent-0.19.3-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:a2471f3f8693101975b1ff85ffd19bb7ca7dd7c38f8a81701f67d6b4f97b87d8"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:cc5d149f31706762c1f8bda2e8c4f8fead6e80312e3692619a75301d3dbb819a"}, + {file = "pyrsistent-0.19.3-cp38-cp38-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3311cb4237a341aa52ab8448c27e3a9931e2ee09561ad150ba94e4cfd3fc888c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win32.whl", hash = "sha256:f0e7c4b2f77593871e918be000b96c8107da48444d57005b6a6bc61fb4331b2c"}, + {file = "pyrsistent-0.19.3-cp38-cp38-win_amd64.whl", hash = "sha256:c147257a92374fde8498491f53ffa8f4822cd70c0d85037e09028e478cababb7"}, + {file = "pyrsistent-0.19.3-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:b735e538f74ec31378f5a1e3886a26d2ca6351106b4dfde376a26fc32a044edc"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:99abb85579e2165bd8522f0c0138864da97847875ecbd45f3e7e2af569bfc6f2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:3a8cb235fa6d3fd7aae6a4f1429bbb1fec1577d978098da1252f0489937786f3"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win32.whl", hash = "sha256:c74bed51f9b41c48366a286395c67f4e894374306b197e62810e0fdaf2364da2"}, + {file = "pyrsistent-0.19.3-cp39-cp39-win_amd64.whl", hash = "sha256:878433581fc23e906d947a6814336eee031a00e6defba224234169ae3d3d6a98"}, + {file = "pyrsistent-0.19.3-py3-none-any.whl", hash = "sha256:ccf0d6bd208f8111179f0c26fdf84ed7c3891982f2edaeae7422575f47e66b64"}, + {file = "pyrsistent-0.19.3.tar.gz", hash = "sha256:1a2994773706bbb4995c31a97bc94f1418314923bd1048c6d964837040376440"}, +] + +[[package]] +name = "pytest" +version = "7.3.2" +description = "pytest: simple powerful testing with Python" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-7.3.2-py3-none-any.whl", hash = "sha256:cdcbd012c9312258922f8cd3f1b62a6580fdced17db6014896053d47cddf9295"}, + {file = "pytest-7.3.2.tar.gz", hash = "sha256:ee990a3cc55ba808b80795a79944756f315c67c12b56abd3ac993a7b8c17030b"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "sys_platform == \"win32\""} +exceptiongroup = {version = ">=1.0.0rc8", markers = "python_version < \"3.11\""} +iniconfig = "*" +packaging = "*" +pluggy = ">=0.12,<2.0" +tomli = {version = ">=1.0.0", markers = 
"python_version < \"3.11\""} + +[package.extras] +testing = ["argcomplete", "attrs (>=19.2.0)", "hypothesis (>=3.56)", "mock", "nose", "pygments (>=2.7.2)", "requests", "setuptools", "xmlschema"] + +[[package]] +name = "pytest-asyncio" +version = "0.18.3" +description = "Pytest support for asyncio" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-asyncio-0.18.3.tar.gz", hash = "sha256:7659bdb0a9eb9c6e3ef992eef11a2b3e69697800ad02fb06374a210d85b29f91"}, + {file = "pytest_asyncio-0.18.3-1-py3-none-any.whl", hash = "sha256:16cf40bdf2b4fb7fc8e4b82bd05ce3fbcd454cbf7b92afc445fe299dabb88213"}, + {file = "pytest_asyncio-0.18.3-py3-none-any.whl", hash = "sha256:8fafa6c52161addfd41ee7ab35f11836c5a16ec208f93ee388f752bea3493a84"}, +] + +[package.dependencies] +pytest = ">=6.1.0" + +[package.extras] +testing = ["coverage (==6.2)", "flaky (>=3.5.0)", "hypothesis (>=5.7.1)", "mypy (==0.931)", "pytest-trio (>=0.7.0)"] + +[[package]] +name = "pytest-mock" +version = "3.11.1" +description = "Thin-wrapper around the mock package for easier use with pytest" +optional = false +python-versions = ">=3.7" +files = [ + {file = "pytest-mock-3.11.1.tar.gz", hash = "sha256:7f6b125602ac6d743e523ae0bfa71e1a697a2f5534064528c6ff84c2f7c2fc7f"}, + {file = "pytest_mock-3.11.1-py3-none-any.whl", hash = "sha256:21c279fff83d70763b05f8874cc9cfb3fcacd6d354247a976f9529d19f9acf39"}, +] + +[package.dependencies] +pytest = ">=5.0" + +[package.extras] +dev = ["pre-commit", "pytest-asyncio", "tox"] + +[[package]] +name = "python-dateutil" +version = "2.8.2" +description = "Extensions to the standard Python datetime module" +optional = false +python-versions = "!=3.0.*,!=3.1.*,!=3.2.*,>=2.7" +files = [ + {file = "python-dateutil-2.8.2.tar.gz", hash = "sha256:0123cacc1627ae19ddf3c27a5de5bd67ee4586fbdd6440d9748f8abb483d3e86"}, + {file = "python_dateutil-2.8.2-py2.py3-none-any.whl", hash = "sha256:961d03dc3453ebbc59dbdea9e4e11c5651520a876d0f4db161e8674aae935da9"}, +] + +[package.dependencies] +six = ">=1.5" + +[[package]] +name = "python-json-logger" +version = "2.0.7" +description = "A python library adding a json log formatter" +optional = false +python-versions = ">=3.6" +files = [ + {file = "python-json-logger-2.0.7.tar.gz", hash = "sha256:23e7ec02d34237c5aa1e29a070193a4ea87583bb4e7f8fd06d3de8264c4b2e1c"}, + {file = "python_json_logger-2.0.7-py3-none-any.whl", hash = "sha256:f380b826a991ebbe3de4d897aeec42760035ac760345e57b812938dc8b35e2bd"}, +] + +[[package]] +name = "pytz" +version = "2023.3" +description = "World timezone definitions, modern and historical" +optional = false +python-versions = "*" +files = [ + {file = "pytz-2023.3-py2.py3-none-any.whl", hash = "sha256:a151b3abb88eda1d4e34a9814df37de2a80e301e68ba0fd856fb9b46bfbbbffb"}, + {file = "pytz-2023.3.tar.gz", hash = "sha256:1d8ce29db189191fb55338ee6d0387d82ab59f3d00eac103412d64e0ebd0c588"}, +] + +[[package]] +name = "pywin32" +version = "306" +description = "Python for Window Extensions" +optional = false +python-versions = "*" +files = [ + {file = "pywin32-306-cp310-cp310-win32.whl", hash = "sha256:06d3420a5155ba65f0b72f2699b5bacf3109f36acbe8923765c22938a69dfc8d"}, + {file = "pywin32-306-cp310-cp310-win_amd64.whl", hash = "sha256:84f4471dbca1887ea3803d8848a1616429ac94a4a8d05f4bc9c5dcfd42ca99c8"}, + {file = "pywin32-306-cp311-cp311-win32.whl", hash = "sha256:e65028133d15b64d2ed8f06dd9fbc268352478d4f9289e69c190ecd6818b6407"}, + {file = "pywin32-306-cp311-cp311-win_amd64.whl", hash = 
"sha256:a7639f51c184c0272e93f244eb24dafca9b1855707d94c192d4a0b4c01e1100e"}, + {file = "pywin32-306-cp311-cp311-win_arm64.whl", hash = "sha256:70dba0c913d19f942a2db25217d9a1b726c278f483a919f1abfed79c9cf64d3a"}, + {file = "pywin32-306-cp312-cp312-win32.whl", hash = "sha256:383229d515657f4e3ed1343da8be101000562bf514591ff383ae940cad65458b"}, + {file = "pywin32-306-cp312-cp312-win_amd64.whl", hash = "sha256:37257794c1ad39ee9be652da0462dc2e394c8159dfd913a8a4e8eb6fd346da0e"}, + {file = "pywin32-306-cp312-cp312-win_arm64.whl", hash = "sha256:5821ec52f6d321aa59e2db7e0a35b997de60c201943557d108af9d4ae1ec7040"}, + {file = "pywin32-306-cp37-cp37m-win32.whl", hash = "sha256:1c73ea9a0d2283d889001998059f5eaaba3b6238f767c9cf2833b13e6a685f65"}, + {file = "pywin32-306-cp37-cp37m-win_amd64.whl", hash = "sha256:72c5f621542d7bdd4fdb716227be0dd3f8565c11b280be6315b06ace35487d36"}, + {file = "pywin32-306-cp38-cp38-win32.whl", hash = "sha256:e4c092e2589b5cf0d365849e73e02c391c1349958c5ac3e9d5ccb9a28e017b3a"}, + {file = "pywin32-306-cp38-cp38-win_amd64.whl", hash = "sha256:e8ac1ae3601bee6ca9f7cb4b5363bf1c0badb935ef243c4733ff9a393b1690c0"}, + {file = "pywin32-306-cp39-cp39-win32.whl", hash = "sha256:e25fd5b485b55ac9c057f67d94bc203f3f6595078d1fb3b458c9c28b7153a802"}, + {file = "pywin32-306-cp39-cp39-win_amd64.whl", hash = "sha256:39b61c15272833b5c329a2989999dcae836b1eed650252ab1b7bfbe1d59f30f4"}, +] + +[[package]] +name = "pywinpty" +version = "2.0.10" +description = "Pseudo terminal support for Windows from Python." +optional = false +python-versions = ">=3.7" +files = [ + {file = "pywinpty-2.0.10-cp310-none-win_amd64.whl", hash = "sha256:4c7d06ad10f6e92bc850a467f26d98f4f30e73d2fe5926536308c6ae0566bc16"}, + {file = "pywinpty-2.0.10-cp311-none-win_amd64.whl", hash = "sha256:7ffbd66310b83e42028fc9df7746118978d94fba8c1ebf15a7c1275fdd80b28a"}, + {file = "pywinpty-2.0.10-cp37-none-win_amd64.whl", hash = "sha256:38cb924f2778b5751ef91a75febd114776b3af0ae411bc667be45dd84fc881d3"}, + {file = "pywinpty-2.0.10-cp38-none-win_amd64.whl", hash = "sha256:902d79444b29ad1833b8d5c3c9aabdfd428f4f068504430df18074007c8c0de8"}, + {file = "pywinpty-2.0.10-cp39-none-win_amd64.whl", hash = "sha256:3c46aef80dd50979aff93de199e4a00a8ee033ba7a03cadf0a91fed45f0c39d7"}, + {file = "pywinpty-2.0.10.tar.gz", hash = "sha256:cdbb5694cf8c7242c2ecfaca35c545d31fa5d5814c3d67a4e628f803f680ebea"}, +] + +[[package]] +name = "pyyaml" +version = "6.0" +description = "YAML parser and emitter for Python" +optional = false +python-versions = ">=3.6" +files = [ + {file = "PyYAML-6.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:d4db7c7aef085872ef65a8fd7d6d09a14ae91f691dec3e87ee5ee0539d516f53"}, + {file = "PyYAML-6.0-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:9df7ed3b3d2e0ecfe09e14741b857df43adb5a3ddadc919a2d94fbdf78fea53c"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:77f396e6ef4c73fdc33a9157446466f1cff553d979bd00ecb64385760c6babdc"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:a80a78046a72361de73f8f395f1f1e49f956c6be882eed58505a15f3e430962b"}, + {file = "PyYAML-6.0-cp310-cp310-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:f84fbc98b019fef2ee9a1cb3ce93e3187a6df0b2538a651bfb890254ba9f90b5"}, + {file = "PyYAML-6.0-cp310-cp310-win32.whl", hash = "sha256:2cd5df3de48857ed0544b34e2d40e9fac445930039f3cfe4bcc592a1f836d513"}, + {file = "PyYAML-6.0-cp310-cp310-win_amd64.whl", hash = 
"sha256:daf496c58a8c52083df09b80c860005194014c3698698d1a57cbcfa182142a3a"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:d4b0ba9512519522b118090257be113b9468d804b19d63c71dbcf4a48fa32358"}, + {file = "PyYAML-6.0-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:81957921f441d50af23654aa6c5e5eaf9b06aba7f0a19c18a538dc7ef291c5a1"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:afa17f5bc4d1b10afd4466fd3a44dc0e245382deca5b3c353d8b757f9e3ecb8d"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:dbad0e9d368bb989f4515da330b88a057617d16b6a8245084f1b05400f24609f"}, + {file = "PyYAML-6.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:432557aa2c09802be39460360ddffd48156e30721f5e8d917f01d31694216782"}, + {file = "PyYAML-6.0-cp311-cp311-win32.whl", hash = "sha256:bfaef573a63ba8923503d27530362590ff4f576c626d86a9fed95822a8255fd7"}, + {file = "PyYAML-6.0-cp311-cp311-win_amd64.whl", hash = "sha256:01b45c0191e6d66c470b6cf1b9531a771a83c1c4208272ead47a3ae4f2f603bf"}, + {file = "PyYAML-6.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:897b80890765f037df3403d22bab41627ca8811ae55e9a722fd0392850ec4d86"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:50602afada6d6cbfad699b0c7bb50d5ccffa7e46a3d738092afddc1f9758427f"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:48c346915c114f5fdb3ead70312bd042a953a8ce5c7106d5bfb1a5254e47da92"}, + {file = "PyYAML-6.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:98c4d36e99714e55cfbaaee6dd5badbc9a1ec339ebfc3b1f52e293aee6bb71a4"}, + {file = "PyYAML-6.0-cp36-cp36m-win32.whl", hash = "sha256:0283c35a6a9fbf047493e3a0ce8d79ef5030852c51e9d911a27badfde0605293"}, + {file = "PyYAML-6.0-cp36-cp36m-win_amd64.whl", hash = "sha256:07751360502caac1c067a8132d150cf3d61339af5691fe9e87803040dbc5db57"}, + {file = "PyYAML-6.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:819b3830a1543db06c4d4b865e70ded25be52a2e0631ccd2f6a47a2822f2fd7c"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:473f9edb243cb1935ab5a084eb238d842fb8f404ed2193a915d1784b5a6b5fc0"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:0ce82d761c532fe4ec3f87fc45688bdd3a4c1dc5e0b4a19814b9009a29baefd4"}, + {file = "PyYAML-6.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:231710d57adfd809ef5d34183b8ed1eeae3f76459c18fb4a0b373ad56bedcdd9"}, + {file = "PyYAML-6.0-cp37-cp37m-win32.whl", hash = "sha256:c5687b8d43cf58545ade1fe3e055f70eac7a5a1a0bf42824308d868289a95737"}, + {file = "PyYAML-6.0-cp37-cp37m-win_amd64.whl", hash = "sha256:d15a181d1ecd0d4270dc32edb46f7cb7733c7c508857278d3d378d14d606db2d"}, + {file = "PyYAML-6.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:0b4624f379dab24d3725ffde76559cff63d9ec94e1736b556dacdfebe5ab6d4b"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:213c60cd50106436cc818accf5baa1aba61c0189ff610f64f4a3e8c6726218ba"}, + {file = "PyYAML-6.0-cp38-cp38-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:9fa600030013c4de8165339db93d182b9431076eb98eb40ee068700c9c813e34"}, + {file = 
"PyYAML-6.0-cp38-cp38-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:277a0ef2981ca40581a47093e9e2d13b3f1fbbeffae064c1d21bfceba2030287"}, + {file = "PyYAML-6.0-cp38-cp38-win32.whl", hash = "sha256:d4eccecf9adf6fbcc6861a38015c2a64f38b9d94838ac1810a9023a0609e1b78"}, + {file = "PyYAML-6.0-cp38-cp38-win_amd64.whl", hash = "sha256:1e4747bc279b4f613a09eb64bba2ba602d8a6664c6ce6396a4d0cd413a50ce07"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:055d937d65826939cb044fc8c9b08889e8c743fdc6a32b33e2390f66013e449b"}, + {file = "PyYAML-6.0-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:e61ceaab6f49fb8bdfaa0f92c4b57bcfbea54c09277b1b4f7ac376bfb7a7c174"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:d67d839ede4ed1b28a4e8909735fc992a923cdb84e618544973d7dfc71540803"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_17_s390x.manylinux2014_s390x.whl", hash = "sha256:cba8c411ef271aa037d7357a2bc8f9ee8b58b9965831d9e51baf703280dc73d3"}, + {file = "PyYAML-6.0-cp39-cp39-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:40527857252b61eacd1d9af500c3337ba8deb8fc298940291486c465c8b46ec0"}, + {file = "PyYAML-6.0-cp39-cp39-win32.whl", hash = "sha256:b5b9eccad747aabaaffbc6064800670f0c297e52c12754eb1d976c57e4f74dcb"}, + {file = "PyYAML-6.0-cp39-cp39-win_amd64.whl", hash = "sha256:b3d267842bf12586ba6c734f89d1f5b871df0273157918b0ccefa29deb05c21c"}, + {file = "PyYAML-6.0.tar.gz", hash = "sha256:68fb519c14306fec9720a2a5b45bc9f0c8d1b9c72adf45c37baedfcd949c35a2"}, +] + +[[package]] +name = "pyzmq" +version = "25.1.0" +description = "Python bindings for 0MQ" +optional = false +python-versions = ">=3.6" +files = [ + {file = "pyzmq-25.1.0-cp310-cp310-macosx_10_15_universal2.whl", hash = "sha256:1a6169e69034eaa06823da6a93a7739ff38716142b3596c180363dee729d713d"}, + {file = "pyzmq-25.1.0-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:19d0383b1f18411d137d891cab567de9afa609b214de68b86e20173dc624c101"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f1e931d9a92f628858a50f5bdffdfcf839aebe388b82f9d2ccd5d22a38a789dc"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:97d984b1b2f574bc1bb58296d3c0b64b10e95e7026f8716ed6c0b86d4679843f"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:154bddda2a351161474b36dba03bf1463377ec226a13458725183e508840df89"}, + {file = "pyzmq-25.1.0-cp310-cp310-manylinux_2_28_x86_64.whl", hash = "sha256:cb6d161ae94fb35bb518b74bb06b7293299c15ba3bc099dccd6a5b7ae589aee3"}, + {file = "pyzmq-25.1.0-cp310-cp310-musllinux_1_1_aarch64.whl", hash = "sha256:90146ab578931e0e2826ee39d0c948d0ea72734378f1898939d18bc9c823fcf9"}, + {file = "pyzmq-25.1.0-cp310-cp310-musllinux_1_1_i686.whl", hash = "sha256:831ba20b660b39e39e5ac8603e8193f8fce1ee03a42c84ade89c36a251449d80"}, + {file = "pyzmq-25.1.0-cp310-cp310-musllinux_1_1_x86_64.whl", hash = "sha256:3a522510e3434e12aff80187144c6df556bb06fe6b9d01b2ecfbd2b5bfa5c60c"}, + {file = "pyzmq-25.1.0-cp310-cp310-win32.whl", hash = "sha256:be24a5867b8e3b9dd5c241de359a9a5217698ff616ac2daa47713ba2ebe30ad1"}, + {file = "pyzmq-25.1.0-cp310-cp310-win_amd64.whl", hash = "sha256:5693dcc4f163481cf79e98cf2d7995c60e43809e325b77a7748d8024b1b7bcba"}, + {file = "pyzmq-25.1.0-cp311-cp311-macosx_10_15_universal2.whl", hash = 
"sha256:13bbe36da3f8aaf2b7ec12696253c0bf6ffe05f4507985a8844a1081db6ec22d"}, + {file = "pyzmq-25.1.0-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:69511d604368f3dc58d4be1b0bad99b61ee92b44afe1cd9b7bd8c5e34ea8248a"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4a983c8694667fd76d793ada77fd36c8317e76aa66eec75be2653cef2ea72883"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:332616f95eb400492103ab9d542b69d5f0ff628b23129a4bc0a2fd48da6e4e0b"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:58416db767787aedbfd57116714aad6c9ce57215ffa1c3758a52403f7c68cff5"}, + {file = "pyzmq-25.1.0-cp311-cp311-manylinux_2_28_x86_64.whl", hash = "sha256:cad9545f5801a125f162d09ec9b724b7ad9b6440151b89645241d0120e119dcc"}, + {file = "pyzmq-25.1.0-cp311-cp311-musllinux_1_1_aarch64.whl", hash = "sha256:d6128d431b8dfa888bf51c22a04d48bcb3d64431caf02b3cb943269f17fd2994"}, + {file = "pyzmq-25.1.0-cp311-cp311-musllinux_1_1_i686.whl", hash = "sha256:2b15247c49d8cbea695b321ae5478d47cffd496a2ec5ef47131a9e79ddd7e46c"}, + {file = "pyzmq-25.1.0-cp311-cp311-musllinux_1_1_x86_64.whl", hash = "sha256:442d3efc77ca4d35bee3547a8e08e8d4bb88dadb54a8377014938ba98d2e074a"}, + {file = "pyzmq-25.1.0-cp311-cp311-win32.whl", hash = "sha256:65346f507a815a731092421d0d7d60ed551a80d9b75e8b684307d435a5597425"}, + {file = "pyzmq-25.1.0-cp311-cp311-win_amd64.whl", hash = "sha256:8b45d722046fea5a5694cba5d86f21f78f0052b40a4bbbbf60128ac55bfcc7b6"}, + {file = "pyzmq-25.1.0-cp36-cp36m-macosx_10_9_x86_64.whl", hash = "sha256:f45808eda8b1d71308c5416ef3abe958f033fdbb356984fabbfc7887bed76b3f"}, + {file = "pyzmq-25.1.0-cp36-cp36m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8b697774ea8273e3c0460cf0bba16cd85ca6c46dfe8b303211816d68c492e132"}, + {file = "pyzmq-25.1.0-cp36-cp36m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:b324fa769577fc2c8f5efcd429cef5acbc17d63fe15ed16d6dcbac2c5eb00849"}, + {file = "pyzmq-25.1.0-cp36-cp36m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:5873d6a60b778848ce23b6c0ac26c39e48969823882f607516b91fb323ce80e5"}, + {file = "pyzmq-25.1.0-cp36-cp36m-musllinux_1_1_aarch64.whl", hash = "sha256:f0d9e7ba6a815a12c8575ba7887da4b72483e4cfc57179af10c9b937f3f9308f"}, + {file = "pyzmq-25.1.0-cp36-cp36m-musllinux_1_1_i686.whl", hash = "sha256:414b8beec76521358b49170db7b9967d6974bdfc3297f47f7d23edec37329b00"}, + {file = "pyzmq-25.1.0-cp36-cp36m-musllinux_1_1_x86_64.whl", hash = "sha256:01f06f33e12497dca86353c354461f75275a5ad9eaea181ac0dc1662da8074fa"}, + {file = "pyzmq-25.1.0-cp36-cp36m-win32.whl", hash = "sha256:b5a07c4f29bf7cb0164664ef87e4aa25435dcc1f818d29842118b0ac1eb8e2b5"}, + {file = "pyzmq-25.1.0-cp36-cp36m-win_amd64.whl", hash = "sha256:968b0c737797c1809ec602e082cb63e9824ff2329275336bb88bd71591e94a90"}, + {file = "pyzmq-25.1.0-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:47b915ba666c51391836d7ed9a745926b22c434efa76c119f77bcffa64d2c50c"}, + {file = "pyzmq-25.1.0-cp37-cp37m-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:5af31493663cf76dd36b00dafbc839e83bbca8a0662931e11816d75f36155897"}, + {file = "pyzmq-25.1.0-cp37-cp37m-manylinux_2_5_i686.manylinux1_i686.whl", hash = "sha256:5489738a692bc7ee9a0a7765979c8a572520d616d12d949eaffc6e061b82b4d1"}, + {file = "pyzmq-25.1.0-cp37-cp37m-manylinux_2_5_x86_64.manylinux1_x86_64.whl", hash = "sha256:1fc56a0221bdf67cfa94ef2d6ce5513a3d209c3dfd21fed4d4e87eca1822e3a3"}, + {file = 
"pyzmq-25.1.0-cp37-cp37m-musllinux_1_1_aarch64.whl", hash = "sha256:75217e83faea9edbc29516fc90c817bc40c6b21a5771ecb53e868e45594826b0"}, + {file = "pyzmq-25.1.0-cp37-cp37m-musllinux_1_1_i686.whl", hash = "sha256:3830be8826639d801de9053cf86350ed6742c4321ba4236e4b5568528d7bfed7"}, + {file = "pyzmq-25.1.0-cp37-cp37m-musllinux_1_1_x86_64.whl", hash = "sha256:3575699d7fd7c9b2108bc1c6128641a9a825a58577775ada26c02eb29e09c517"}, + {file = "pyzmq-25.1.0-cp37-cp37m-win32.whl", hash = "sha256:95bd3a998d8c68b76679f6b18f520904af5204f089beebb7b0301d97704634dd"}, + {file = "pyzmq-25.1.0-cp37-cp37m-win_amd64.whl", hash = "sha256:dbc466744a2db4b7ca05589f21ae1a35066afada2f803f92369f5877c100ef62"}, + {file = "pyzmq-25.1.0-cp38-cp38-macosx_10_15_universal2.whl", hash = "sha256:3bed53f7218490c68f0e82a29c92335daa9606216e51c64f37b48eb78f1281f4"}, + {file = "pyzmq-25.1.0-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:eb52e826d16c09ef87132c6e360e1879c984f19a4f62d8a935345deac43f3c12"}, + {file = "pyzmq-25.1.0-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:ddbef8b53cd16467fdbfa92a712eae46dd066aa19780681a2ce266e88fbc7165"}, + {file = "pyzmq-25.1.0-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:9301cf1d7fc1ddf668d0abbe3e227fc9ab15bc036a31c247276012abb921b5ff"}, + {file = "pyzmq-25.1.0-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7e23a8c3b6c06de40bdb9e06288180d630b562db8ac199e8cc535af81f90e64b"}, + {file = "pyzmq-25.1.0-cp38-cp38-musllinux_1_1_aarch64.whl", hash = "sha256:4a82faae00d1eed4809c2f18b37f15ce39a10a1c58fe48b60ad02875d6e13d80"}, + {file = "pyzmq-25.1.0-cp38-cp38-musllinux_1_1_i686.whl", hash = "sha256:c8398a1b1951aaa330269c35335ae69744be166e67e0ebd9869bdc09426f3871"}, + {file = "pyzmq-25.1.0-cp38-cp38-musllinux_1_1_x86_64.whl", hash = "sha256:d40682ac60b2a613d36d8d3a0cd14fbdf8e7e0618fbb40aa9fa7b796c9081584"}, + {file = "pyzmq-25.1.0-cp38-cp38-win32.whl", hash = "sha256:33d5c8391a34d56224bccf74f458d82fc6e24b3213fc68165c98b708c7a69325"}, + {file = "pyzmq-25.1.0-cp38-cp38-win_amd64.whl", hash = "sha256:c66b7ff2527e18554030319b1376d81560ca0742c6e0b17ff1ee96624a5f1afd"}, + {file = "pyzmq-25.1.0-cp39-cp39-macosx_10_15_universal2.whl", hash = "sha256:af56229ea6527a849ac9fb154a059d7e32e77a8cba27e3e62a1e38d8808cb1a5"}, + {file = "pyzmq-25.1.0-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bdca18b94c404af6ae5533cd1bc310c4931f7ac97c148bbfd2cd4bdd62b96253"}, + {file = "pyzmq-25.1.0-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0b6b42f7055bbc562f63f3df3b63e3dd1ebe9727ff0f124c3aa7bcea7b3a00f9"}, + {file = "pyzmq-25.1.0-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:4c2fc7aad520a97d64ffc98190fce6b64152bde57a10c704b337082679e74f67"}, + {file = "pyzmq-25.1.0-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:be86a26415a8b6af02cd8d782e3a9ae3872140a057f1cadf0133de685185c02b"}, + {file = "pyzmq-25.1.0-cp39-cp39-musllinux_1_1_aarch64.whl", hash = "sha256:851fb2fe14036cfc1960d806628b80276af5424db09fe5c91c726890c8e6d943"}, + {file = "pyzmq-25.1.0-cp39-cp39-musllinux_1_1_i686.whl", hash = "sha256:2a21fec5c3cea45421a19ccbe6250c82f97af4175bc09de4d6dd78fb0cb4c200"}, + {file = "pyzmq-25.1.0-cp39-cp39-musllinux_1_1_x86_64.whl", hash = "sha256:bad172aba822444b32eae54c2d5ab18cd7dee9814fd5c7ed026603b8cae2d05f"}, + {file = "pyzmq-25.1.0-cp39-cp39-win32.whl", hash = "sha256:4d67609b37204acad3d566bb7391e0ecc25ef8bae22ff72ebe2ad7ffb7847158"}, + {file = 
"pyzmq-25.1.0-cp39-cp39-win_amd64.whl", hash = "sha256:71c7b5896e40720d30cd77a81e62b433b981005bbff0cb2f739e0f8d059b5d99"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-macosx_10_9_x86_64.whl", hash = "sha256:4cb27ef9d3bdc0c195b2dc54fcb8720e18b741624686a81942e14c8b67cc61a6"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:0c4fc2741e0513b5d5a12fe200d6785bbcc621f6f2278893a9ca7bed7f2efb7d"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:fc34fdd458ff77a2a00e3c86f899911f6f269d393ca5675842a6e92eea565bae"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:8751f9c1442624da391bbd92bd4b072def6d7702a9390e4479f45c182392ff78"}, + {file = "pyzmq-25.1.0-pp37-pypy37_pp73-win_amd64.whl", hash = "sha256:6581e886aec3135964a302a0f5eb68f964869b9efd1dbafdebceaaf2934f8a68"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-macosx_10_9_x86_64.whl", hash = "sha256:5482f08d2c3c42b920e8771ae8932fbaa0a67dff925fc476996ddd8155a170f3"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:5e7fbcafa3ea16d1de1f213c226005fea21ee16ed56134b75b2dede5a2129e62"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:adecf6d02b1beab8d7c04bc36f22bb0e4c65a35eb0b4750b91693631d4081c70"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:f6d39e42a0aa888122d1beb8ec0d4ddfb6c6b45aecb5ba4013c27e2f28657765"}, + {file = "pyzmq-25.1.0-pp38-pypy38_pp73-win_amd64.whl", hash = "sha256:7018289b402ebf2b2c06992813523de61d4ce17bd514c4339d8f27a6f6809492"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-macosx_10_9_x86_64.whl", hash = "sha256:9e68ae9864d260b18f311b68d29134d8776d82e7f5d75ce898b40a88df9db30f"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:e21cc00e4debe8f54c3ed7b9fcca540f46eee12762a9fa56feb8512fd9057161"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:2f666ae327a6899ff560d741681fdcdf4506f990595201ed39b44278c471ad98"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2f5efcc29056dfe95e9c9db0dfbb12b62db9c4ad302f812931b6d21dd04a9119"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-manylinux_2_28_x86_64.whl", hash = "sha256:48e5e59e77c1a83162ab3c163fc01cd2eebc5b34560341a67421b09be0891287"}, + {file = "pyzmq-25.1.0-pp39-pypy39_pp73-win_amd64.whl", hash = "sha256:108c96ebbd573d929740d66e4c3d1bdf31d5cde003b8dc7811a3c8c5b0fc173b"}, + {file = "pyzmq-25.1.0.tar.gz", hash = "sha256:80c41023465d36280e801564a69cbfce8ae85ff79b080e1913f6e90481fb8957"}, +] + +[package.dependencies] +cffi = {version = "*", markers = "implementation_name == \"pypy\""} + +[[package]] +name = "qtconsole" +version = "5.4.3" +description = "Jupyter Qt console" +optional = false +python-versions = ">= 3.7" +files = [ + {file = "qtconsole-5.4.3-py3-none-any.whl", hash = "sha256:35fd6e87b1f6d1fd41801b07e69339f8982e76afd4fa8ef35595bc6036717189"}, + {file = "qtconsole-5.4.3.tar.gz", hash = "sha256:5e4082a86a201796b2a5cfd4298352d22b158b51b57736531824715fc2a979dd"}, +] + +[package.dependencies] +ipykernel = ">=4.1" +ipython-genutils = "*" +jupyter-client = ">=4.1" +jupyter-core = "*" +packaging = "*" +pygments = "*" +pyzmq = ">=17.1" +qtpy = ">=2.0.1" +traitlets = "<5.2.1 || >5.2.1,<5.2.2 || >5.2.2" + 
+[package.extras] +doc = ["Sphinx (>=1.3)"] +test = ["flaky", "pytest", "pytest-qt"] + +[[package]] +name = "qtpy" +version = "2.3.1" +description = "Provides an abstraction layer on top of the various Qt bindings (PyQt5/6 and PySide2/6)." +optional = false +python-versions = ">=3.7" +files = [ + {file = "QtPy-2.3.1-py3-none-any.whl", hash = "sha256:5193d20e0b16e4d9d3bc2c642d04d9f4e2c892590bd1b9c92bfe38a95d5a2e12"}, + {file = "QtPy-2.3.1.tar.gz", hash = "sha256:a8c74982d6d172ce124d80cafd39653df78989683f760f2281ba91a6e7b9de8b"}, +] + +[package.dependencies] +packaging = "*" + +[package.extras] +test = ["pytest (>=6,!=7.0.0,!=7.0.1)", "pytest-cov (>=3.0.0)", "pytest-qt"] + +[[package]] +name = "requests" +version = "2.31.0" +description = "Python HTTP for Humans." +optional = false +python-versions = ">=3.7" +files = [ + {file = "requests-2.31.0-py3-none-any.whl", hash = "sha256:58cd2187c01e70e6e26505bca751777aa9f2ee0b7f4300988b709f44e013003f"}, + {file = "requests-2.31.0.tar.gz", hash = "sha256:942c5a758f98d790eaed1a29cb6eefc7ffb0d1cf7af05c3d2791656dbd6ad1e1"}, +] + +[package.dependencies] +certifi = ">=2017.4.17" +charset-normalizer = ">=2,<4" +idna = ">=2.5,<4" +urllib3 = ">=1.21.1,<3" + +[package.extras] +socks = ["PySocks (>=1.5.6,!=1.5.7)"] +use-chardet-on-py3 = ["chardet (>=3.0.2,<6)"] + +[[package]] +name = "rfc3339-validator" +version = "0.1.4" +description = "A pure python RFC3339 validator" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "rfc3339_validator-0.1.4-py2.py3-none-any.whl", hash = "sha256:24f6ec1eda14ef823da9e36ec7113124b39c04d50a4d3d3a3c2859577e7791fa"}, + {file = "rfc3339_validator-0.1.4.tar.gz", hash = "sha256:138a2abdf93304ad60530167e51d2dfb9549521a836871b88d7f4695d0022f6b"}, +] + +[package.dependencies] +six = "*" + +[[package]] +name = "rfc3986-validator" +version = "0.1.1" +description = "Pure python rfc3986 validator" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*, !=3.4.*" +files = [ + {file = "rfc3986_validator-0.1.1-py2.py3-none-any.whl", hash = "sha256:2f235c432ef459970b4306369336b9d5dbdda31b510ca1e327636e01f528bfa9"}, + {file = "rfc3986_validator-0.1.1.tar.gz", hash = "sha256:3d44bde7921b3b9ec3ae4e3adca370438eccebc676456449b145d533b240d055"}, +] + +[[package]] +name = "scikit-learn" +version = "1.2.2" +description = "A set of python modules for machine learning and data mining" +optional = false +python-versions = ">=3.8" +files = [ + {file = "scikit-learn-1.2.2.tar.gz", hash = "sha256:8429aea30ec24e7a8c7ed8a3fa6213adf3814a6efbea09e16e0a0c71e1a1a3d7"}, + {file = "scikit_learn-1.2.2-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:99cc01184e347de485bf253d19fcb3b1a3fb0ee4cea5ee3c43ec0cc429b6d29f"}, + {file = "scikit_learn-1.2.2-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:e6e574db9914afcb4e11ade84fab084536a895ca60aadea3041e85b8ac963edb"}, + {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:6fe83b676f407f00afa388dd1fdd49e5c6612e551ed84f3b1b182858f09e987d"}, + {file = "scikit_learn-1.2.2-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:2e2642baa0ad1e8f8188917423dd73994bf25429f8893ddbe115be3ca3183584"}, + {file = "scikit_learn-1.2.2-cp310-cp310-win_amd64.whl", hash = "sha256:ad66c3848c0a1ec13464b2a95d0a484fd5b02ce74268eaa7e0c697b904f31d6c"}, + {file = "scikit_learn-1.2.2-cp311-cp311-macosx_10_9_x86_64.whl", hash = 
"sha256:dfeaf8be72117eb61a164ea6fc8afb6dfe08c6f90365bde2dc16456e4bc8e45f"}, + {file = "scikit_learn-1.2.2-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:fe0aa1a7029ed3e1dcbf4a5bc675aa3b1bc468d9012ecf6c6f081251ca47f590"}, + {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:065e9673e24e0dc5113e2dd2b4ca30c9d8aa2fa90f4c0597241c93b63130d233"}, + {file = "scikit_learn-1.2.2-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:bf036ea7ef66115e0d49655f16febfa547886deba20149555a41d28f56fd6d3c"}, + {file = "scikit_learn-1.2.2-cp311-cp311-win_amd64.whl", hash = "sha256:8b0670d4224a3c2d596fd572fb4fa673b2a0ccfb07152688ebd2ea0b8c61025c"}, + {file = "scikit_learn-1.2.2-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:9c710ff9f9936ba8a3b74a455ccf0dcf59b230caa1e9ba0223773c490cab1e51"}, + {file = "scikit_learn-1.2.2-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:2dd3ffd3950e3d6c0c0ef9033a9b9b32d910c61bd06cb8206303fb4514b88a49"}, + {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:44b47a305190c28dd8dd73fc9445f802b6ea716669cfc22ab1eb97b335d238b1"}, + {file = "scikit_learn-1.2.2-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:953236889928d104c2ef14027539f5f2609a47ebf716b8cbe4437e85dce42744"}, + {file = "scikit_learn-1.2.2-cp38-cp38-win_amd64.whl", hash = "sha256:7f69313884e8eb311460cc2f28676d5e400bd929841a2c8eb8742ae78ebf7c20"}, + {file = "scikit_learn-1.2.2-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:8156db41e1c39c69aa2d8599ab7577af53e9e5e7a57b0504e116cc73c39138dd"}, + {file = "scikit_learn-1.2.2-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:fe175ee1dab589d2e1033657c5b6bec92a8a3b69103e3dd361b58014729975c3"}, + {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:7d5312d9674bed14f73773d2acf15a3272639b981e60b72c9b190a0cffed5bad"}, + {file = "scikit_learn-1.2.2-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ea061bf0283bf9a9f36ea3c5d3231ba2176221bbd430abd2603b1c3b2ed85c89"}, + {file = "scikit_learn-1.2.2-cp39-cp39-win_amd64.whl", hash = "sha256:6477eed40dbce190f9f9e9d0d37e020815825b300121307942ec2110302b66a3"}, +] + +[package.dependencies] +joblib = ">=1.1.1" +numpy = ">=1.17.3" +scipy = ">=1.3.2" +threadpoolctl = ">=2.0.0" + +[package.extras] +benchmark = ["matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "pandas (>=1.0.5)"] +docs = ["Pillow (>=7.1.2)", "matplotlib (>=3.1.3)", "memory-profiler (>=0.57.0)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)", "sphinx (>=4.0.1)", "sphinx-gallery (>=0.7.0)", "sphinx-prompt (>=1.3.0)", "sphinxext-opengraph (>=0.4.2)"] +examples = ["matplotlib (>=3.1.3)", "pandas (>=1.0.5)", "plotly (>=5.10.0)", "pooch (>=1.6.0)", "scikit-image (>=0.16.2)", "seaborn (>=0.9.0)"] +tests = ["black (>=22.3.0)", "flake8 (>=3.8.2)", "matplotlib (>=3.1.3)", "mypy (>=0.961)", "numpydoc (>=1.2.0)", "pandas (>=1.0.5)", "pooch (>=1.6.0)", "pyamg (>=4.0.0)", "pytest (>=5.3.1)", "pytest-cov (>=2.9.0)", "scikit-image (>=0.16.2)"] + +[[package]] +name = "scipy" +version = "1.9.3" +description = "Fundamental algorithms for scientific computing in Python" +optional = false +python-versions = ">=3.8" +files = [ + {file = "scipy-1.9.3-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1884b66a54887e21addf9c16fb588720a8309a57b2e258ae1c7986d4444d3bc0"}, + {file = 
"scipy-1.9.3-cp310-cp310-macosx_12_0_arm64.whl", hash = "sha256:83b89e9586c62e787f5012e8475fbb12185bafb996a03257e9675cd73d3736dd"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:1a72d885fa44247f92743fc20732ae55564ff2a519e8302fb7e18717c5355a8b"}, + {file = "scipy-1.9.3-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:d01e1dd7b15bd2449c8bfc6b7cc67d630700ed655654f0dfcf121600bad205c9"}, + {file = "scipy-1.9.3-cp310-cp310-win_amd64.whl", hash = "sha256:68239b6aa6f9c593da8be1509a05cb7f9efe98b80f43a5861cd24c7557e98523"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:b41bc822679ad1c9a5f023bc93f6d0543129ca0f37c1ce294dd9d386f0a21096"}, + {file = "scipy-1.9.3-cp311-cp311-macosx_12_0_arm64.whl", hash = "sha256:90453d2b93ea82a9f434e4e1cba043e779ff67b92f7a0e85d05d286a3625df3c"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:83c06e62a390a9167da60bedd4575a14c1f58ca9dfde59830fc42e5197283dab"}, + {file = "scipy-1.9.3-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:abaf921531b5aeaafced90157db505e10345e45038c39e5d9b6c7922d68085cb"}, + {file = "scipy-1.9.3-cp311-cp311-win_amd64.whl", hash = "sha256:06d2e1b4c491dc7d8eacea139a1b0b295f74e1a1a0f704c375028f8320d16e31"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:5a04cd7d0d3eff6ea4719371cbc44df31411862b9646db617c99718ff68d4840"}, + {file = "scipy-1.9.3-cp38-cp38-macosx_12_0_arm64.whl", hash = "sha256:545c83ffb518094d8c9d83cce216c0c32f8c04aaf28b92cc8283eda0685162d5"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:0d54222d7a3ba6022fdf5773931b5d7c56efe41ede7f7128c7b1637700409108"}, + {file = "scipy-1.9.3-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:cff3a5295234037e39500d35316a4c5794739433528310e117b8a9a0c76d20fc"}, + {file = "scipy-1.9.3-cp38-cp38-win_amd64.whl", hash = "sha256:2318bef588acc7a574f5bfdff9c172d0b1bf2c8143d9582e05f878e580a3781e"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:d644a64e174c16cb4b2e41dfea6af722053e83d066da7343f333a54dae9bc31c"}, + {file = "scipy-1.9.3-cp39-cp39-macosx_12_0_arm64.whl", hash = "sha256:da8245491d73ed0a994ed9c2e380fd058ce2fa8a18da204681f2fe1f57f98f95"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:4db5b30849606a95dcf519763dd3ab6fe9bd91df49eba517359e450a7d80ce2e"}, + {file = "scipy-1.9.3-cp39-cp39-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:c68db6b290cbd4049012990d7fe71a2abd9ffbe82c0056ebe0f01df8be5436b0"}, + {file = "scipy-1.9.3-cp39-cp39-win_amd64.whl", hash = "sha256:5b88e6d91ad9d59478fafe92a7c757d00c59e3bdc3331be8ada76a4f8d683f58"}, + {file = "scipy-1.9.3.tar.gz", hash = "sha256:fbc5c05c85c1a02be77b1ff591087c83bc44579c6d2bd9fb798bb64ea5e1a027"}, +] + +[package.dependencies] +numpy = ">=1.18.5,<1.26.0" + +[package.extras] +dev = ["flake8", "mypy", "pycodestyle", "typing_extensions"] +doc = ["matplotlib (>2)", "numpydoc", "pydata-sphinx-theme (==0.9.0)", "sphinx (!=4.1.0)", "sphinx-panels (>=0.5.2)", "sphinx-tabs"] +test = ["asv", "gmpy2", "mpmath", "pytest", "pytest-cov", "pytest-xdist", "scikit-umfpack", "threadpoolctl"] + +[[package]] +name = "send2trash" +version = "1.8.2" +description = "Send file to trash natively under Mac OS X, Windows and Linux" +optional = false +python-versions = 
"!=3.0.*,!=3.1.*,!=3.2.*,!=3.3.*,!=3.4.*,>=2.7" +files = [ + {file = "Send2Trash-1.8.2-py3-none-any.whl", hash = "sha256:a384719d99c07ce1eefd6905d2decb6f8b7ed054025bb0e618919f945de4f679"}, + {file = "Send2Trash-1.8.2.tar.gz", hash = "sha256:c132d59fa44b9ca2b1699af5c86f57ce9f4c5eb56629d5d55fbb7a35f84e2312"}, +] + +[package.extras] +nativelib = ["pyobjc-framework-Cocoa", "pywin32"] +objc = ["pyobjc-framework-Cocoa"] +win32 = ["pywin32"] + +[[package]] +name = "setuptools" +version = "68.0.0" +description = "Easily download, build, install, upgrade, and uninstall Python packages" +optional = false +python-versions = ">=3.7" +files = [ + {file = "setuptools-68.0.0-py3-none-any.whl", hash = "sha256:11e52c67415a381d10d6b462ced9cfb97066179f0e871399e006c4ab101fc85f"}, + {file = "setuptools-68.0.0.tar.gz", hash = "sha256:baf1fdb41c6da4cd2eae722e135500da913332ab3f2f5c7d33af9b492acb5235"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "pygments-github-lexers (==0.0.5)", "rst.linker (>=1.9)", "sphinx (>=3.5)", "sphinx-favicon", "sphinx-hoverxref (<2)", "sphinx-inline-tabs", "sphinx-lint", "sphinx-notfound-page (==0.8.3)", "sphinx-reredirects", "sphinxcontrib-towncrier"] +testing = ["build[virtualenv]", "filelock (>=3.4.0)", "flake8-2020", "ini2toml[lite] (>=0.9)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pip (>=19.1)", "pip-run (>=8.8)", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-mypy (>=0.9.1)", "pytest-perf", "pytest-ruff", "pytest-timeout", "pytest-xdist", "tomli-w (>=1.0.0)", "virtualenv (>=13.0.0)", "wheel"] +testing-integration = ["build[virtualenv]", "filelock (>=3.4.0)", "jaraco.envs (>=2.2)", "jaraco.path (>=3.2.0)", "pytest", "pytest-enabler", "pytest-xdist", "tomli", "virtualenv (>=13.0.0)", "wheel"] + +[[package]] +name = "shapely" +version = "1.8.5" +description = "Geometric objects, predicates, and operations" +optional = false +python-versions = ">=3.6" +files = [ + {file = "Shapely-1.8.5-cp310-cp310-macosx_10_9_universal2.whl", hash = "sha256:dbc8b2ed8e7655c0b33abbb5c8c74013699fe29d2ca15f354b6e1abd29f0ed7a"}, + {file = "Shapely-1.8.5-cp310-cp310-macosx_10_9_x86_64.whl", hash = "sha256:1c6198da94fc993049fc2d31bd183f4f4de4f33f70be8437a9807ae8788c069c"}, + {file = "Shapely-1.8.5-cp310-cp310-macosx_11_0_arm64.whl", hash = "sha256:a9a660dae5780fac8bdccaa2c68ca9ddcddb5d55330be47869fa9a8f55cb9580"}, + {file = "Shapely-1.8.5-cp310-cp310-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:6baf62a648c8745de3fb69e96a5ef93b1854159dd9c85527978e68fc2b062f76"}, + {file = "Shapely-1.8.5-cp310-cp310-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:81c1286a1e46d1224bec8c1dba8f174b286710df796b4256198e64d1e987dfc9"}, + {file = "Shapely-1.8.5-cp310-cp310-win32.whl", hash = "sha256:999952cf8ed7a033debf7d2715d7029e7f1a1144eb1bdefabe0d64aa4c9910cd"}, + {file = "Shapely-1.8.5-cp310-cp310-win_amd64.whl", hash = "sha256:7a840e7d96a01e6da6725a002dab662a1dbd1f5d53a6433f7fdbaf2a1322576b"}, + {file = "Shapely-1.8.5-cp311-cp311-macosx_10_9_universal2.whl", hash = "sha256:27238d6890f409b0744c4d740413698bbb94d8cf5249406c4668ca28ade8df3b"}, + {file = "Shapely-1.8.5-cp311-cp311-macosx_10_9_x86_64.whl", hash = "sha256:1512eb7222f4a00ec4d9c81762bd15ed622fa5e06dd9fe64721db6a5fc3345fe"}, + {file = "Shapely-1.8.5-cp311-cp311-macosx_11_0_arm64.whl", hash = "sha256:a0a5621d68ac371162768f1a05a91bcf64ac5f00a517458db5b8d8896c8e4743"}, + {file = 
"Shapely-1.8.5-cp311-cp311-manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:8a1545c3e539516e87f5c4a96d5a76c623e385e5defe7555b7b26232043c9c5c"}, + {file = "Shapely-1.8.5-cp311-cp311-manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:ba4d98a117ead8b49c2cc8723eb071f04da03d6eb59f340eb51364581bb6c9e0"}, + {file = "Shapely-1.8.5-cp311-cp311-win32.whl", hash = "sha256:b0b60993adb141b5a1ff7f94fc512f9be69320be6e8757d32c460f25359d51f5"}, + {file = "Shapely-1.8.5-cp311-cp311-win_amd64.whl", hash = "sha256:0d014b1d072f2f5b166c9cf57a58507089570b1839a83e14a308df56d467b809"}, + {file = "Shapely-1.8.5-cp37-cp37m-macosx_10_9_x86_64.whl", hash = "sha256:bd55b14fa82d54bf9d7eba4d11c6d9d61f96ab0ffe530c6eb7cd3bb9a6590153"}, + {file = "Shapely-1.8.5-cp37-cp37m-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:bd598f7373e1b5f317a3117d388a68b3c165fa15d5aa7de042848a1acbe4c18d"}, + {file = "Shapely-1.8.5-cp37-cp37m-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:1500f7d9fbe47c02298694e282dbd9cd30f6dbd33f0bd2efd0712bf907d9a2b7"}, + {file = "Shapely-1.8.5-cp37-cp37m-win32.whl", hash = "sha256:b1216ec40baf505dba006bfa0a7ced6b2334261925841fa23fafcdb26d9b1f54"}, + {file = "Shapely-1.8.5-cp37-cp37m-win_amd64.whl", hash = "sha256:c1f2f125b785d468c08ee698472716355b54d8cbd69f7f09081893b4e4111e3f"}, + {file = "Shapely-1.8.5-cp38-cp38-macosx_10_9_universal2.whl", hash = "sha256:eca890915cbebc879c96cfc1b7ff51a09a2326c9f94b6b3e13d6d3cdfc659ed9"}, + {file = "Shapely-1.8.5-cp38-cp38-macosx_10_9_x86_64.whl", hash = "sha256:17f1a4f0ba88e8f53975845af312a276e557e7bac8f0572e8b5805705739c892"}, + {file = "Shapely-1.8.5-cp38-cp38-macosx_11_0_arm64.whl", hash = "sha256:1aa924b11d53fe817c4e5b14ac0c8e980913b9b5f15e01c23a35e3ee53cccf41"}, + {file = "Shapely-1.8.5-cp38-cp38-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:045a9a5ba872431c7559020c6761983e4a0c13c9a0eb7fa93e427b4451f2d8cc"}, + {file = "Shapely-1.8.5-cp38-cp38-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a43b3ba408f00c6588e174f6fb4dba5739fdee87af2e186ed7e8caece6b91568"}, + {file = "Shapely-1.8.5-cp38-cp38-win32.whl", hash = "sha256:72ee40adae74ab93b6cf13d71f7183de8f91b077296a8788e791097703aa279d"}, + {file = "Shapely-1.8.5-cp38-cp38-win_amd64.whl", hash = "sha256:221c9bd94d286b6941b3f86cbebead24c3f54cbac2cb02fb6746fd768be73b80"}, + {file = "Shapely-1.8.5-cp39-cp39-macosx_10_9_universal2.whl", hash = "sha256:2c86c9006b266c64bc1b9c0c0ebc20eff8fcce1bc06d4db13bbfbac9e5e2d910"}, + {file = "Shapely-1.8.5-cp39-cp39-macosx_10_9_x86_64.whl", hash = "sha256:bffeab5498ac32e014845f675d6c4d7a85cba42cbc76ee794da828faf3c12ba6"}, + {file = "Shapely-1.8.5-cp39-cp39-macosx_11_0_arm64.whl", hash = "sha256:52f5b61ee2998a75e5bcadb609036bfd626ec472fc456241150bc95a9088c89c"}, + {file = "Shapely-1.8.5-cp39-cp39-manylinux_2_12_i686.manylinux2010_i686.whl", hash = "sha256:57df87c68e58050546fda07963d2f87a7e16db57029eb67239d27732f65f40d1"}, + {file = "Shapely-1.8.5-cp39-cp39-manylinux_2_12_x86_64.manylinux2010_x86_64.whl", hash = "sha256:a8ad6ebec3b39af4890a59a027ad60e7fece5bfd71ecc998da7b03e5f414d32a"}, + {file = "Shapely-1.8.5-cp39-cp39-win32.whl", hash = "sha256:0e6172d2e9e77aed7b6515bc68eefe623e5be7b865ced4b9892b5205166e2931"}, + {file = "Shapely-1.8.5-cp39-cp39-win_amd64.whl", hash = "sha256:4c9a777bf999eca65ecf9f90821f6c699f72633f1f2fe4ca3fa829e6114fdb45"}, + {file = "Shapely-1.8.5.tar.gz", hash = "sha256:e82b6d60ecfb124120c88fe106a478596bbeab142116d7e7f64a364dac902a92"}, +] + +[package.extras] +all = ["numpy", "pytest", 
"pytest-cov"] +test = ["pytest", "pytest-cov"] +vectorized = ["numpy"] + +[[package]] +name = "six" +version = "1.16.0" +description = "Python 2 and 3 compatibility utilities" +optional = false +python-versions = ">=2.7, !=3.0.*, !=3.1.*, !=3.2.*" +files = [ + {file = "six-1.16.0-py2.py3-none-any.whl", hash = "sha256:8abb2f1d86890a2dfb989f9a77cfcfd3e47c2a354b01111771326f8aa26e0254"}, + {file = "six-1.16.0.tar.gz", hash = "sha256:1e61c37477a1626458e36f7b1d82aa5c9b094fa4802892072e49de9c60c4c926"}, +] + +[[package]] +name = "sniffio" +version = "1.3.0" +description = "Sniff out which async library your code is running under" +optional = false +python-versions = ">=3.7" +files = [ + {file = "sniffio-1.3.0-py3-none-any.whl", hash = "sha256:eecefdce1e5bbfb7ad2eeaabf7c1eeb404d7757c379bd1f7e5cce9d8bf425384"}, + {file = "sniffio-1.3.0.tar.gz", hash = "sha256:e60305c5e5d314f5389259b7f22aaa33d8f7dee49763119234af3755c55b9101"}, +] + +[[package]] +name = "soupsieve" +version = "2.4.1" +description = "A modern CSS selector implementation for Beautiful Soup." +optional = false +python-versions = ">=3.7" +files = [ + {file = "soupsieve-2.4.1-py3-none-any.whl", hash = "sha256:1c1bfee6819544a3447586c889157365a27e10d88cde3ad3da0cf0ddf646feb8"}, + {file = "soupsieve-2.4.1.tar.gz", hash = "sha256:89d12b2d5dfcd2c9e8c22326da9d9aa9cb3dfab0a83a024f05704076ee8d35ea"}, +] + +[[package]] +name = "stack-data" +version = "0.6.2" +description = "Extract data from python stack frames and tracebacks for informative displays" +optional = false +python-versions = "*" +files = [ + {file = "stack_data-0.6.2-py3-none-any.whl", hash = "sha256:cbb2a53eb64e5785878201a97ed7c7b94883f48b87bfb0bbe8b623c74679e4a8"}, + {file = "stack_data-0.6.2.tar.gz", hash = "sha256:32d2dd0376772d01b6cb9fc996f3c8b57a357089dec328ed4b6553d037eaf815"}, +] + +[package.dependencies] +asttokens = ">=2.1.0" +executing = ">=1.2.0" +pure-eval = "*" + +[package.extras] +tests = ["cython", "littleutils", "pygments", "pytest", "typeguard"] + +[[package]] +name = "tenacity" +version = "8.2.2" +description = "Retry code until it succeeds" +optional = false +python-versions = ">=3.6" +files = [ + {file = "tenacity-8.2.2-py3-none-any.whl", hash = "sha256:2f277afb21b851637e8f52e6a613ff08734c347dc19ade928e519d7d2d8569b0"}, + {file = "tenacity-8.2.2.tar.gz", hash = "sha256:43af037822bd0029025877f3b2d97cc4d7bb0c2991000a3d59d71517c5c969e0"}, +] + +[package.extras] +doc = ["reno", "sphinx", "tornado (>=4.5)"] + +[[package]] +name = "termcolor" +version = "2.3.0" +description = "ANSI color formatting for output in terminal" +optional = false +python-versions = ">=3.7" +files = [ + {file = "termcolor-2.3.0-py3-none-any.whl", hash = "sha256:3afb05607b89aed0ffe25202399ee0867ad4d3cb4180d98aaf8eefa6a5f7d475"}, + {file = "termcolor-2.3.0.tar.gz", hash = "sha256:b5b08f68937f138fe92f6c089b99f1e2da0ae56c52b78bf7075fd95420fd9a5a"}, +] + +[package.extras] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "terminado" +version = "0.17.1" +description = "Tornado websocket backend for the Xterm.js Javascript terminal emulator library." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "terminado-0.17.1-py3-none-any.whl", hash = "sha256:8650d44334eba354dd591129ca3124a6ba42c3d5b70df5051b6921d506fdaeae"}, + {file = "terminado-0.17.1.tar.gz", hash = "sha256:6ccbbcd3a4f8a25a5ec04991f39a0b8db52dfcd487ea0e578d977e6752380333"}, +] + +[package.dependencies] +ptyprocess = {version = "*", markers = "os_name != \"nt\""} +pywinpty = {version = ">=1.1.0", markers = "os_name == \"nt\""} +tornado = ">=6.1.0" + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["pre-commit", "pytest (>=7.0)", "pytest-timeout"] + +[[package]] +name = "threadpoolctl" +version = "3.1.0" +description = "threadpoolctl" +optional = false +python-versions = ">=3.6" +files = [ + {file = "threadpoolctl-3.1.0-py3-none-any.whl", hash = "sha256:8b99adda265feb6773280df41eece7b2e6561b772d21ffd52e372f999024907b"}, + {file = "threadpoolctl-3.1.0.tar.gz", hash = "sha256:a335baacfaa4400ae1f0d8e3a58d6674d2f8828e3716bb2802c44955ad391380"}, +] + +[[package]] +name = "tinycss2" +version = "1.2.1" +description = "A tiny CSS parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tinycss2-1.2.1-py3-none-any.whl", hash = "sha256:2b80a96d41e7c3914b8cda8bc7f705a4d9c49275616e886103dd839dfc847847"}, + {file = "tinycss2-1.2.1.tar.gz", hash = "sha256:8cff3a8f066c2ec677c06dbc7b45619804a6938478d9d73c284b29d14ecb0627"}, +] + +[package.dependencies] +webencodings = ">=0.4" + +[package.extras] +doc = ["sphinx", "sphinx_rtd_theme"] +test = ["flake8", "isort", "pytest"] + +[[package]] +name = "tomli" +version = "2.0.1" +description = "A lil' TOML parser" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tomli-2.0.1-py3-none-any.whl", hash = "sha256:939de3e7a6161af0c887ef91b7d41a53e7c5a1ca976325f429cb46ea9bc30ecc"}, + {file = "tomli-2.0.1.tar.gz", hash = "sha256:de526c12914f0c550d15924c62d72abc48d6fe7364aa87328337a31007fe8a4f"}, +] + +[[package]] +name = "tornado" +version = "6.3.2" +description = "Tornado is a Python web framework and asynchronous networking library, originally developed at FriendFeed." 
+optional = false +python-versions = ">= 3.8" +files = [ + {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_universal2.whl", hash = "sha256:c367ab6c0393d71171123ca5515c61ff62fe09024fa6bf299cd1339dc9456829"}, + {file = "tornado-6.3.2-cp38-abi3-macosx_10_9_x86_64.whl", hash = "sha256:b46a6ab20f5c7c1cb949c72c1994a4585d2eaa0be4853f50a03b5031e964fc7c"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_17_aarch64.manylinux2014_aarch64.whl", hash = "sha256:c2de14066c4a38b4ecbbcd55c5cc4b5340eb04f1c5e81da7451ef555859c833f"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_i686.manylinux1_i686.manylinux_2_17_i686.manylinux2014_i686.whl", hash = "sha256:05615096845cf50a895026f749195bf0b10b8909f9be672f50b0fe69cba368e4"}, + {file = "tornado-6.3.2-cp38-abi3-manylinux_2_5_x86_64.manylinux1_x86_64.manylinux_2_17_x86_64.manylinux2014_x86_64.whl", hash = "sha256:5b17b1cf5f8354efa3d37c6e28fdfd9c1c1e5122f2cb56dac121ac61baa47cbe"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_aarch64.whl", hash = "sha256:29e71c847a35f6e10ca3b5c2990a52ce38b233019d8e858b755ea6ce4dcdd19d"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_i686.whl", hash = "sha256:834ae7540ad3a83199a8da8f9f2d383e3c3d5130a328889e4cc991acc81e87a0"}, + {file = "tornado-6.3.2-cp38-abi3-musllinux_1_1_x86_64.whl", hash = "sha256:6a0848f1aea0d196a7c4f6772197cbe2abc4266f836b0aac76947872cd29b411"}, + {file = "tornado-6.3.2-cp38-abi3-win32.whl", hash = "sha256:7efcbcc30b7c654eb6a8c9c9da787a851c18f8ccd4a5a3a95b05c7accfa068d2"}, + {file = "tornado-6.3.2-cp38-abi3-win_amd64.whl", hash = "sha256:0c325e66c8123c606eea33084976c832aa4e766b7dff8aedd7587ea44a604cdf"}, + {file = "tornado-6.3.2.tar.gz", hash = "sha256:4b927c4f19b71e627b13f3db2324e4ae660527143f9e1f2e2fb404f3a187e2ba"}, +] + +[[package]] +name = "tqdm" +version = "4.65.0" +description = "Fast, Extensible Progress Meter" +optional = false +python-versions = ">=3.7" +files = [ + {file = "tqdm-4.65.0-py3-none-any.whl", hash = "sha256:c4f53a17fe37e132815abceec022631be8ffe1b9381c2e6e30aa70edc99e9671"}, + {file = "tqdm-4.65.0.tar.gz", hash = "sha256:1871fb68a86b8fb3b59ca4cdd3dcccbc7e6d613eeed31f4c332531977b89beb5"}, +] + +[package.dependencies] +colorama = {version = "*", markers = "platform_system == \"Windows\""} + +[package.extras] +dev = ["py-make (>=0.1.0)", "twine", "wheel"] +notebook = ["ipywidgets (>=6)"] +slack = ["slack-sdk"] +telegram = ["requests"] + +[[package]] +name = "traitlets" +version = "5.9.0" +description = "Traitlets Python configuration system" +optional = false +python-versions = ">=3.7" +files = [ + {file = "traitlets-5.9.0-py3-none-any.whl", hash = "sha256:9e6ec080259b9a5940c797d58b613b5e31441c2257b87c2e795c5228ae80d2d8"}, + {file = "traitlets-5.9.0.tar.gz", hash = "sha256:f6cde21a9c68cf756af02035f72d5a723bf607e862e7be33ece505abf4a3bad9"}, +] + +[package.extras] +docs = ["myst-parser", "pydata-sphinx-theme", "sphinx"] +test = ["argcomplete (>=2.0)", "pre-commit", "pytest", "pytest-mock"] + +[[package]] +name = "typing-extensions" +version = "4.6.3" +description = "Backported and Experimental Type Hints for Python 3.7+" +optional = false +python-versions = ">=3.7" +files = [ + {file = "typing_extensions-4.6.3-py3-none-any.whl", hash = "sha256:88a4153d8505aabbb4e13aacb7c486c2b4a33ca3b3f807914a9b4c844c471c26"}, + {file = "typing_extensions-4.6.3.tar.gz", hash = "sha256:d91d5919357fe7f681a9f2b5b4cb2a5f1ef0a1e9f59c4d8ff0d3491e05c0ffd5"}, +] + +[[package]] +name = "tzdata" +version = "2023.3" +description = "Provider of IANA time zone data" +optional = false +python-versions = ">=2" +files 
= [ + {file = "tzdata-2023.3-py2.py3-none-any.whl", hash = "sha256:7e65763eef3120314099b6939b5546db7adce1e7d6f2e179e3df563c70511eda"}, + {file = "tzdata-2023.3.tar.gz", hash = "sha256:11ef1e08e54acb0d4f95bdb1be05da659673de4acbd21bf9c69e94cc5e907a3a"}, +] + +[[package]] +name = "uri-template" +version = "1.3.0" +description = "RFC 6570 URI Template Processor" +optional = false +python-versions = ">=3.7" +files = [ + {file = "uri-template-1.3.0.tar.gz", hash = "sha256:0e00f8eb65e18c7de20d595a14336e9f337ead580c70934141624b6d1ffdacc7"}, + {file = "uri_template-1.3.0-py3-none-any.whl", hash = "sha256:a44a133ea12d44a0c0f06d7d42a52d71282e77e2f937d8abd5655b8d56fc1363"}, +] + +[package.extras] +dev = ["flake8", "flake8-annotations", "flake8-bandit", "flake8-bugbear", "flake8-commas", "flake8-comprehensions", "flake8-continuation", "flake8-datetimez", "flake8-docstrings", "flake8-import-order", "flake8-literal", "flake8-modern-annotations", "flake8-noqa", "flake8-pyproject", "flake8-requirements", "flake8-typechecking-import", "flake8-use-fstring", "mypy", "pep8-naming", "types-PyYAML"] + +[[package]] +name = "urllib3" +version = "2.0.3" +description = "HTTP library with thread-safe connection pooling, file post, and more." +optional = false +python-versions = ">=3.7" +files = [ + {file = "urllib3-2.0.3-py3-none-any.whl", hash = "sha256:48e7fafa40319d358848e1bc6809b208340fafe2096f1725d05d67443d0483d1"}, + {file = "urllib3-2.0.3.tar.gz", hash = "sha256:bee28b5e56addb8226c96f7f13ac28cb4c301dd5ea8a6ca179c0b9835e032825"}, +] + +[package.extras] +brotli = ["brotli (>=1.0.9)", "brotlicffi (>=0.8.0)"] +secure = ["certifi", "cryptography (>=1.9)", "idna (>=2.0.0)", "pyopenssl (>=17.1.0)", "urllib3-secure-extra"] +socks = ["pysocks (>=1.5.6,!=1.5.7,<2.0)"] +zstd = ["zstandard (>=0.18.0)"] + +[[package]] +name = "wcwidth" +version = "0.2.6" +description = "Measures the displayed width of unicode strings in a terminal" +optional = false +python-versions = "*" +files = [ + {file = "wcwidth-0.2.6-py2.py3-none-any.whl", hash = "sha256:795b138f6875577cd91bba52baf9e445cd5118fd32723b460e30a0af30ea230e"}, + {file = "wcwidth-0.2.6.tar.gz", hash = "sha256:a5220780a404dbe3353789870978e472cfe477761f06ee55077256e509b156d0"}, +] + +[[package]] +name = "webcolors" +version = "1.13" +description = "A library for working with the color formats defined by HTML and CSS." 
+optional = false +python-versions = ">=3.7" +files = [ + {file = "webcolors-1.13-py3-none-any.whl", hash = "sha256:29bc7e8752c0a1bd4a1f03c14d6e6a72e93d82193738fa860cbff59d0fcc11bf"}, + {file = "webcolors-1.13.tar.gz", hash = "sha256:c225b674c83fa923be93d235330ce0300373d02885cef23238813b0d5668304a"}, +] + +[package.extras] +docs = ["furo", "sphinx", "sphinx-copybutton", "sphinx-inline-tabs", "sphinx-notfound-page", "sphinxext-opengraph"] +tests = ["pytest", "pytest-cov"] + +[[package]] +name = "webencodings" +version = "0.5.1" +description = "Character encoding aliases for legacy web content" +optional = false +python-versions = "*" +files = [ + {file = "webencodings-0.5.1-py2.py3-none-any.whl", hash = "sha256:a0af1213f3c2226497a97e2b3aa01a7e4bee4f403f95be16fc9acd2947514a78"}, + {file = "webencodings-0.5.1.tar.gz", hash = "sha256:b36a1c245f2d304965eb4e0a82848379241dc04b865afcc4aab16748587e1923"}, +] + +[[package]] +name = "websocket-client" +version = "1.6.0" +description = "WebSocket client for Python with low level API options" +optional = false +python-versions = ">=3.7" +files = [ + {file = "websocket-client-1.6.0.tar.gz", hash = "sha256:e84c7eafc66aade6d1967a51dfd219aabdf81d15b9705196e11fd81f48666b78"}, + {file = "websocket_client-1.6.0-py3-none-any.whl", hash = "sha256:72d7802608745b0a212f79b478642473bd825777d8637b6c8c421bf167790d4f"}, +] + +[package.extras] +docs = ["Sphinx (>=3.4)", "sphinx-rtd-theme (>=0.5)"] +optional = ["python-socks", "wsaccel"] +test = ["websockets"] + +[[package]] +name = "werkzeug" +version = "2.2.3" +description = "The comprehensive WSGI web application library." +optional = false +python-versions = ">=3.7" +files = [ + {file = "Werkzeug-2.2.3-py3-none-any.whl", hash = "sha256:56433961bc1f12533306c624f3be5e744389ac61d722175d543e1751285da612"}, + {file = "Werkzeug-2.2.3.tar.gz", hash = "sha256:2e1ccc9417d4da358b9de6f174e3ac094391ea1d4fbef2d667865d819dfd0afe"}, +] + +[package.dependencies] +MarkupSafe = ">=2.1.1" + +[package.extras] +watchdog = ["watchdog"] + +[[package]] +name = "widgetsnbextension" +version = "4.0.7" +description = "Jupyter interactive widgets for Jupyter Notebook" +optional = false +python-versions = ">=3.7" +files = [ + {file = "widgetsnbextension-4.0.7-py3-none-any.whl", hash = "sha256:be3228a73bbab189a16be2d4a3cd89ecbd4e31948bfdc64edac17dcdee3cd99c"}, + {file = "widgetsnbextension-4.0.7.tar.gz", hash = "sha256:ea67c17a7cd4ae358f8f46c3b304c40698bc0423732e3f273321ee141232c8be"}, +] + +[[package]] +name = "win32-setctime" +version = "1.1.0" +description = "A small Python utility to set file creation time on Windows" +optional = false +python-versions = ">=3.5" +files = [ + {file = "win32_setctime-1.1.0-py3-none-any.whl", hash = "sha256:231db239e959c2fe7eb1d7dc129f11172354f98361c4fa2d6d2d7e278baa8aad"}, + {file = "win32_setctime-1.1.0.tar.gz", hash = "sha256:15cf5750465118d6929ae4de4eb46e8edae9a5634350c01ba582df868e932cb2"}, +] + +[package.extras] +dev = ["black (>=19.3b0)", "pytest (>=4.6.2)"] + +[[package]] +name = "zipp" +version = "3.15.0" +description = "Backport of pathlib-compatible object wrapper for zip files" +optional = false +python-versions = ">=3.7" +files = [ + {file = "zipp-3.15.0-py3-none-any.whl", hash = "sha256:48904fc76a60e542af151aded95726c1a5c34ed43ab4134b597665c86d7ad556"}, + {file = "zipp-3.15.0.tar.gz", hash = "sha256:112929ad649da941c23de50f356a2b5570c954b65150642bccdd66bf194d224b"}, +] + +[package.extras] +docs = ["furo", "jaraco.packaging (>=9)", "jaraco.tidelift (>=1.4)", "rst.linker (>=1.9)", "sphinx (>=3.5)", 
"sphinx-lint"] +testing = ["big-O", "flake8 (<5)", "jaraco.functools", "jaraco.itertools", "more-itertools", "pytest (>=6)", "pytest-black (>=0.3.7)", "pytest-checkdocs (>=2.4)", "pytest-cov", "pytest-enabler (>=1.3)", "pytest-flake8", "pytest-mypy (>=0.9.1)"] + +[metadata] +lock-version = "2.0" +python-versions = ">=3.8,<4" +content-hash = "4f116258647de05d110407707956c559f14d6c5b3f044d8a32b2ffd7a3555a60" diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 00000000..cda7199c --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,40 @@ +[tool.poetry] +name = "perception-dataset" +version = "1.0.0" +description = "TIER IV Perception dataset has modules to convert dataset from rosbag to t4_dataset" +authors = ["Yusuke Muramatsu ", "Shunsuke Miura "] + +[tool.poetry.dependencies] +python = ">=3.8,<4" +numpy = "^1.23.1" +PyYAML = "^6.0" +nptyping = "^2.2.0" +Pillow = "^8.3.2" +scipy = "^1.9.0" +python-json-logger = "^2.0.4" +open3d = "0.16.0" +opencv-python = "^4.6.0" +nuscenes-devkit = "^1.1.9" +pydantic = "^1.10.2" +lark = "^1.1.2" +loguru = "^0.6.0" +numba = "^0.56.4" +requests = "^2.28.2" + +[tool.poetry.dev-dependencies] +pytest = "^7.1.1" +pytest-asyncio = "^0.18.3" +pytest-mock = "^3.7.0" + +[build-system] +build-backend = "poetry.core.masonry.api" +requires = ["poetry-core>=1.0.0"] + +[tool.black] +line_length = 99 + +[tool.isort] +line_length = 99 +profile = "black" +force_sort_within_sections = true +reverse_relative = true diff --git a/setup.cfg b/setup.cfg new file mode 100644 index 00000000..95c8b157 --- /dev/null +++ b/setup.cfg @@ -0,0 +1,10 @@ +[flake8] +# Modified from https://github.com/autowarefoundation/autoware/blob/main/setup.cfg +# Autoware setiing of extend-ignore = B902,C816,D100,D101,D102,D103,D104,D105,D106,D107,D203,D212,D404,I202,CNL100,E203,E501,Q000 +# Differences from autoware settings: D205,D400,D401,D403 +# Error Codes of D: https://www.pydocstyle.org/en/stable/error_codes.html +extend-ignore = B902,C816,D100,D101,D102,D103,D104,D105,D106,D107,D203,D212,D404,I202,CNL100,E203,E501,Q000,D205,D400,D401,D403 +import-order-style = pep8 +max-line-length = 99 +show-source = true +statistics = true diff --git a/tests/__init__.py b/tests/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/config/convert_synthetic_data.yaml b/tests/config/convert_synthetic_data.yaml new file mode 100644 index 00000000..ccdf07e0 --- /dev/null +++ b/tests/config/convert_synthetic_data.yaml @@ -0,0 +1,15 @@ +task: convert_rosbag2_to_t4 +conversion: + input_base: OVERWRITE_IN_TEST + output_base: OVERWRITE_IN_TEST + skip_timestamp: 0.01 + workers_number: 1 + num_load_frames: 10 + crop_frames_unit: 1 + object_topic_name: /ground_truth/objects + object_msg_type: DynamicObjectArray + world_frame_id: world + lidar_sensor: + topic: /lidar/concatenated/pointcloud + channel: LIDAR_CONCAT + camera_sensors: [] # synthetic data has no images diff --git a/tests/conftest.py b/tests/conftest.py new file mode 100644 index 00000000..20d8409b --- /dev/null +++ b/tests/conftest.py @@ -0,0 +1,38 @@ +import pytest + + +@pytest.fixture(scope="session", autouse=False) +def scope_session(): + print("\nsetup before session.\n") + yield + print("\nteardown after session.\n") + + +@pytest.fixture(scope="module", autouse=False) +def scope_module(): + print("\nsetup before module.\n") + yield + print("\nteardown after module.\n") + + +@pytest.fixture(scope="class", autouse=False) +def scope_class(): + print("\nsetup before class.\n") + yield + print("\nteardown after class.\n") + + 
+@pytest.fixture(scope="function", autouse=False) +def scope_function(): + print("\nsetup before function.\n") + yield + print("\nteardown after function.\n") + + +def pytest_addoption(parser): + parser.addoption( + "--keep-output", + action="store_true", + default=False, + help="does not remove output files generated by the test", + ) diff --git a/tests/constants.py b/tests/constants.py new file mode 100644 index 00000000..112d95c6 --- /dev/null +++ b/tests/constants.py @@ -0,0 +1,4 @@ +from pathlib import Path + +TEST_CONFIG_ROOT_DIR = Path(__file__).resolve().parent / "config" +TEST_DATA_ROOT_DIR = Path(__file__).resolve().parent / "data" diff --git a/tests/deepen/test_deepen_to_t4_converter.py b/tests/deepen/test_deepen_to_t4_converter.py new file mode 100644 index 00000000..6c45b6c8 --- /dev/null +++ b/tests/deepen/test_deepen_to_t4_converter.py @@ -0,0 +1,206 @@ +from typing import Any, Dict, List + +import pytest + +from perception_dataset.deepen.deepen_to_t4_converter import DeepenToT4Converter + + +@pytest.fixture(scope="function") +def deepen_anno_list(): + # Note: only values required for t4dataset + deepen_anno_list = [ + { + "dataset_id": "dataset_xxx", + "file_id": "0.pcd", + "label_category_id": "car", + "label_id": "car:1", + "label_type": "3d_bbox", + "attributes": { + "state": "moving", + "occlusion": "none", + "cycle_state": "with_rider", + }, + "labeller_email": "test@aaa.bbb", + "sensor_id": "lidar", + "three_d_bbox": { + "cx": 1.0, + "cy": 1.0, + "cz": 1.0, + "h": 1.0, + "l": 1.0, + "w": 1.0, + "quaternion": { + "x": 1.0, + "y": 1.0, + "z": 1.0, + "w": 1.0, + }, + }, + }, + { + "dataset_id": "dataset_xxx", + "file_id": "1.pcd", + "label_category_id": "car", + "label_id": "car:1", + "label_type": "3d_bbox", + "attributes": { + "state": "moving", + "occlusion": "none", + "cycle_state": "with_rider", + }, + "labeller_email": "test@aaa.bbb", + "sensor_id": "lidar", + "three_d_bbox": { + "cx": 2.0, + "cy": 2.0, + "cz": 2.0, + "h": 2.0, + "l": 2.0, + "w": 2.0, + "quaternion": { + "x": 2.0, + "y": 2.0, + "z": 2.0, + "w": 2.0, + }, + }, + }, + { + "dataset_id": "dataset_xxx", + "file_id": "1.pcd", + "label_category_id": "car", + "label_id": "car:1", + "label_type": "3d_bbox", + "attributes": { + "state": "moving", + "occlusion": "none", + "cycle_state": "with_rider", + }, + "labeller_email": "test@aaa.bbb", + "sensor_id": "lidar", + "three_d_bbox": { + "cx": 3.0, + "cy": 3.0, + "cz": 3.0, + "h": 3.0, + "l": 3.0, + "w": 3.0, + "quaternion": { + "x": 3.0, + "y": 3.0, + "z": 3.0, + "w": 3.0, + }, + }, + }, + { + "dataset_id": "dataset_xxx", + "file_id": "1.pcd", + "label_category_id": "car", + "label_id": "car:1", + "label_type": "3d_bbox", + "attributes": { + "state": "moving", + "occlusion": "none", + "cycle_state": "with_rider", + }, + "labeller_email": "auto_interpolation", + "sensor_id": "lidar", + "three_d_bbox": { + "cx": 3.0, + "cy": 3.0, + "cz": 3.0, + "h": 3.0, + "l": 3.0, + "w": 3.0, + "quaternion": { + "x": 3.0, + "y": 3.0, + "z": 3.0, + "w": 3.0, + }, + }, + }, + ] + + return deepen_anno_list + + +class TestDeepenToT4Converter: + @pytest.fixture(scope="function") + def converter_for_test(self): + # TODO(yukke42): test with files + input_base = "" + output_base = "" + input_anno_file = "" + dataset_corresponding = {} + overwrite_mode = False + + return DeepenToT4Converter( + input_base=input_base, + output_base=output_base, + input_anno_file=input_anno_file, + dataset_corresponding=dataset_corresponding, + overwrite_mode=overwrite_mode, + description={}, + 
input_bag_base="", + topic_list=[], + ignore_interpolate_label=False, + ) + + @pytest.fixture(scope="function") + def converter_for_interpolate_test(self): + input_base = "" + output_base = "" + input_anno_file = "" + dataset_corresponding = {} + overwrite_mode = False + + return DeepenToT4Converter( + input_base=input_base, + output_base=output_base, + input_anno_file=input_anno_file, + dataset_corresponding=dataset_corresponding, + overwrite_mode=overwrite_mode, + description={}, + input_bag_base="", + topic_list=[], + ignore_interpolate_label=True, + ) + + def test_convert(self): + # TODO(yukke42): impl test_convert + pass + + def test__format_deepen_annotation( + self, + converter_for_test: DeepenToT4Converter, + deepen_anno_list: List[Dict[str, Any]], + ): + scenes_anno_dict = converter_for_test._format_deepen_annotation(deepen_anno_list) + + assert len(scenes_anno_dict) == 1 + assert len(scenes_anno_dict["dataset_xxx"]) == 2 + assert len(scenes_anno_dict["dataset_xxx"][0]) == 1 + assert len(scenes_anno_dict["dataset_xxx"][1]) == 3 + assert isinstance(scenes_anno_dict, dict) + assert all(isinstance(dataset_id, str) for dataset_id in scenes_anno_dict.keys()) + assert all( + isinstance(frame_index, int) for frame_index in scenes_anno_dict["dataset_xxx"].keys() + ) + assert all( + isinstance(frame_index, list) + for frame_index in scenes_anno_dict["dataset_xxx"].values() + ) + + def test__format_deepen_annotation_ignore_interpolate( + self, + converter_for_interpolate_test: DeepenToT4Converter, + deepen_anno_list: List[Dict[str, Any]], + ): + scenes_anno_dict = converter_for_interpolate_test._format_deepen_annotation( + deepen_anno_list + ) + + assert len(scenes_anno_dict["dataset_xxx"][0]) == 1 + assert len(scenes_anno_dict["dataset_xxx"][1]) == 2 diff --git a/tests/deepen/test_json_format.py b/tests/deepen/test_json_format.py new file mode 100644 index 00000000..3bc269ca --- /dev/null +++ b/tests/deepen/test_json_format.py @@ -0,0 +1,125 @@ +import numpy as np +import pytest + +from perception_dataset.constants import EXTENSION_ENUM, SENSOR_ENUM +from perception_dataset.deepen.json_format import ConfigData, ImageData + + +def test_image_data(): + frame_index = 100 + channel = SENSOR_ENUM.CAM_FRONT.value["channel"] + fileformat = EXTENSION_ENUM.PNG.value[1:] + unix_timestamp = 1635353737.10000 + device_position = np.array([1, 2, 3], dtype=np.float32) + device_heading = np.array([10, 20, 30, 40], dtype=np.float32) + fx = 100.0 + fy = 200.0 + cx = 300.0 + cy = 400.0 + camera_intrinsic_matrix = np.array( + [ + [fx, 0.0, cx], + [0.0, fy, cy], + [0.0, 0.0, 1.0], + ], + dtype=np.float32, + ) + filepath = f"data/{channel}/{frame_index}.{fileformat}" + + image_data = ImageData( + frame_index=frame_index, + channel=channel, + fileformat=fileformat, + unix_timestamp=unix_timestamp, + device_position=device_position, + device_heading=device_heading, + camera_intrinsic_matrix=camera_intrinsic_matrix, + ) + + assert image_data.filepath == filepath + + image_data_dict = image_data.to_dict() + assert isinstance(image_data_dict["fx"], float) + assert isinstance(image_data_dict["fy"], float) + assert isinstance(image_data_dict["cx"], float) + assert isinstance(image_data_dict["cy"], float) + assert isinstance(image_data_dict["timestamp"], float) + assert isinstance(image_data_dict["image_url"], str) + assert isinstance(image_data_dict["position"], dict) + assert isinstance(image_data_dict["position"]["x"], float) + assert isinstance(image_data_dict["position"]["y"], float) + assert 
isinstance(image_data_dict["position"]["z"], float) + assert isinstance(image_data_dict["heading"], dict) + assert isinstance(image_data_dict["heading"]["w"], float) + assert isinstance(image_data_dict["heading"]["x"], float) + assert isinstance(image_data_dict["heading"]["y"], float) + assert isinstance(image_data_dict["heading"]["z"], float) + assert isinstance(image_data_dict["camera_model"], str) + assert isinstance(image_data_dict["k1"], float) + assert isinstance(image_data_dict["k2"], float) + assert isinstance(image_data_dict["p1"], float) + assert isinstance(image_data_dict["p2"], float) + assert isinstance(image_data_dict["k3"], float) + assert isinstance(image_data_dict["k4"], float) + assert image_data_dict["fx"] == pytest.approx(fx) + assert image_data_dict["fy"] == pytest.approx(fy) + assert image_data_dict["cx"] == pytest.approx(cx) + assert image_data_dict["cy"] == pytest.approx(cy) + assert image_data_dict["timestamp"] == pytest.approx(unix_timestamp) + assert image_data_dict["image_url"] == filepath + assert image_data_dict["camera_model"] == "pinhole" + assert image_data_dict["position"]["x"] == pytest.approx(device_position[0]) + assert image_data_dict["position"]["y"] == pytest.approx(device_position[1]) + assert image_data_dict["position"]["z"] == pytest.approx(device_position[2]) + assert image_data_dict["heading"]["w"] == pytest.approx(device_heading[0]) + assert image_data_dict["heading"]["x"] == pytest.approx(device_heading[1]) + assert image_data_dict["heading"]["y"] == pytest.approx(device_heading[2]) + assert image_data_dict["heading"]["z"] == pytest.approx(device_heading[3]) + + +def test_config_data(): + frame_index = 100 + unix_timestamp = 1635353737.10000 + device_position = np.array([1, 2, 3], dtype=np.float32) + device_heading = np.array([10, 20, 30, 40], dtype=np.float32) + points = np.arange(6, dtype=np.float32).reshape(2, 3) + + config_data = ConfigData( + frame_index=frame_index, + unix_timestamp=unix_timestamp, + points=points, + device_position=device_position, + device_heading=device_heading, + ) + + assert config_data.filename == f"{frame_index}.json" + + config_data_dict = config_data.to_dict() + assert isinstance(config_data_dict["images"], list) + assert isinstance(config_data_dict["timestamp"], float) + assert isinstance(config_data_dict["device_position"], dict) + assert isinstance(config_data_dict["device_position"]["x"], float) + assert isinstance(config_data_dict["device_position"]["y"], float) + assert isinstance(config_data_dict["device_position"]["z"], float) + assert isinstance(config_data_dict["device_heading"], dict) + assert isinstance(config_data_dict["device_heading"]["w"], float) + assert isinstance(config_data_dict["device_heading"]["x"], float) + assert isinstance(config_data_dict["device_heading"]["y"], float) + assert isinstance(config_data_dict["device_heading"]["z"], float) + assert isinstance(config_data_dict["points"], list) + assert isinstance(config_data_dict["points"][0], dict) + assert isinstance(config_data_dict["points"][0]["x"], float) + assert isinstance(config_data_dict["points"][0]["y"], float) + assert isinstance(config_data_dict["points"][0]["z"], float) + assert config_data_dict["timestamp"] == pytest.approx(unix_timestamp) + assert config_data_dict["device_position"]["x"] == pytest.approx(device_position[0]) + assert config_data_dict["device_position"]["y"] == pytest.approx(device_position[1]) + assert config_data_dict["device_position"]["z"] == pytest.approx(device_position[2]) + assert 
config_data_dict["device_heading"]["w"] == pytest.approx(device_heading[0]) + assert config_data_dict["device_heading"]["x"] == pytest.approx(device_heading[1]) + assert config_data_dict["device_heading"]["y"] == pytest.approx(device_heading[2]) + assert config_data_dict["device_heading"]["z"] == pytest.approx(device_heading[3]) + for i in range(points.shape[0]): + assert config_data_dict["points"][i]["x"] == pytest.approx(points[i, 0]) + assert config_data_dict["points"][i]["y"] == pytest.approx(points[i, 1]) + assert config_data_dict["points"][i]["z"] == pytest.approx(points[i, 2]) diff --git a/tests/deepen/test_non_annotated_t4_to_deepen_converter.py b/tests/deepen/test_non_annotated_t4_to_deepen_converter.py new file mode 100644 index 00000000..b2b9f217 --- /dev/null +++ b/tests/deepen/test_non_annotated_t4_to_deepen_converter.py @@ -0,0 +1,33 @@ +import pytest + +from perception_dataset.deepen.non_annotated_t4_to_deepen_converter import ( + NonAnnotatedT4ToDeepenConverter, +) + + +class TestNonAnnotatedT4ToDeepenConverter: + @pytest.fixture(scope="function") + def converter_for_test(self): + # TODO(yukke42): test with files + return NonAnnotatedT4ToDeepenConverter(input_base="", output_base="", camera_sensors=[]) + + def test__convert_one_scene(self): + # TODO(yukke42): impl test__convert_one_scene + pass + + def test__get_data(self): + # TODO(yukke42): impl test__get_data + pass + + @pytest.mark.parametrize("timestamp, expected_value", [(1624164470899887, 1624164470.899887)]) + def test__timestamp_to_sec( + self, + mocker, + scope_function, + timestamp: int, + expected_value: float, + converter_for_test: NonAnnotatedT4ToDeepenConverter, + ): + timestamp_f = converter_for_test._timestamp_to_sec(timestamp) + assert isinstance(timestamp_f, float) + assert timestamp_f == pytest.approx(expected_value) diff --git a/tests/rosbag2/test_rosbag2_converter.py b/tests/rosbag2/test_rosbag2_converter.py new file mode 100644 index 00000000..6a5961f2 --- /dev/null +++ b/tests/rosbag2/test_rosbag2_converter.py @@ -0,0 +1,194 @@ +import json +import shutil + +from nuscenes.nuscenes import NuScenes +import pandas as pd +import pytest +import yaml + +from perception_dataset.constants import T4_FORMAT_DIRECTORY_NAME +from perception_dataset.rosbag2.converter_params import Rosbag2ConverterParams +from perception_dataset.rosbag2.rosbag2_to_t4_converter import Rosbag2ToT4Converter +from perception_dataset.t4_dataset.data_validator import validate_data_hz +from perception_dataset.t4_dataset.format_validator import ( + validate_directory_structure, + validate_format, +) +from tests.constants import TEST_CONFIG_ROOT_DIR, TEST_DATA_ROOT_DIR + + +@pytest.fixture +def t4_dataset_path(request): + """ + Provide data with selected storage id with `request.param` parameter, convert it + using a config file, yield the path to tests and clean up at the end. 
+ + To find more info about how `request.param` works with pytest fixtures, please refer to: + https://docs.pytest.org/en/latest/example/parametrize.html#indirect-parametrization + """ + # before test - convert rosbag2 to t4 + rosbag_dir = "awsim_rosbag" if request.param == "db3" else "awsim_rosbag_mcap" + input_base = TEST_DATA_ROOT_DIR / rosbag_dir + output_base = TEST_DATA_ROOT_DIR / "t4_dataset" + test_rosbag_name = "x2_nishi_shinjuku_dynamic_object_msg" + + assert input_base.exists() + + with open(TEST_CONFIG_ROOT_DIR / "convert_synthetic_data.yaml") as f: + param_args = yaml.safe_load(f) + + param_args["conversion"]["input_base"] = str(input_base) + param_args["conversion"]["output_base"] = str(output_base) + + converter_params = Rosbag2ConverterParams( + task=param_args["task"], overwrite_mode=True, **param_args["conversion"] + ) + converter = Rosbag2ToT4Converter(converter_params) + converter.convert() + + # provide a path to converted t4_dataset + yield output_base / test_rosbag_name + + # after test - remove resource + shutil.rmtree(output_base) + + +@pytest.fixture +def sample_annotation(t4_dataset_path): + with (t4_dataset_path / "annotation/sample_annotation.json").open() as f: + sample_annotation = json.load(f) + return sample_annotation + + +@pytest.mark.parametrize("t4_dataset_path", ["db3", "mcap"], indirect=True) +def test_t4_dataset_format(t4_dataset_path): + validate_directory_structure(t4_dataset_path) + + nusc = NuScenes( + version=T4_FORMAT_DIRECTORY_NAME.ANNOTATION.value, + dataroot=t4_dataset_path, + verbose=False, + ) + + validate_format(nusc, t4_dataset_path) + validate_data_hz(nusc) + + +def get_empty(df, col): + # Indexes of the middle frames (excluding first and last) whose column is empty + mask = df.iloc[1:-1][col] == "" + return mask[mask].index.tolist() + + +@pytest.mark.parametrize("t4_dataset_path", ["db3", "mcap"], indirect=True) +def test_rosbag2_converter_dataset_consistency(sample_annotation): + # Check prev/next linkage of annotations per instance + grouped = pd.DataFrame(sample_annotation).groupby("instance_token") + for _, annotations in grouped: + if len(annotations) == 1: + assert annotations.iloc[0]["prev"] == "" + assert annotations.iloc[0]["next"] == "" + continue + + # First frame doesn't have prev frame + assert annotations.iloc[0]["prev"] == "" + assert annotations.iloc[0]["next"] + + # Last frame doesn't have next frame + assert annotations.iloc[-1]["prev"] + assert annotations.iloc[-1]["next"] == "" + + if len(annotations) <= 2: + continue + + # All other frames should have both prev and next + assert ( + annotations.iloc[1:-1]["next"] != "" + ).all(), f'next is empty at indexes {get_empty(annotations, "next")}' + assert ( + annotations.iloc[1:-1]["prev"] != "" + ).all(), f'prev is empty at indexes {get_empty(annotations, "prev")}' + + # All other frames should have prev and next that are not equal + assert ( + annotations.iloc[1:-1]["prev"] != annotations.iloc[1:-1]["next"] + ).all() + + +expected_num_lidar_pts = { + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + 10, + 139, + 12, + 13, + 14, + 11, + 16, + 17, + 15, + 19, + 18, + 21, + 22, + 23, + 152, + 25, + 26, + 27, + 28, + 29, + 24, + 31, + 32, + 33, + 34, + 154, + 36, + 37, + 549, + 38, + 40, + 41, + 42, + 550, + 44, + 45, + 39, + 57, + 570, + 59, + 62, + 65, + 69, + 70, + 71, + 75, + 78, + 80, + 81, + 84, + 86, + 88, + 89, + 219, + 92, + 97, + 227, + 231, + 4456, + 236, + 539, +} + + +@pytest.mark.parametrize("t4_dataset_path", ["db3", "mcap"], indirect=True) +def test_rosbag2_converter_num_lidar_pts(sample_annotation): + num_lidar_pts_list = [r["num_lidar_pts"] for r in sample_annotation] + 
assert expected_num_lidar_pts.difference(set(num_lidar_pts_list)) == set() diff --git a/tests/rosbag2/test_rosbag2_reader.py b/tests/rosbag2/test_rosbag2_reader.py new file mode 100644 index 00000000..ef939a0e --- /dev/null +++ b/tests/rosbag2/test_rosbag2_reader.py @@ -0,0 +1 @@ +# TODO(yukke42): impl test_rosbag2_reader.py diff --git a/tests/t4_dataset/__init__.py b/tests/t4_dataset/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/t4_dataset/classes/__init__.py b/tests/t4_dataset/classes/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/t4_dataset/classes/test_abstract_class.py b/tests/t4_dataset/classes/test_abstract_class.py new file mode 100644 index 00000000..10d58aa9 --- /dev/null +++ b/tests/t4_dataset/classes/test_abstract_class.py @@ -0,0 +1,162 @@ +import json +import os.path as osp +import tempfile +from typing import Dict, List + +import pytest + +from perception_dataset.constants import EXTENSION_ENUM +from perception_dataset.t4_dataset.classes.abstract_class import AbstractRecord, AbstractTable + + +class AbstractRecordForTest(AbstractRecord): + def __init__(self): + super().__init__() + + def to_dict(self) -> Dict[str, str]: + return {"token": self.token} + + +class AbstractTableForTest(AbstractTable): + FILENAME = "test" + EXTENSION_ENUM.JSON.value + + def __init__(self): + super().__init__() + + def _to_record(self, **kwargs) -> AbstractRecordForTest: + return AbstractRecordForTest() + + +class TestAbstractRecord: + def test_token(self): + AbstractRecord.__abstractmethods__ = set() + rec = AbstractRecord() + + assert isinstance(rec.token, str) + assert rec.token != "" + + def test_to_dict(self): + """test of the impl for test_abstract_table""" + rec = AbstractRecordForTest() + + rec_dict = rec.to_dict() + assert isinstance(rec_dict, dict) + assert rec_dict["token"] != "" + + +class TestAbstractTable: + @pytest.fixture(scope="function") + def record_for_test(self): + return AbstractRecordForTest() + + @pytest.fixture(scope="function") + def table_for_test(self): + return AbstractTableForTest() + + def test___len__(self, table_for_test: AbstractTableForTest): + len_table = len(table_for_test) + assert isinstance(len_table, int) + assert len_table == 0 + + def test_filename(self, table_for_test: AbstractTableForTest): + """test of the impl for TestAbstractTable""" + assert isinstance(table_for_test.FILENAME, str) + assert table_for_test.FILENAME.endswith(".json") + + def test__to_record(self, table_for_test: AbstractTableForTest): + """test of the impl for TestAbstractTable""" + rec = table_for_test._to_record() + assert isinstance(rec, AbstractRecordForTest) + + def test_set_record_to_table( + self, table_for_test: AbstractTableForTest, record_for_test: AbstractRecordForTest + ): + table_for_test.set_record_to_table(record_for_test) + assert len(table_for_test) == 1 + + # check encapsulated value + assert record_for_test.token in table_for_test._token_to_record + assert table_for_test._token_to_record[record_for_test.token] == record_for_test + assert ( + table_for_test._token_to_record[record_for_test.token].token == record_for_test.token + ) + + def test_insert_into_table(self, table_for_test: AbstractTableForTest): + token = table_for_test.insert_into_table() + assert len(table_for_test) == 1 + + # check encapsulated value + assert token in table_for_test._token_to_record + assert isinstance(table_for_test._token_to_record[token], AbstractRecordForTest) + assert table_for_test._token_to_record[token].token == token + + def 
test_select_record_from_token(self, table_for_test: AbstractTableForTest): + token = table_for_test.insert_into_table() + + record = table_for_test.select_record_from_token(token) + assert isinstance(record, AbstractRecordForTest) + assert record.token == token + + def test_to_data(self, table_for_test: AbstractTableForTest): + token = table_for_test.insert_into_table() + + table_data = table_for_test.to_data() + assert isinstance(table_data, list) + assert all(isinstance(rec, dict) for rec in table_data) + assert table_data[0]["token"] == token + + def test_to_records(self, table_for_test: AbstractTableForTest): + token = table_for_test.insert_into_table() + + table_records = table_for_test.to_records() + assert isinstance(table_records, list) + assert all(isinstance(rec, AbstractRecordForTest) for rec in table_records) + assert table_records[0].token == token + + def test_save_json(self, table_for_test: AbstractTableForTest): + token = table_for_test.insert_into_table() + + with tempfile.TemporaryDirectory() as tmp_dir: + tmp_json_file = osp.join(tmp_dir, table_for_test.FILENAME) + table_for_test.save_json(tmp_dir) + + assert osp.exists(tmp_json_file) + + with open(tmp_json_file) as f: + json_data = json.load(f) + + assert isinstance(json_data, list) + assert isinstance(json_data[0], dict) + assert isinstance(json_data[0]["token"], str) + assert len(json_data) == 1 + assert json_data[0]["token"] == token + + def test_for_multiple_records(self, table_for_test: AbstractTableForTest): + NUM_TEST_RECORD = 3 + token_list: List[str] = [] + + for i in range(NUM_TEST_RECORD): + # test insert_into_table() + token = table_for_test.insert_into_table() + token_list.append(token) + assert isinstance(token, str) + + # test __len__() + assert len(table_for_test) == i + 1 + + # test select_record_from_token() + rec = table_for_test.select_record_from_token(token) + assert isinstance(rec, AbstractRecordForTest) + assert rec.token == token + + # test to_data() + table_data = table_for_test.to_data() + assert isinstance(table_data, list) + assert isinstance(table_data[i], dict) + assert table_data[i]["token"] == token + + # test to_data() of all records + table_data = table_for_test.to_data() + assert all(isinstance(rec, dict) for rec in table_data) + for i in range(NUM_TEST_RECORD): + assert table_data[i]["token"] == token_list[i] diff --git a/tests/t4_dataset/classes/test_attribute.py b/tests/t4_dataset/classes/test_attribute.py new file mode 100644 index 00000000..761e7000 --- /dev/null +++ b/tests/t4_dataset/classes/test_attribute.py @@ -0,0 +1,61 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.attribute import AttributeRecord, AttributeTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "name": "test_attribute_name", + "description": "the description of the attribute", + } + return d + + +class TestAttributeRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return AttributeRecord(**record_data) + + def test_to_dict(self, record_for_test: AttributeRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["name"], str) + assert isinstance(rec_dict["description"], str) + assert rec_dict["name"] == record_data["name"] + assert rec_dict["description"] == record_data["description"] + + +class TestAttributeTable: + # TODO(yukke42): impl TestAttributeTable with 
name_to_description + @pytest.fixture(scope="function") + def table_for_test(self): + return AttributeTable(name_to_description={}, default_value="") + + def test_filename(self, table_for_test: AttributeTable): + assert table_for_test.FILENAME == "attribute.json" + + def test__to_record(self, table_for_test: AttributeTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, AttributeRecord) + + def test_get_token_from_name(self, table_for_test: AttributeTable): + # TODO(yukke42): impl test_get_token_from_name with name_to_description + token1 = table_for_test.get_token_from_name(name="car.moving") + assert isinstance(token1, str) + assert token1 != "" + + # same token + token2 = table_for_test.get_token_from_name(name="car.moving") + assert isinstance(token2, str) + assert token2 != "" + assert token2 == token1 + + # different token + token3 = table_for_test.get_token_from_name(name="car.parked") + assert isinstance(token3, str) + assert token3 != "" + assert token3 != token1 diff --git a/tests/t4_dataset/classes/test_calibrated_sensor.py b/tests/t4_dataset/classes/test_calibrated_sensor.py new file mode 100644 index 00000000..599b273c --- /dev/null +++ b/tests/t4_dataset/classes/test_calibrated_sensor.py @@ -0,0 +1,73 @@ +from typing import Any, Dict + +import numpy as np +import pytest + +from perception_dataset.t4_dataset.classes.calibrated_sensor import ( + CalibratedSensorRecord, + CalibratedSensorTable, +) + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "sensor_token": "sensor_token_xxx", + "translation": {"x": 1.0, "y": 2.0, "z": 3.0}, + "rotation": {"w": 10.0, "x": 20.0, "y": 30.0, "z": 40.0}, + "camera_intrinsic": [], + "camera_distortion": [], + } + return d + + +class TestCalibratedSensorRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return CalibratedSensorRecord(**record_data) + + def test_to_dict(self, record_for_test: CalibratedSensorRecord, record_data: Dict[str, Any]): + translation_list = [ + record_data["translation"]["x"], + record_data["translation"]["y"], + record_data["translation"]["z"], + ] + rotation_list = [ + record_data["rotation"]["w"], + record_data["rotation"]["x"], + record_data["rotation"]["y"], + record_data["rotation"]["z"], + ] + + rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["sensor_token"], str) + assert isinstance(rec_dict["translation"], list) + assert all(isinstance(t, float) for t in rec_dict["translation"]) + assert isinstance(rec_dict["rotation"], list) + assert all(isinstance(t, float) for t in rec_dict["rotation"]) + assert isinstance(rec_dict["camera_intrinsic"], list) + assert isinstance(rec_dict["camera_distortion"], list) + + assert rec_dict["sensor_token"] == record_data["sensor_token"] + assert rec_dict["translation"] == translation_list + assert rec_dict["rotation"] == rotation_list + assert len(rec_dict["camera_intrinsic"]) == 0 or np.array( + rec_dict["camera_intrinsic"] + ).shape == (3, 3) + assert len(rec_dict["camera_distortion"]) == 0 or len(rec_dict["camera_distortion"]) == 5 + # TODO(yukke42): add test value of camera_intrinsic and camera_distortion + + +class TestCalibratedSensorTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return CalibratedSensorTable() + + def test_filename(self, table_for_test: CalibratedSensorTable): + assert table_for_test.FILENAME == 
"calibrated_sensor.json" + + def test__to_record(self, table_for_test: CalibratedSensorTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, CalibratedSensorRecord) diff --git a/tests/t4_dataset/classes/test_category.py b/tests/t4_dataset/classes/test_category.py new file mode 100644 index 00000000..5395eca2 --- /dev/null +++ b/tests/t4_dataset/classes/test_category.py @@ -0,0 +1,61 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.category import CategoryRecord, CategoryTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "name": "test_category_name", + "description": "the description of the category", + } + return d + + +class TestCategoryRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return CategoryRecord(**record_data) + + def test_to_dict(self, record_for_test: CategoryRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["name"], str) + assert isinstance(rec_dict["description"], str) + assert rec_dict["name"] == record_data["name"] + assert rec_dict["description"] == record_data["description"] + + +class TestCategoryTable: + # TODO(yukke42): impl TestCategoryTable with name_to_description + @pytest.fixture(scope="function") + def table_for_test(self): + return CategoryTable(name_to_description={}, default_value="") + + def test_filename(self, table_for_test: CategoryTable): + assert table_for_test.FILENAME == "category.json" + + def test__to_record(self, table_for_test: CategoryTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, CategoryRecord) + + def test_get_token_from_name(self, table_for_test: CategoryTable): + # TODO(yukke42): impl test_get_token_from_name with description + token1 = table_for_test.get_token_from_name(name="car") + assert isinstance(token1, str) + assert token1 != "" + + # same token + token2 = table_for_test.get_token_from_name(name="car") + assert isinstance(token2, str) + assert token2 != "" + assert token2 == token1 + + # different token + token3 = table_for_test.get_token_from_name(name="pedestrian") + assert isinstance(token3, str) + assert token3 != "" + assert token3 != token1 diff --git a/tests/t4_dataset/classes/test_ego_pose.py b/tests/t4_dataset/classes/test_ego_pose.py new file mode 100644 index 00000000..5173f01f --- /dev/null +++ b/tests/t4_dataset/classes/test_ego_pose.py @@ -0,0 +1,60 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.ego_pose import EgoPoseRecord, EgoPoseTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "translation": {"x": 1.0, "y": 2.0, "z": 3.0}, + "rotation": {"w": 10.0, "x": 20.0, "y": 30.0, "z": 40.0}, + "timestamp": 123456789, + } + return d + + +class TestEgoPoseRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return EgoPoseRecord(**record_data) + + def test_to_dict(self, record_for_test: EgoPoseRecord, record_data: Dict[str, Any]): + translation_list = [ + record_data["translation"]["x"], + record_data["translation"]["y"], + record_data["translation"]["z"], + ] + rotation_list = [ + record_data["rotation"]["w"], + record_data["rotation"]["x"], + record_data["rotation"]["y"], + record_data["rotation"]["z"], + ] + + 
rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["translation"], list) + assert all(isinstance(t, float) for t in rec_dict["translation"]) + assert isinstance(rec_dict["rotation"], list) + assert all(isinstance(t, float) for t in rec_dict["rotation"]) + assert isinstance(rec_dict["timestamp"], int) + + assert rec_dict["translation"] == translation_list + assert rec_dict["rotation"] == rotation_list + assert rec_dict["timestamp"] == record_data["timestamp"] + + +class TestEgoPoseTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return EgoPoseTable() + + def test_filename(self, table_for_test: EgoPoseTable): + assert table_for_test.FILENAME == "ego_pose.json" + + def test__to_record(self, table_for_test: EgoPoseTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, EgoPoseRecord) diff --git a/tests/t4_dataset/classes/test_instance.py b/tests/t4_dataset/classes/test_instance.py new file mode 100644 index 00000000..4e70c91b --- /dev/null +++ b/tests/t4_dataset/classes/test_instance.py @@ -0,0 +1,81 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.instance import InstanceRecord, InstanceTable + + +@pytest.fixture(scope="function") +def record_data(): + d = { + "category_token": "category_token_xxxxx", + } + return d + + +class TestInstanceRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return InstanceRecord(**record_data) + + def test_to_dict(self, record_for_test: InstanceRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["category_token"], str) + assert rec_dict["category_token"] == record_data["category_token"] + + def test_set_annotation_info(self, record_for_test: InstanceRecord): + nbr_annotations = 100 + first_annotation_token = "first_annotation_token_xxxxx" + last_annotation_token = "last_annotation_token_xxxxx" + + record_for_test.set_annotation_info( + nbr_annotations=nbr_annotations, + first_annotation_token=first_annotation_token, + last_annotation_token=last_annotation_token, + ) + rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict["nbr_annotations"], int) + assert isinstance(rec_dict["first_annotation_token"], str) + assert isinstance(rec_dict["last_annotation_token"], str) + assert rec_dict["nbr_annotations"] == nbr_annotations + assert rec_dict["first_annotation_token"] == first_annotation_token + assert rec_dict["last_annotation_token"] == last_annotation_token + + +class TestInstanceTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return InstanceTable() + + def test_filename(self, table_for_test: InstanceTable): + assert table_for_test.FILENAME == "instance.json" + + def test__to_record(self, table_for_test: InstanceTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, InstanceRecord) + + def test_get_token_from_id(self, table_for_test: InstanceTable): + dataset_name: str = "dataset_name_xxxxx" + token1 = table_for_test.get_token_from_id( + instance_id="car1", category_token="car_xxxxx", dataset_name=dataset_name + ) + assert isinstance(token1, str) + assert token1 != "" + + # same token + token2 = table_for_test.get_token_from_id( + instance_id="car1", 
category_token="car_xxxxx", dataset_name=dataset_name + ) + assert isinstance(token2, str) + assert token2 != "" + assert token2 == token1 + + # different token + token3 = table_for_test.get_token_from_id( + instance_id="car2", category_token="car_xxxxx", dataset_name=dataset_name + ) + assert isinstance(token3, str) + assert token3 != "" + assert token3 != token1 diff --git a/tests/t4_dataset/classes/test_log.py b/tests/t4_dataset/classes/test_log.py new file mode 100644 index 00000000..44dbd08b --- /dev/null +++ b/tests/t4_dataset/classes/test_log.py @@ -0,0 +1,48 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.log import LogRecord, LogTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "logfile": "path_to_logfile", + "vehicle": "vehicle_name", + "data_captured": "2020-01-01-00-00-00", + "location": "log_location", + } + return d + + +class TestLogRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return LogRecord(**record_data) + + def test_to_dict(self, record_for_test: LogRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["logfile"], str) + assert isinstance(rec_dict["vehicle"], str) + assert isinstance(rec_dict["data_captured"], str) + assert isinstance(rec_dict["location"], str) + + for key, value in record_data.items(): + assert rec_dict[key] == value + + +class TestLogTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return LogTable() + + def test_filename(self, table_for_test: LogTable): + assert table_for_test.FILENAME == "log.json" + + def test__to_record(self, table_for_test: LogTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, LogRecord) diff --git a/tests/t4_dataset/classes/test_map.py b/tests/t4_dataset/classes/test_map.py new file mode 100644 index 00000000..17e41fea --- /dev/null +++ b/tests/t4_dataset/classes/test_map.py @@ -0,0 +1,47 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.map import MapRecord, MapTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "log_tokens": ["log_token_xxx"], + "category": "map category", + "filename": "map_filename", + } + return d + + +class TestMapRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return MapRecord(**record_data) + + def test_to_dict(self, record_for_test: MapRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["log_tokens"], list) + assert any(isinstance(log, str) for log in rec_dict["log_tokens"]) + assert isinstance(rec_dict["category"], str) + assert isinstance(rec_dict["category"], str) + + for key, value in record_data.items(): + assert rec_dict[key] == value, key + + +class TestMapTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return MapTable() + + def test_filename(self, table_for_test: MapTable): + assert table_for_test.FILENAME == "map.json" + + def test__to_record(self, table_for_test: MapTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, MapRecord) diff --git a/tests/t4_dataset/classes/test_sample.py 
b/tests/t4_dataset/classes/test_sample.py new file mode 100644 index 00000000..2b3860a5 --- /dev/null +++ b/tests/t4_dataset/classes/test_sample.py @@ -0,0 +1,51 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.sample import SampleRecord, SampleTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "timestamp": 123456789, + "scene_token": "scene_token_xxx", + "next_token": "next_token_xxx", + "prev_token": "prev_token_xxx", + } + return d + + +class TestSampleRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return SampleRecord(**record_data) + + def test_to_dict(self, record_for_test: SampleRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["timestamp"], int) + assert isinstance(rec_dict["scene_token"], str) + assert isinstance(rec_dict["next"], str) + assert isinstance(rec_dict["prev"], str) + + for key, value in record_data.items(): + if key == "next_token" or key == "prev_token": + key = key.replace("_token", "") + assert rec_dict[key] == value, key + + +class TestSampleTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return SampleTable() + + def test_filename(self, table_for_test: SampleTable): + assert table_for_test.FILENAME == "sample.json" + + def test__to_record(self, table_for_test: SampleTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, SampleRecord) diff --git a/tests/t4_dataset/classes/test_sample_annotation.py b/tests/t4_dataset/classes/test_sample_annotation.py new file mode 100644 index 00000000..73c59bc4 --- /dev/null +++ b/tests/t4_dataset/classes/test_sample_annotation.py @@ -0,0 +1,95 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.sample_annotation import ( + SampleAnnotationRecord, + SampleAnnotationTable, +) + + +@pytest.fixture(scope="function") +def record_data(): + d = { + "sample_token": "sample_token_xxxxx", + "instance_token": "instance_token_xxxxx", + "attribute_tokens": ["attribute_token_xxxxx", "attribute_token_yyyyy"], + "visibility_token": "visibility_token_xxxxx", + "translation": {"x": 1.0, "y": 2.0, "z": 3.0}, + "size": {"width": 10.0, "length": 20.0, "height": 30.0}, + "rotation": {"w": 100.0, "x": 200.0, "y": 300.0, "z": 400.0}, + "num_lidar_pts": 1000, + "num_radar_pts": 2000, + } + return d + + +class TestSampleAnnotationRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return SampleAnnotationRecord(**record_data) + + def test_next(self, record_for_test: SampleAnnotationRecord): + next_token = "next_token_xxxxx" + record_for_test.next = next_token + assert record_for_test.next == next_token + + def test_prev(self, record_for_test: SampleAnnotationRecord): + prev_token = "prev_token_xxxxx" + record_for_test.prev = prev_token + assert record_for_test.prev == prev_token + + def test_to_dict(self, record_for_test: SampleAnnotationRecord, record_data: Dict[str, Any]): + translation_list = [ + record_data["translation"]["x"], + record_data["translation"]["y"], + record_data["translation"]["z"], + ] + size_list = [ + record_data["size"]["width"], + record_data["size"]["length"], + record_data["size"]["height"], + ] + rotation_list = [ + record_data["rotation"]["w"], + record_data["rotation"]["x"], + 
record_data["rotation"]["y"], + record_data["rotation"]["z"], + ] + + rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["sample_token"], str) + assert isinstance(rec_dict["instance_token"], str) + assert isinstance(rec_dict["attribute_tokens"], list) + assert all(isinstance(token, str) for token in rec_dict["attribute_tokens"]) + assert isinstance(rec_dict["visibility_token"], str) + for key in ["translation", "size", "rotation"]: + assert isinstance(rec_dict[key], list) + assert all(isinstance(v, float) for v in rec_dict[key]) + assert isinstance(rec_dict["num_lidar_pts"], int) + assert isinstance(rec_dict["num_radar_pts"], int) + + assert rec_dict["sample_token"] == record_data["sample_token"] + assert rec_dict["instance_token"] == record_data["instance_token"] + assert rec_dict["attribute_tokens"] == record_data["attribute_tokens"] + assert rec_dict["visibility_token"] == record_data["visibility_token"] + assert rec_dict["translation"] == translation_list + assert rec_dict["size"] == size_list + assert rec_dict["rotation"] == rotation_list + assert rec_dict["num_lidar_pts"] == record_data["num_lidar_pts"] + assert rec_dict["num_radar_pts"] == record_data["num_radar_pts"] + + +class TestSampleAnnotationTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return SampleAnnotationTable() + + def test_filename(self, table_for_test: SampleAnnotationTable): + assert table_for_test.FILENAME == "sample_annotation.json" + + def test__to_record(self, table_for_test: SampleAnnotationTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, SampleAnnotationRecord) diff --git a/tests/t4_dataset/classes/test_sample_data.py b/tests/t4_dataset/classes/test_sample_data.py new file mode 100644 index 00000000..79e5b702 --- /dev/null +++ b/tests/t4_dataset/classes/test_sample_data.py @@ -0,0 +1,66 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.sample_data import SampleDataRecord, SampleDataTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "sample_token": "sample_token_xxx", + "ego_pose_token": "ego_pose_token_xxx", + "calibrated_sensor_token": "calibrated_sensor_token_xxx", + "filename": "data/LIDAR_TOP/0.pcd.bin", + "fileformat": "pcd.bin", + "width": 100, + "height": 200, + "timestamp": 123456789, + "is_key_frame": True, + "next_token": "next_token_xxx", + "prev_token": "prev_token_xxx", + "is_valid": True, + } + return d + + +class TestSampleDataRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return SampleDataRecord(**record_data) + + def test_to_dict(self, record_for_test: SampleDataRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["sample_token"], str) + assert isinstance(rec_dict["ego_pose_token"], str) + assert isinstance(rec_dict["calibrated_sensor_token"], str) + assert isinstance(rec_dict["filename"], str) + assert isinstance(rec_dict["fileformat"], str) + assert isinstance(rec_dict["width"], int) + assert isinstance(rec_dict["height"], int) + assert isinstance(rec_dict["timestamp"], int) + assert isinstance(rec_dict["is_key_frame"], bool) + assert isinstance(rec_dict["next"], str) + assert isinstance(rec_dict["prev"], str) + + for key, value in 
record_data.items(): + if key == "next_token" or key == "prev_token": + key = key.replace("_token", "") + assert rec_dict[key] == value, key + + +class TestSampleDataTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return SampleDataTable() + + def test_filename(self, table_for_test: SampleDataTable): + assert table_for_test.FILENAME == "sample_data.json" + + def test__to_record(self, table_for_test: SampleDataTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, SampleDataRecord) diff --git a/tests/t4_dataset/classes/test_scene.py b/tests/t4_dataset/classes/test_scene.py new file mode 100644 index 00000000..620ed32a --- /dev/null +++ b/tests/t4_dataset/classes/test_scene.py @@ -0,0 +1,52 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.scene import SceneRecord, SceneTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "name": "the name of scene", + "description": "the description of scene", + "log_token": "log_token_xxx", + "nbr_samples": 10, + "first_sample_token": "first_sample_token_xxx", + "last_sample_token": "last_sample_token_xxx", + } + return d + + +class TestSceneRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return SceneRecord(**record_data) + + def test_to_dict(self, record_for_test: SceneRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["name"], str) + assert isinstance(rec_dict["description"], str) + assert isinstance(rec_dict["log_token"], str) + assert isinstance(rec_dict["nbr_samples"], int) + assert isinstance(rec_dict["first_sample_token"], str) + assert isinstance(rec_dict["last_sample_token"], str) + + for key, value in record_data.items(): + assert rec_dict[key] == value, key + + +class TestSceneTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return SceneTable() + + def test_filename(self, table_for_test: SceneTable): + assert table_for_test.FILENAME == "scene.json" + + def test__to_record(self, table_for_test: SceneTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, SceneRecord) diff --git a/tests/t4_dataset/classes/test_sensor.py b/tests/t4_dataset/classes/test_sensor.py new file mode 100644 index 00000000..ec3b3a3b --- /dev/null +++ b/tests/t4_dataset/classes/test_sensor.py @@ -0,0 +1,48 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.sensor import SensorRecord, SensorTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "channel": "LIDAR_TOP", + "modality": "lidar", + } + return d + + +class TestSensorRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return SensorRecord(**record_data) + + def test_to_dict(self, record_for_test: SensorRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["token"], str) + assert isinstance(rec_dict["channel"], str) + assert isinstance(rec_dict["modality"], str) + + for key, value in record_data.items(): + assert rec_dict[key] == value, key + + +class TestSensorTable: + @pytest.fixture(scope="function") + def table_for_test(self): + return SensorTable(channel_to_modality={"LIDAR_TOP": 
"lidar"}) + + def test_filename(self, table_for_test: SensorTable): + assert table_for_test.FILENAME == "sensor.json" + + def test__to_record(self, table_for_test: SensorTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, SensorRecord) + + def test_get_token_from_channel(self, table_for_test: SensorTable): + # TODO(yukke42): impl test_get_token_from_channel + pass diff --git a/tests/t4_dataset/classes/test_visibility.py b/tests/t4_dataset/classes/test_visibility.py new file mode 100644 index 00000000..8d4e2321 --- /dev/null +++ b/tests/t4_dataset/classes/test_visibility.py @@ -0,0 +1,60 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.classes.visibility import VisibilityRecord, VisibilityTable + + +@pytest.fixture(scope="function", autouse=True) +def record_data(): + d = { + "level": "test_visibility_level", + "description": "the description of the visibility", + } + return d + + +class TestAttributeRecord: + @pytest.fixture(scope="function") + def record_for_test(self, record_data): + return VisibilityRecord(**record_data) + + def test_to_dict(self, record_for_test: VisibilityRecord, record_data: Dict[str, Any]): + rec_dict = record_for_test.to_dict() + assert isinstance(rec_dict, dict) + assert isinstance(rec_dict["level"], str) + assert isinstance(rec_dict["description"], str) + assert rec_dict["level"] == record_data["level"] + assert rec_dict["description"] == record_data["description"] + + +class TestAttributeTable: + # TODO(yukke42): impl TestAttributeTable with level_to_description + @pytest.fixture(scope="function") + def table_for_test(self): + return VisibilityTable(level_to_description={}, default_value="") + + def test_filename(self, table_for_test: VisibilityTable): + assert table_for_test.FILENAME == "visibility.json" + + def test__to_record(self, table_for_test: VisibilityTable, record_data: Dict[str, Any]): + record = table_for_test._to_record(**record_data) + assert isinstance(record, VisibilityRecord) + + def test_get_token_from_level(self, table_for_test: VisibilityTable): + # TODO(yukke42): impl test_get_token_from_level with level_to_description + token1 = table_for_test.get_token_from_level(level="v0-40") + assert isinstance(token1, str) + assert token1 != "" + + # same token + token2 = table_for_test.get_token_from_level(level="v0-40") + assert isinstance(token2, str) + assert token2 != "" + assert token2 == token1 + + # different token + token3 = table_for_test.get_token_from_level(level="v40-60") + assert isinstance(token3, str) + assert token3 != "" + assert token3 != token1 diff --git a/tests/t4_dataset/test_annotation_files_generator.py b/tests/t4_dataset/test_annotation_files_generator.py new file mode 100644 index 00000000..83da50bd --- /dev/null +++ b/tests/t4_dataset/test_annotation_files_generator.py @@ -0,0 +1,369 @@ +from typing import Any, Dict + +import pytest + +from perception_dataset.t4_dataset.annotation_files_generator import AnnotationFilesGenerator + + +def _default_three_d_box() -> Dict[str, Dict[str, float]]: + box = { + "translation": { + "x": 1.0, + "y": 2.0, + "z": 3.0, + }, + "size": { + "width": 10.0, + "length": 20.0, + "height": 30.0, + }, + "rotation": { + "w": 0.0, + "x": 0.0, + "y": 0.0, + "z": 0.0, + }, + } + + return box + + +# Note: test case1, case2 use the same scene_anno_dict +class TestAnnotationFilesGenerator: + @pytest.fixture(scope="function") + def instance_for_test(self): + description = { + "visibility": { + "full": 
"No occlusion of the object.", + "most": "Object is occluded, but by less than 50%.", + "partial": "The object is occluded by more than 50% (but not completely).", + "none": "The object is 90-100% occluded and no points/pixels are visible in the label.", + }, + "camera_index": { + "CAM_FRONT": 0, + "CAM_FRONT_RIGHT": 1, + "CAM_BACK_RIGHT": 2, + "CAM_BACK": 3, + "CAM_BACK_LEFT": 4, + "CAM_FRONT_LEFT": 5, + }, + } + return AnnotationFilesGenerator(description=description) + + @pytest.mark.parametrize( + "scene_anno_dict, frame_index_to_sample_token, expected_values", + [ + # test case 1 (3 sample, 1 instance) + ( + # scene_anno_dict + { + 0: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + 1: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + 2: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + }, + # frame_index_to_sample_token + { + 0: "0_xxx", + 1: "1_xxx", + 2: "2_xxx", + }, + # expected values + { + "attribute": { + "len": 1, + "names": ["attr_xxx"], + }, + "category": { + "len": 1, + "names": ["name_xxx"], + }, + "sample_annotation": { + "len": 3, + }, + "instance": { + "len": 1, + "nbr_annotations": [3], + }, + }, + ), + # test case 2 (3 sample, 2 instance) + ( + # scene_anno_dict + { + 0: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + { + "category_name": "name_yyy", + "instance_id": "id_yyy", + "attribute_names": ["attr_yyy", "attr_zzz"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + 1: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + }, + # frame_index_to_sample_token + { + 0: "0_xxx", + 1: "1_xxx", + }, + # expected values + { + "attribute": { + "len": 3, + "names": ["attr_xxx", "attr_yyy", "attr_zzz"], + }, + "category": { + "len": 2, + "names": ["name_xxx", "name_yyy"], + }, + "sample_annotation": { + "len": 3, + }, + "instance": { + "len": 2, + }, + }, + ), + ], + ) + def test__convert_to_t4_format( + self, + mocker, + scope_function, + scene_anno_dict: Dict[int, Dict[str, Any]], + frame_index_to_sample_token: Dict[int, str], + expected_values: Dict[str, Any], + instance_for_test: AnnotationFilesGenerator, + ): + instance_for_test._convert_to_t4_format( + scene_anno_dict=scene_anno_dict, + frame_index_to_sample_token=frame_index_to_sample_token, + dataset_name="test_dataset", + # for 2d annotations but not used in this test case + frame_index_to_sample_data_token=[], + mask=[], + ) + + # check encapsulated value + attr_data = instance_for_test._attribute_table.to_data() + assert len(attr_data) == expected_values["attribute"]["len"] + assert [d["name"] for d in attr_data] == expected_values["attribute"]["names"] + + category_data = instance_for_test._category_table.to_data() + assert len(category_data) == expected_values["category"]["len"] + assert [d["name"] for d in category_data] == 
expected_values["category"]["names"] + + annotation_data = instance_for_test._sample_annotation_table.to_data() + assert len(annotation_data) == expected_values["sample_annotation"]["len"] + + instance_data = instance_for_test._instance_table.to_data() + assert len(instance_data) == expected_values["instance"]["len"] + + @pytest.mark.parametrize( + "scene_anno_dict, frame_index_to_sample_token, expected_values", + [ + # test case 1 (3 sample, 1 instance) + ( + # scene_anno_dict + { + 0: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + 1: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + 2: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + }, + # frame_index_to_sample_token + { + 0: "0_xxx", + 1: "1_xxx", + 2: "2_xxx", + }, + # expected values + { + "sample_annotation": { + "connection_indices_list": [ + [0, 1, 2], + ], + }, + "instance": { + "nbr_annotations": [3], + "first_token_indices": [0], + "last_token_indices": [2], + }, + }, + ), + # test case 2 (3 sample, 2 instance) + ( + # scene_anno_dict + { + 0: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + { + "category_name": "name_yyy", + "instance_id": "id_yyy", + "attribute_names": ["attr_yyy", "attr_zzz"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + 1: [ + { + "category_name": "name_xxx", + "instance_id": "id_xxx", + "attribute_names": ["attr_xxx"], + "three_d_box": _default_three_d_box(), + "num_lidar_pts": 0, + "num_radar_pts": 0, + }, + ], + }, + # frame_index_to_sample_token + { + 0: "0_xxx", + 1: "1_xxx", + }, + # expected values + { + "sample_annotation": { + "connection_indices_list": [ + [0, 2], + [1], + ], + }, + "instance": { + "nbr_annotations": [2, 1], + "first_token_indices": [0, 1], + "last_token_indices": [2, 1], + }, + }, + ), + ], + ) + def test__connect_annotations_in_scene( + self, + mocker, + scope_function, + scene_anno_dict: Dict[int, Dict[str, Any]], + frame_index_to_sample_token: Dict[int, str], + expected_values: Dict[str, Any], + instance_for_test: AnnotationFilesGenerator, + ): + instance_for_test._convert_to_t4_format( + scene_anno_dict=scene_anno_dict, + frame_index_to_sample_token=frame_index_to_sample_token, + dataset_name="test_dataset", + # for 2d annotations but not used in this test case + frame_index_to_sample_data_token=[], + mask=[], + ) + instance_for_test._connect_annotations_in_scene() + + # check encapsulated value + anno_data = instance_for_test._sample_annotation_table.to_data() + for connect_indices in expected_values["sample_annotation"]["connection_indices_list"]: + for ci in range(len(connect_indices)): + sample_i = connect_indices[ci] + if ci == 0: + # the first token + assert anno_data[sample_i]["prev"] == "" + else: + prev_sample_i = connect_indices[ci - 1] + assert anno_data[sample_i]["prev"] == anno_data[prev_sample_i]["token"] + + if ci == len(connect_indices) - 1: + # the last token + assert anno_data[sample_i]["next"] == "" + else: + next_sample_i = 
connect_indices[ci + 1] + assert anno_data[sample_i]["next"] == anno_data[next_sample_i]["token"] + + expected_first_tokens = [ + anno_data[i]["token"] for i in expected_values["instance"]["first_token_indices"] + ] + expected_last_tokens = [ + anno_data[i]["token"] for i in expected_values["instance"]["last_token_indices"] + ] + instance_data = instance_for_test._instance_table.to_data() + assert [d["nbr_annotations"] for d in instance_data] == expected_values["instance"][ + "nbr_annotations" + ] + assert [d["first_annotation_token"] for d in instance_data] == expected_first_tokens + assert [d["last_annotation_token"] for d in instance_data] == expected_last_tokens diff --git a/tests/utils/test_gen_tokens.py b/tests/utils/test_gen_tokens.py new file mode 100644 index 00000000..2a79ea3e --- /dev/null +++ b/tests/utils/test_gen_tokens.py @@ -0,0 +1,28 @@ +import pytest + +from perception_dataset.utils.gen_tokens import generate_token + + +def test_generate_token(): + assert type(generate_token(16, "bytes")) == bytes + assert len(generate_token(16, "bytes")) == 16 + assert len(generate_token(32, "bytes")) == 32 + assert generate_token(16, "bytes") != generate_token(16, "bytes") + + assert type(generate_token(16, "hex")) == str + assert len(generate_token(16, "hex")) == 32 + assert len(generate_token(32, "hex")) == 64 + assert generate_token(16, "hex") != generate_token(16, "hex") + + assert type(generate_token(16, "urlsafe")) == str + assert len(generate_token(16, "urlsafe")) == 22 + assert len(generate_token(32, "urlsafe")) == 43 + assert generate_token(16, "urlsafe") != generate_token(16, "urlsafe") + + with pytest.raises(ValueError) as e: + generate_token(16, "noexist") + e.match("Invalid argument 'mode'='noexist'") + + with pytest.raises(ValueError) as e: + generate_token(15, "hex") + e.match("nbytes 15 is too short. Give >= 16.") diff --git a/tests/utils/test_misc.py b/tests/utils/test_misc.py new file mode 100644 index 00000000..7f44de85 --- /dev/null +++ b/tests/utils/test_misc.py @@ -0,0 +1,13 @@ +def test_unix_timestamp_to_nusc_timestamp(): + # TODO(yukke42): impl test_unix_timestamp_to_nusc_timestamp + pass + + +def test_nusc_timestamp_to_unix_timestamp(): + # TODO(yukke42): impl test_nusc_timestamp_to_unix_timestamp + pass + + +def test_get_sample_data_filename(): + # TODO(yukke42): impl test_get_sample_data_filename + pass diff --git a/tests/utils/test_rosbag2.py b/tests/utils/test_rosbag2.py new file mode 100644 index 00000000..b88473d8 --- /dev/null +++ b/tests/utils/test_rosbag2.py @@ -0,0 +1,23 @@ +def test_pointcloud_msg_to_numpy(): + # TODO(yukke42): impl test_pointcloud_msg_to_numpy + pass + + +def test_compressed_msg_to_numpy(): + # TODO(yukke42): impl test_compressed_msg_to_numpy + pass + + +def test_stamp_to_unix_timestamp(): + # TODO(yukke42): impl test_stamp_to_unix_timestamp + pass + + +def test_unix_timestamp_to_stamp(): + # TODO(yukke42): impl test_unix_timestamp_to_stamp + pass + + +def test_stamp_to_nusc_timestamp(): + # TODO(yukke42): impl test_stamp_to_nusc_timestamp + pass
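Note for reviewers: several tests in this diff (e.g. `test__timestamp_to_sec` and the `AnnotationFilesGenerator` tests) declare `mocker` and `scope_function` parameters. `mocker` is the fixture provided by the pytest-mock plugin, but `scope_function` does not appear in any file shown here, so it is presumably defined in a shared `tests/conftest.py`. The sketch below is a minimal conftest that would satisfy those signatures; the fixture name is taken from the test code, while its no-op behavior is an assumption, not part of this diff:

```python
# tests/conftest.py -- hypothetical sketch, not part of this diff.
# Provides the `scope_function` fixture that several tests request.
# It is assumed to be a no-op, function-scoped fixture that only makes
# the per-test setup/teardown scope explicit; `mocker` needs no
# definition here because the pytest-mock plugin provides it.
import pytest


@pytest.fixture(scope="function")
def scope_function():
    # No setup; hand control to the test, then no teardown.
    yield
```

If no such fixture exists anywhere in the repository, pytest reports `fixture 'scope_function' not found` for these tests, so this is worth double-checking before merge.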