diff --git a/.github/workflows/test_packages.yml b/.github/workflows/test_packages.yml
index 29c2511dc6879259e054508a5118a08550320c24..147a0d96022a460ea697bbdb5f22fca24fffbba4 100644
--- a/.github/workflows/test_packages.yml
+++ b/.github/workflows/test_packages.yml
@@ -7,21 +7,17 @@ on:
   schedule:
     - cron:  '0 23 * * *'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 defaults:
   run:
     shell: bash
 
 
 jobs:
-  cleanup-runs:
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'test packages') || github.event_name == 'schedule' }}
-    runs-on: ubuntu-latest
-    steps:
-    - uses: rokroskar/workflow-run-cleanup-action@master
-      env:
-        GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
   test-wheel:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test packages') || github.event_name == 'schedule' }}
     strategy:
       matrix:
@@ -43,15 +39,16 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
           - perception/facial_expression_recognition
-          # - perception/object_detection_3d
+          - perception/object_detection_3d
           # - control/mobile_manipulation
           # - simulation/human_model_generation
           # - control/single_demo_grasp
-          # - perception/object_tracking_3d
+          - perception/object_tracking_3d
     runs-on: ${{ matrix.os }}
     steps:
     - uses: actions/checkout@v2
@@ -70,7 +67,6 @@ jobs:
         pip install opendr-toolkit
         python -m unittest discover -s tests/sources/tools/${{ matrix.package }}
   test-docker:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test packages') || github.event_name == 'schedule' }}
     strategy:
       matrix:
@@ -92,6 +88,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
@@ -100,7 +97,7 @@ jobs:
           - control/mobile_manipulation
           - simulation/human_model_generation
           - control/single_demo_grasp
-          # - perception/object_tracking_3d
+          - perception/object_tracking_3d
     runs-on: ${{ matrix.os }}
     steps:
     - name: Set up Python 3.8
@@ -109,6 +106,6 @@ jobs:
         python-version: 3.8
     - name: Test Docker
       run: |
-        docker run --name toolkit -i opendr/opendr-toolkit:cpu_latest bash
+        docker run --name toolkit -i opendr/opendr-toolkit:cpu_v1.1.1 bash
         docker start toolkit
         docker exec -i toolkit bash -c "source bin/activate.sh && source tests/sources/tools/control/mobile_manipulation/run_ros.sh && python -m unittest discover -s tests/sources/tools/${{ matrix.package }}"
diff --git a/.github/workflows/tests_suite.yml b/.github/workflows/tests_suite.yml
index 1a4e252b042155ab8370532f12f8712bec1a2d13..ea7167e86de561a7a6ba202782048ada279cc09f 100644
--- a/.github/workflows/tests_suite.yml
+++ b/.github/workflows/tests_suite.yml
@@ -6,20 +6,16 @@ on:
   schedule:
     - cron:  '0 23 * * *'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 defaults:
   run:
     shell: bash
 
 jobs:
-  cleanup-runs:
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'test sources') || contains(github.event.pull_request.labels.*.name, 'test tools') || contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
-    runs-on: ubuntu-latest
-    steps:
-    - uses: rokroskar/workflow-run-cleanup-action@master
-      env:
-        GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
   test-sources:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test sources') || github.event_name == 'schedule' }}
     strategy:
       matrix:
@@ -51,9 +47,9 @@ jobs:
         pip install -r tests/requirements.txt
         python -m unittest discover -s tests
   test-tools:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test tools') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -76,6 +72,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
@@ -115,7 +112,6 @@ jobs:
             python -m unittest discover -s tests/sources/tools/${{ matrix.package }}
         fi
   build-wheel:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     runs-on: ubuntu-20.04
     steps:
@@ -140,7 +136,6 @@ jobs:
         path:
           dist/*.tar.gz
   build-docker:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     runs-on: ubuntu-20.04
     steps:
@@ -164,6 +159,7 @@ jobs:
     needs: build-wheel
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -184,6 +180,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
@@ -235,6 +232,7 @@ jobs:
     needs: build-wheel
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -255,6 +253,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
@@ -312,6 +311,7 @@ jobs:
     needs: build-docker
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -332,6 +332,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
diff --git a/.github/workflows/tests_suite_develop.yml b/.github/workflows/tests_suite_develop.yml
index 89f963969f7f81291a7a2cbdac28c4f993ee74c1..bea4b78ae65611e6031aa5a015b1d8f4e5eb1eda 100644
--- a/.github/workflows/tests_suite_develop.yml
+++ b/.github/workflows/tests_suite_develop.yml
@@ -6,20 +6,16 @@ on:
   schedule:
     - cron:  '0 23 * * *'
 
+concurrency:
+  group: ${{ github.workflow }}-${{ github.ref }}
+  cancel-in-progress: true
+
 defaults:
   run:
     shell: bash
 
 jobs:
-  cleanup-runs:
-    if: ${{ contains(github.event.pull_request.labels.*.name, 'test sources') || contains(github.event.pull_request.labels.*.name, 'test tools') || contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
-    runs-on: ubuntu-latest
-    steps:
-    - uses: rokroskar/workflow-run-cleanup-action@master
-      env:
-        GITHUB_TOKEN: "${{ secrets.GITHUB_TOKEN }}"
   test-sources:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test sources') || github.event_name == 'schedule' }}
     strategy:
       matrix:
@@ -52,9 +48,9 @@ jobs:
         pip install -r tests/requirements.txt
         python -m unittest discover -s tests
   test-tools:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test tools') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -77,6 +73,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
@@ -117,7 +114,6 @@ jobs:
             python -m unittest discover -s tests/sources/tools/${{ matrix.package }}
         fi
   build-wheel:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     runs-on: ubuntu-20.04
     steps:
@@ -143,7 +139,6 @@ jobs:
         path:
           dist/*.tar.gz
   build-docker:
-    needs: cleanup-runs
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     runs-on: ubuntu-20.04
     steps:
@@ -168,6 +163,7 @@ jobs:
     needs: build-wheel
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -188,6 +184,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
@@ -230,9 +227,9 @@ jobs:
           fi
           echo "Installing $package package"
           if [ "$package" == "opendr" ]; then
-            pip install ./artifact/artifact/opendr-toolkit-*.tar.gz
+            pip install ./artifact/wheel-artifact/opendr-toolkit-*.tar.gz
           else
-            pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz
+            pip install ./artifact/wheel-artifact/opendr-toolkit-$package-*.tar.gz
           fi
         done < packages.txt
         python -m unittest discover -s tests/sources/tools/${{ matrix.package }}
@@ -240,6 +237,7 @@ jobs:
     needs: build-wheel
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -260,6 +258,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
@@ -294,11 +293,11 @@ jobs:
         source venv/bin/activate
         pip install wheel
         # Install engine and requirements for other packages
-        pip install ./artifact/artifact/opendr-toolkit-engine-*.tar.gz
+        pip install ./artifact/wheel-artifact/opendr-toolkit-engine-*.tar.gz
         # The following two are dependecies for some other packages and pip cannot automatically install them if they are not on a repo
-        pip install ./artifact/artifact/opendr-toolkit-compressive-learning-*.tar.gz
-        pip install ./artifact/artifact/opendr-toolkit-object-detection-2d-*.tar.gz
-
+        pip install ./artifact/wheel-artifact/opendr-toolkit-compressive-learning-*.tar.gz
+        pip install ./artifact/wheel-artifact/opendr-toolkit-object-detection-2d-*.tar.gz
+        pip install ./artifact/wheel-artifact/opendr-toolkit-pose-estimation-*.tar.gz
         # Install specific package for testing
         package=$(sed "s/_/-/g" <<< ${{ matrix.package }})
         arr=(${package//// })
@@ -308,16 +307,16 @@ jobs:
         echo "Installing $package package"
         # Utils contains hyperparameter tuning
         if [ "$package" == "utils" ]; then
-              pip install ./artifact/artifact/opendr-toolkit-hyperparameter-tuner-*.tar.gz
-
+              pip install ./artifact/wheel-artifact/opendr-toolkit-hyperparameter-tuner-*.tar.gz
         else
-              pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz
+              pip install ./artifact/wheel-artifact/opendr-toolkit-$package-*.tar.gz
         fi
         python -m unittest discover -s tests/sources/tools/${{ matrix.package }}
   test-docker:
     needs: build-docker
     if: ${{ contains(github.event.pull_request.labels.*.name, 'test release') || github.event_name == 'schedule' }}
     strategy:
+      fail-fast: false
       matrix:
         os: [ubuntu-20.04]
         package:
@@ -338,6 +337,7 @@ jobs:
           - perception/object_detection_2d/detr
           - perception/object_detection_2d/gem
           - perception/object_detection_2d/ssd
+          - perception/object_detection_2d/nanodet
           - perception/object_detection_2d/yolov3
           - perception/object_detection_2d/retinaface
           - perception/object_detection_2d/nms
diff --git a/.gitignore b/.gitignore
index af7ac1235eadac0c4e1231b7cb83e4f22f891e4b..58ae3992240f2080d6c0b5910eaf8d87404aca40 100644
--- a/.gitignore
+++ b/.gitignore
@@ -70,4 +70,4 @@ temp/
 # ROS interface
 projects/opendr_ws/.catkin_workspace
 projects/opendr_ws/devel/
-projects/control/eagerx/eagerx_ws/
+projects/python/control/eagerx/eagerx_ws/
diff --git a/CHANGELOG.md b/CHANGELOG.md
index 85847af40af9eec533f87ed81487e4266d16d782..dbc3a1ab958cb4c8a43e2d3b5c05001f34bc4109 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,7 +1,14 @@
 # OpenDR Toolkit Change Log
 
-## Version 1.X
-Released on XX, XXth, 2022.
+## Version 1.1.1
+Released on June, 30th, 2022.
+
+  - Bug Fixes:
+    - Fix Efficient Panoptic Segmentation submodule commit ([#268](https://github.com/opendr-eu/opendr/pull/268)).
+    - Fix Face Recognition compilation error ([#267](https://github.com/opendr-eu/opendr/pull/267)).
+
+## Version 1.1.0
+Released on June, 14th, 2022.
 
   - New Features:
     - Added end-to-end planning tool ([#223](https://github.com/opendr-eu/opendr/pull/223)).
diff --git a/Dockerfile-cuda b/Dockerfile-cuda
index 6d14e48d9e4241548e5b04040fa3e09c163a7f7e..2a57bc5b91a2e90bfad9ce79085b7570ccfe627d 100644
--- a/Dockerfile-cuda
+++ b/Dockerfile-cuda
@@ -1,6 +1,6 @@
 FROM nvidia/cuda:11.2.0-cudnn8-devel-ubuntu20.04
 
-ARG branch=master
+ARG branch=develop
 
 # Fix NVIDIA CUDA Linux repository key rotation
 ENV APT_KEY_DONT_WARN_ON_DANGEROUS_USAGE=1
diff --git a/README.md b/README.md
index ed14f17f671b862f53d45dabbaf360f98a247b76..edae04772e02657930e51208114b50db4a8ec9e4 100644
--- a/README.md
+++ b/README.md
@@ -7,11 +7,13 @@ ______________________________________________________________________
 
 <p align="center">
   <a href="https://www.opendr.eu/">Website</a> •
-  <a href="#about">About</a> •
   <a href="docs/reference/installation.md">Installation</a> •
-  <a href="#using-opendr-toolkit">Using OpenDR toolkit</a> •
-  <a href="projects">Examples</a> •
+  <a href="projects/python">Python Examples</a> •
+  <a href="projects/opendr_ws">ROS1</a> •
+  <a href="projects/opendr_ws_2">ROS2</a> •
+  <a href="projects/c_api">C API</a> •
   <a href="docs/reference/customize.md">Customization</a> •
+  <a href="docs/reference/issues.md">Known Issues</a> •
   <a href="#roadmap">Roadmap</a> •
   <a href="CHANGELOG.md">Changelog</a> •
   <a href="LICENSE">License</a>
@@ -34,19 +36,40 @@ OpenDR focuses on the **AI and Cognition core technology** in order to provide t
 As a result, the developed OpenDR toolkit will also enable cooperative human-robot interaction as well as the development of cognitive mechatronics where sensing and actuation are closely coupled with cognitive systems thus contributing to another two core technologies beyond AI and Cognition.
 OpenDR aims to develop, train, deploy and evaluate deep learning models that improve the technical capabilities of the core technologies beyond the current state of the art.
 
-## Installing OpenDR Toolkit
 
+## Where to start?
+
+You can start by [installing](docs/reference/installation.md) the OpenDR toolkit. 
 OpenDR can be installed in the following ways:
 1. By *cloning* this repository (CPU/GPU support)
 2. Using *pip* (CPU/GPU support only)
 3. Using *docker* (CPU/GPU support)
 
-You can find detailed installation instruction in the [documentation](docs/reference/installation.md).
 
-## Using OpenDR toolkit
+## What does OpenDR provide?
+
 OpenDR provides an intuitive and easy to use **[Python interface](src/opendr)**, a **[C API](src/c_api) for performance critical application**, a wealth of **[usage examples and supporting tools](projects)**, as well as **ready-to-use [ROS nodes](projects/opendr_ws)**.
 OpenDR is built to support [Webots Open Source Robot Simulator](https://cyberbotics.com/), while it also extensively follows industry standards, such as [ONNX model format](https://onnx.ai/) and [OpenAI Gym Interface](https://gym.openai.com/).
-You can find detailed documentation in OpenDR [wiki](https://github.com/tasostefas/opendr_internal/wiki), as well as in the [tools index](docs/reference/index.md).
+
+## How can I start using OpenDR?
+
+You can find detailed documentation in the OpenDR [wiki](https://github.com/opendr-eu/opendr/wiki).
+The main point of reference after installing the toolkit is the [tools index](docs/reference/index.md).
+Starting from there, you can find detailed documentation for all the tools included in OpenDR.
+
+- If you are interested in ready-to-use ROS nodes, then you can directly jump to our [ROS1](projects/opendr_ws) and [ROS2](projects/opendr_ws_2) workspaces.
+- If you are interested in ready-to-use examples, then you can check out the [projects](projects/python) folder, which contains examples and tutorials for [perception](projects/python/perception), [control](projects/python/control), [simulation](projects/python/simulation) and [hyperparameter tuning](projects/python/utils) tools.
+- If you want to explore our C API, then you can check out the provided [C demos](projects/c_api).
+
+## How can I interface OpenDR?
+
+OpenDR is built upon Python.
+Therefore, the main OpenDR interface is written in Python and it is available through the [opendr](src/opendr) package.
+Furthermore, OpenDR provides [ROS1](projects/opendr_ws) and [ROS2](projects/opendr_ws_2) interfaces, as well as a [C interface](projects/c_api).
+Note that you can use as many tools as you wish at the same time, since there is no software limitation on the number of tools that can run concurrently.
+However, hardware limitations (e.g., GPU memory) might restrict the number of tools that can run at any given moment.
+
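+For reference, here is a minimal sketch of the Python interface; it assumes the pose estimation tool is installed and uses a pretrained model (file names are illustrative):
+```python
+from opendr.engine.data import Image
+from opendr.perception.pose_estimation import LightweightOpenPoseLearner
+
+pose_estimator = LightweightOpenPoseLearner(device="cpu")
+pose_estimator.download(path=".")        # fetch a pretrained model
+pose_estimator.load("openpose_default")  # load it
+poses = pose_estimator.infer(Image.open("example.jpg"))  # list of detected poses
+```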
+
 
 ## Roadmap
 OpenDR has the following roadmap:
@@ -55,15 +78,15 @@ OpenDR has the following roadmap:
 - **v3.0 (2023)**: Active perception-enabled deep learning tools for improved robotic perception
 
 ## How to contribute
-Please follow the instructions provided in the [wiki](https://github.com/tasostefas/opendr_internal/wiki).
+Please follow the instructions provided in the [wiki](https://github.com/opendr-eu/opendr/wiki).
 
 ## How to cite us
 If you use OpenDR for your research, please cite the following paper that introduces OpenDR architecture and design:
 <pre>
-@article{opendr2022,
+@inproceedings{opendr2022,
   title={OpenDR: An Open Toolkit for Enabling High Performance, Low Footprint Deep Learning for Robotics},
   author={Passalis, Nikolaos and Pedrazzi, Stefania and Babuska, Robert and Burgard, Wolfram and Dias, Daniel and Ferro, Francesco and Gabbouj, Moncef and Green, Ole and Iosifidis, Alexandros and Kayacan, Erdal and Kober, Jens and Michel, Olivier and Nikolaidis, Nikos and Nousi, Paraskevi and Pieters, Roel and Tzelepi, Maria and Valada, Abhinav and Tefas, Anastasios},
-  journal={arXiv preprint arXiv:2203.00403},
+  booktitle={Proceedings of the 2022 IEEE/RSJ International Conference on Intelligent Robots and Systems (to appear)},
   year={2022}
 }
 </pre>
diff --git a/bin/install.sh b/bin/install.sh
index f0eb9552bc1c851077f11cf922b19fbb1c5b47ad..d6a75fe65a1f1767a2fe32d8c79e05f4de4465cf 100755
--- a/bin/install.sh
+++ b/bin/install.sh
@@ -51,7 +51,7 @@ if [[ "${OPENDR_DEVICE}" == "gpu" ]]; then
   echo "[INFO] Replacing torch==1.9.0+cu111 to enable CUDA acceleration."
   pip3 install torch==1.9.0+cu111 torchvision==0.10.0+cu111 torchaudio==0.9.0 -f https://download.pytorch.org/whl/torch_stable.html
   echo "[INFO] Reinstalling detectronv2."
-  pip3 install 'git+https://github.com/facebookresearch/detectron2.git'
+  pip3 install 'git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13'
 fi
 
 make libopendr
diff --git a/dependencies/parse_dependencies.py b/dependencies/parse_dependencies.py
index 31fdc20829d5b15443f0167a421c4bddf21c590c..608abdcd51c9bdd938b2615a905ce9e0b1a25c05 100644
--- a/dependencies/parse_dependencies.py
+++ b/dependencies/parse_dependencies.py
@@ -65,7 +65,7 @@ read_ini('dependencies.ini')
 # Loop through tools and extract dependencies
 if not global_dependencies:
     opendr_home = os.environ.get('OPENDR_HOME')
-    for dir_to_walk in ['src', 'projects/control/eagerx']:
+    for dir_to_walk in ['src', 'projects/python/control/eagerx']:
         for subdir, dirs, files in os.walk(os.path.join(opendr_home, dir_to_walk)):
             for filename in files:
                 if filename == 'dependencies.ini':
diff --git a/docs/reference/customize.md b/docs/reference/customize.md
index 9c1e3988ae96f6ac19686cc1b806c90a84f3a548..9e1258be747976d6fccc793c04f7f5337d84e904 100644
--- a/docs/reference/customize.md
+++ b/docs/reference/customize.md
@@ -2,11 +2,14 @@
 
 OpenDR is fully open-source and can be readily customized to meet the needs of several different application areas, since the source code for all the developed tools is provided.
 Several ready-to-use examples, which are expected to cover a wide range of different needs, are provided.
-For example, users can readily use the existing [ROS nodes](projects/opendr_ws), e.g., by including the required triggers or by combining several nodes into one to build custom nodes that will fit their needs. 
-Furthermore, note that several tools can be combined within a ROS node, as showcased in [face recognition ROS node](projects/opendr_ws/src/perception/scripts/face_recognition.py). 
+For example, users can readily use the existing [ROS nodes](../../projects/opendr_ws), e.g., by including the required triggers or by combining several nodes into one to build custom nodes that will fit their needs. 
+Furthermore, note that several tools can be combined within a ROS node, as showcased in [face recognition ROS node](../../projects/opendr_ws/src/perception/scripts/face_recognition.py). 
 You can use these nodes as a template for customizing the toolkit to your own needs.
 The rest of this document includes instructions for:
-1. Building docker images using the provided docker files. 
+1. [Building docker images using the provided docker files](#building-custom-docker-images)
+2. [Customizing existing docker images](#customizing-existing-docker-images)
+3. [Changing the behavior of ROS nodes](#changing-the-behavior-of-ros-nodes)
+4. [Building docker images that do not contain the whole toolkit](#building-docker-images-that-do-not-contain-the-whole-toolkit)
 
 
 ## Building custom docker images
@@ -56,3 +59,43 @@ and
 ```
 sudo docker run --gpus all -p 8888:8888 opendr/opendr-toolkit:cuda
 ```
+
+## Customizing existing docker images
+Building docker images from scratch can take a lot of time, especially for embedded systems without cross-compilation support.
+If you need to modify a docker image without rebuilding it (e.g., for changing some source files inside it or adding support for custom pipelines), then you can simply start with the image that you are interested in, make the changes and use the [docker commit](https://docs.docker.com/engine/reference/commandline/commit/) command. In this way, the changes that have been made will be saved in a new image.
+
+
+## Changing the behavior of ROS nodes
+ROS nodes are provided as examples that demonstrate how various tools can be used. 
+As a result, customization might be needed in order to make them appropriate for your specific needs.
+Currently, all nodes support changing the input/output topics (please refer to the [README](../../projects/opendr_ws/src/perception/README.md) for more information on each node).
+However, if you need to change anything else (e.g., load a custom model), then you should appropriately modify the source code of the nodes.
+This is very easy, since the Python API of OpenDR is used in all of the provided nodes.
+You can refer to [Python API documentation](https://github.com/opendr-eu/opendr/blob/master/docs/reference/index.md) for more details for the tool that you are interested in.
+
+### Loading a custom model
+Loading a custom model in a ROS node is very easy. 
+First, locate the node that you want to modify (e.g., [pose estimation](../../projects/opendr_ws/src/perception/scripts/pose_estimation.py)).
+Then, search for the line where the learner loads the model (i.e., calls the `load()` function). 
+For the aforementioned node, this happens at [line 76](../../projects/opendr_ws/src/perception/scripts/pose_estimation.py#L76).
+Then, replace the path passed to the `load()` function with the path to your custom model.
+You can also optionally remove the call to the `download()` function (e.g., [line 75](../../projects/opendr_ws/src/perception/scripts/pose_estimation.py#L75)) to make the node start up faster.
+
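+As a minimal sketch (assuming the node uses the Lightweight OpenPose learner, as the pose estimation node above does, and with an illustrative model path), the relevant lines would become:
+```python
+from opendr.perception.pose_estimation import LightweightOpenPoseLearner
+
+pose_estimator = LightweightOpenPoseLearner(device="cuda")
+# Instead of calling pose_estimator.download(...) and loading "openpose_default",
+# point load() directly to the directory that contains your custom model:
+pose_estimator.load("/path/to/my_custom_pose_model")
+```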
+
+## Building docker images that do not contain the whole toolkit
+To build custom docker images that do not contain the whole toolkit, you should follow these steps:
+1. Identify the tools that you are using and note them.
+2. Start from a clean clone of the repository and remove all modules under [src/opendr](../../src/opendr) that you are not using.
+To this end, use the `rm` command from the root folder of the toolkit and write down the commands that you are issuing.
+Please note that you should NOT remove the `engine` package.
+3. Add the `rm` commands that you have issued in the dockerfile (e.g., in the main [dockerfile](https://github.com/opendr-eu/opendr/blob/master/Dockerfile)) after the `WORKDIR` command and before the `RUN ./bin/install.sh` command.
+4. Build the docker image as usual.
+
+By removing the tools that you are not using, you are also removing the corresponding `requirements.txt` file.
+In this way, the `install.sh` script will not pull and install the corresponding dependencies, resulting in smaller and more lightweight docker images.
+
+Things to keep in mind:
+1. ROS Noetic is manually installed by the installation script.
+If you want to install another version, you should modify both `install.sh` and `Makefile`.
+2. `mxnet`, `torch` and `detectron` are manually installed by the `install.sh` script if you have set `OPENDR_DEVICE=gpu`.
+If you do not need these dependencies, then you should manually remove them.
diff --git a/docs/reference/detr.md b/docs/reference/detr.md
index d54f267ac0dfbc41f81c3db463eebf154a31efe4..b2007cb601f22a19a96ff8e609b740b410cccd48 100644
--- a/docs/reference/detr.md
+++ b/docs/reference/detr.md
@@ -230,10 +230,10 @@ Documentation on how to use this node can be found [here](../../projects/opendr_
 #### Tutorials and Demos
 
 A tutorial on performing inference is available
-[here](../../projects/perception/object_detection_2d/detr/inference_tutorial.ipynb).
-Furthermore, demos on performing [training](../../projects/perception/object_detection_2d/detr/train_demo.py),
-[evaluation](../../projects/perception/object_detection_2d/detr/eval_demo.py) and
-[inference](../../projects/perception/object_detection_2d/detr/inference_demo.py) are also available.
+[here](../../projects/python/perception/object_detection_2d/detr/inference_tutorial.ipynb).
+Furthermore, demos on performing [training](../../projects/python/perception/object_detection_2d/detr/train_demo.py),
+[evaluation](../../projects/python/perception/object_detection_2d/detr/eval_demo.py) and
+[inference](../../projects/python/perception/object_detection_2d/detr/inference_demo.py) are also available.
 
 
 
diff --git a/docs/reference/eagerx.md b/docs/reference/eagerx.md
index 53f3eae930d6c162a40b983d4bc4cbe242824f4b..537e128c3dfe3b5b6a461ab1eab2512272420af0 100644
--- a/docs/reference/eagerx.md
+++ b/docs/reference/eagerx.md
@@ -24,21 +24,21 @@ Documentation is available online: [https://eagerx.readthedocs.io](https://eager
 
 **Prerequisites**: EAGERx requires ROS Noetic and Python 3.8 to be installed.
 
-1. **[demo_full_state](../../projects/control/eagerx/demos/demo_full_state.py)**:  
+1. **[demo_full_state](../../projects/python/control/eagerx/demos/demo_full_state.py)**:  
    Here, we wrap the OpenAI gym within EAGERx.
    The agent learns to map low-dimensional angular observations to torques.
-2. **[demo_pid](../../projects/control/eagerx/demos/demo_pid.py)**:   
+2. **[demo_pid](../../projects/python/control/eagerx/demos/demo_pid.py)**:   
    Here, we add a PID controller, tuned to stabilize the pendulum in the upright position, as a pre-processing node.
    The agent now maps low-dimensional angular observations to reference torques.
    In turn, the reference torques are converted to torques by the PID controller, and applied to the system.
-3. **[demo_classifier](../../projects/control/eagerx/demos/demo_classifier.py)**:   
+3. **[demo_classifier](../../projects/python/control/eagerx/demos/demo_classifier.py)**:   
    Instead of using low-dimensional angular observations, the environment now produces pixel images of the pendulum.
    In order to speed-up learning, we use a pre-trained classifier to convert these pixel images to estimated angular observations.
    Then, the agent uses these estimated angular observations similarly as in 'demo_2_pid' to successfully swing-up the pendulum.
 
 Example usage:
 ```bash
-cd $OPENDR_HOME/projects/control/eagerx/demos
+cd $OPENDR_HOME/projects/python/control/eagerx/demos
 python3 [demo_name]
 ```
 
diff --git a/docs/reference/face-detection-2d-retinaface.md b/docs/reference/face-detection-2d-retinaface.md
index 90dfd67f53a737e06d5a8a81b4614c11c0a3b61d..976c60e26d8eabeeacf6baa154aebd1c112cdad2 100644
--- a/docs/reference/face-detection-2d-retinaface.md
+++ b/docs/reference/face-detection-2d-retinaface.md
@@ -175,7 +175,7 @@ Parameters:
   The WIDER Face detection dataset is supported for training, implemented as a `DetectionDataset` subclass. This example assumes the data has been downloaded and placed in the directory referenced by `data_root`.
 
   ```python
-  from opendr.perception.object_detection_2d import YOLOv3DetectorLearner
+  from opendr.perception.object_detection_2d import RetinaFaceLearner, WiderFaceDataset
   from opendr.engine.datasets import ExternalDataset
   
   dataset = WiderFaceDataset(root=data_root, splits=['train'])
@@ -246,4 +246,4 @@ The platform compatibility evaluation is also reported below:
 #### References
 <a name="retinaface-1" href="https://arxiv.org/abs/1905.00641">[1]</a> RetinaFace: Single-stage Dense Face Localisation in the Wild,
 [arXiv](https://arxiv.org/abs/1905.00641).
- 
\ No newline at end of file
+ 
diff --git a/docs/reference/fall-detection.md b/docs/reference/fall-detection.md
index 3d535a633cd14690bbdc20de4383cf31170681d5..567ff89993c63bd9ab8e47093cd96aa9d9846245 100644
--- a/docs/reference/fall-detection.md
+++ b/docs/reference/fall-detection.md
@@ -5,9 +5,18 @@ The *fall_detection* module contains the *FallDetectorLearner* class, which inhe
 ### Class FallDetectorLearner
 Bases: `engine.learners.Learner`
 
-The *FallDetectorLearner* class contains the implementation of a naive fall detector algorithm.
+The *FallDetectorLearner* class contains the implementation of a rule-based fall detector algorithm.
 It can be used to perform fall detection on images (inference) using a pose estimator.
 
+This rule-based method can provide **cheap and fast** fall detection capabilities when pose estimation
+is already being used. Its inference time cost is ~0.1% of pose estimation, adding negligible overhead.
+
+However, it **has known limitations** due to its nature. Working with 2D poses means that, depending on the
+orientation of the person, it cannot detect most fallen poses that face the camera.
+Another known false-positive detection occurs when a person is sitting with their knees detectable but their
+ankles obscured or undetectable; however, handling this case is necessary for detecting a fallen person
+whose ankles are not visible.
+
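+A minimal usage sketch follows; it assumes the detector is constructed directly from a Lightweight OpenPose learner and the file names are illustrative.
+
+```python
+from opendr.engine.data import Image
+from opendr.perception.pose_estimation import LightweightOpenPoseLearner
+from opendr.perception.fall_detection import FallDetectorLearner
+
+# The fall detector reuses an existing pose estimator, so its own overhead is negligible.
+pose_estimator = LightweightOpenPoseLearner(device="cpu")
+pose_estimator.download(path=".")
+pose_estimator.load("openpose_default")
+
+fall_detector = FallDetectorLearner(pose_estimator)
+detections = fall_detector.infer(Image.open("test_image.png"))  # detection results for the image
+```
+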
 The [FallDetectorLearner](/src/opendr/perception/fall_detection/fall_detector_learner.py) class has the
 following public methods:
 
diff --git a/docs/reference/fmp_gmapping.md b/docs/reference/fmp_gmapping.md
index 6df53abfa17c54b4f6c2387b29e5b90c92df8915..913bd886091ab245c635a2ac3eb08e3942be4324 100644
--- a/docs/reference/fmp_gmapping.md
+++ b/docs/reference/fmp_gmapping.md
@@ -3,9 +3,9 @@
 Traditional *SLAM* algorithm for estimating a robot's position and a 2D, grid-based map of the environment from planar LiDAR scans.
 Based on OpenSLAM GMapping, with additional functionality for computing the closed-form Full Map Posterior Distribution.
 
-For more details on the launchers and tools, see the [FMP_Eval Readme](../../projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/README.md).
+For more details on the launchers and tools, see the [FMP_Eval Readme](../../projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/README.md).
 
-For more details on the actual SLAM algorithm and its ROS node wrapper, see the [SLAM_GMapping Readme](../../projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/README.md).
+For more details on the actual SLAM algorithm and its ROS node wrapper, see the [SLAM_GMapping Readme](../../projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/README.md).
 
 ## Demo Usage
 A demo ROSBag for a square corridor can be found in the Map Simulator submodule in `src/map_simulator/rosbags/`, as well as preconfigured ***roslaunch***
@@ -25,4 +25,4 @@ This will start the following processes and nodes:
 
 Other ROSBags can be easily generated with the map simulator script from either new custom scenarios, or from the test configuration files in `src/map_simulator/scenarios/robots/` directory.
 
-For more information on how to define custom test scenarios and converting them to ROSBags, see the [Map_Simulator Readme](../../projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/README.md).
\ No newline at end of file
+For more information on how to define custom test scenarios and converting them to ROSBags, see the [Map_Simulator Readme](../../projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/README.md).
\ No newline at end of file
diff --git a/docs/reference/gem.md b/docs/reference/gem.md
index 27e19ae9b74b11508ea1998741ccb5474bc6747f..88826b60f10b872ca5076b8bd1e00490eace34b6 100644
--- a/docs/reference/gem.md
+++ b/docs/reference/gem.md
@@ -216,8 +216,8 @@ Parameters:
 
 #### Demo and Tutorial
 
-An inference [demo](../../projects/perception/object_detection_2d/gem/inference_demo.py) and
-[tutorial](../../projects/perception/object_detection_2d/gem/inference_tutorial.ipynb) are available.
+An inference [demo](../../projects/python/perception/object_detection_2d/gem/inference_demo.py) and
+[tutorial](../../projects/python/perception/object_detection_2d/gem/inference_tutorial.ipynb) are available.
 
 #### Examples
 
diff --git a/docs/reference/human-model-generation.md b/docs/reference/human-model-generation.md
index 8bd3997cb81f4a8ee3a675a32d71699d1fd23f79..71bac046de17c96f0bc2efd2e18b47f8cb890245 100644
--- a/docs/reference/human-model-generation.md
+++ b/docs/reference/human-model-generation.md
@@ -77,7 +77,7 @@ Documentation on how to use this node can be found [here](../../projects/opendr_
 #### Tutorials and Demos
 
 A demo in the form of a Jupyter Notebook is available
-[here](../../projects/simulation/human_model_generation/demos/model_generation.ipynb).
+[here](../../projects/python/simulation/human_model_generation/demos/model_generation.ipynb).
 
 #### Example 
 
@@ -95,8 +95,8 @@ A demo in the form of a Jupyter Notebook is available
   OPENDR_HOME = os.environ["OPENDR_HOME"]
 
   # We load a full-body image of a human as well as an image depicting its corresponding silhouette. 
-  rgb_img = Image.open(os.path.join(OPENDR_HOME, 'projects/simulation/human_model_generation/demos', 'imgs_input/rgb/result_0004.jpg'))
-  msk_img = Image.open(os.path.join(OPENDR_HOME, 'projects/simulation/human_model_generation/demos', 'imgs_input/msk/result_0004.jpg'))
+  rgb_img = Image.open(os.path.join(OPENDR_HOME, 'projects/python/simulation/human_model_generation/demos', 'imgs_input/rgb/result_0004.jpg'))
+  msk_img = Image.open(os.path.join(OPENDR_HOME, 'projects/python/simulation/human_model_generation/demos', 'imgs_input/msk/result_0004.jpg'))
 
   # We initialize learner. Using the infer method, we generate human 3D model. 
   model_generator = PIFuGeneratorLearner(device='cuda', checkpoint_dir='./temp')
diff --git a/docs/reference/index.md b/docs/reference/index.md
index 8d9a7d720267dc733738e08254f45cbb9ccc49f2..7b02e5358d342c4e03e41d27c97bafbe6a10ac78 100644
--- a/docs/reference/index.md
+++ b/docs/reference/index.md
@@ -16,6 +16,8 @@ Neither the copyright holder nor any applicable licensor will be liable for any
 
 ## Table of Contents
 
+- [Installation](/docs/reference/installation.md)
+- [Customization](/docs/reference/customize.md)
 - Inference and Training API
     - `engine` Module
         - [engine.data Module](engine-data.md)
@@ -37,6 +39,7 @@ Neither the copyright holder nor any applicable licensor will be liable for any
             - [edgespeechnets Module](edgespeechnets.md)
             - [quadraticselfonn Module](quadratic-selfonn.md)
         - object detection 2d:
+            - [nanodet Module](nanodet.md)
             - [detr Module](detr.md)
             - [gem Module](gem.md)
             - [retinaface Module](face-detection-2d-retinaface.md)
@@ -89,48 +92,50 @@ Neither the copyright holder nor any applicable licensor will be liable for any
     - `C API` Module
         - [face recognition Demo](/projects/c_api)
     - `control` Module
-        - [mobile_manipulation Demo](/projects/control/mobile_manipulation)
-        - [single_demo_grasp Demo](/projects/control/single_demo_grasp)
+        - [mobile_manipulation Demo](/projects/python/control/mobile_manipulation)
+        - [single_demo_grasp Demo](/projects/python/control/single_demo_grasp)
     - `opendr workspace` Module
         - [opendr_ws](/projects/opendr_ws)
     - `perception` Module
         - activity recognition:
-            - [activity_recognition Demo](/projects/perception/activity_recognition/demos/online_recognition)
+            - [activity_recognition Demo](/projects/python/perception/activity_recognition/demos/online_recognition)
         - face recognition:
-            - [face_recognition_Demo](/projects/perception/face_recognition)
+            - [face_recognition_Demo](/projects/python/perception/face_recognition)
         - facial expression recognition:
-            - [landmark_based_facial_expression_recognition Demo](/projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition)
+            - [landmark_based_facial_expression_recognition Demo](/projects/python/perception/facial_expression_recognition/landmark_based_facial_expression_recognition)
         - heart anomaly detection:
-            - [heart anomaly detection Demo](/projects/perception/heart_anomaly_detection)
+            - [heart anomaly detection Demo](/projects/python/perception/heart_anomaly_detection)
         - pose estimation:
-            - [lightweight_open_pose Demo](/projects/perception/lightweight_open_pose)
+            - [lightweight_open_pose Demo](/projects/python/perception/lightweight_open_pose)
         - multimodal human centric:
-            - [rgbd_hand_gesture_learner Demo](/projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition)
-            - [audiovisual_emotion_recognition Demo](/projects/perception/multimodal_human_centric/audiovisual_emotion_recognition)
+            - [rgbd_hand_gesture_learner Demo](/projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition)
+            - [audiovisual_emotion_recognition Demo](/projects/python/perception/multimodal_human_centric/audiovisual_emotion_recognition)
         - object detection 2d:
-            - [detr Demo](/projects/perception/object_detection_2d/detr)
-            - [gem Demo](/projects/perception/object_detection_2d/gem)
-            - [retinaface Demo](/projects/perception/object_detection_2d/retinaface)
-            - [centernet Demo](/projects/perception/object_detection_2d/centernet)
-            - [ssd Demo](/projects/perception/object_detection_2d/ssd)
-            - [yolov3 Demo](/projects/perception/object_detection_2d/yolov3)
-            - [seq2seq-nms Demo](/projects/perception/object_detection_2d/nms/seq2seq-nms)
+            - [nanodet Demo](/projects/python/perception/object_detection_2d/nanodet)
+            - [detr Demo](/projects/python/perception/object_detection_2d/detr)
+            - [gem Demo](/projects/python/perception/object_detection_2d/gem)
+            - [retinaface Demo](/projects/python/perception/object_detection_2d/retinaface)
+            - [centernet Demo](/projects/python/perception/object_detection_2d/centernet)
+            - [ssd Demo](/projects/python/perception/object_detection_2d/ssd)
+            - [yolov3 Demo](/projects/python/perception/object_detection_2d/yolov3)
+            - [seq2seq-nms Demo](/projects/python/perception/object_detection_2d/nms/seq2seq-nms)
         - object detection 3d:
-            - [voxel Demo](/projects/perception/object_detection_3d/demos/voxel_object_detection_3d)
+            - [voxel Demo](/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d)
         - object tracking 2d:
-            - [fair_mot Demo](/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort)
+            - [fair_mot Demo](/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort)
         - panoptic segmentation:
-            - [efficient_ps Demo](/projects/perception/panoptic_segmentation/efficient_ps)
+            - [efficient_ps Demo](/projects/python/perception/panoptic_segmentation/efficient_ps)
         - semantic segmentation:
-            - [bisnet Demo](/projects/perception/semantic_segmentation/bisenet)
+            - [bisenet Demo](/projects/python/perception/semantic_segmentation/bisenet)
         - action recognition:
-            - [skeleton_based_action_recognition Demo](/projects/perception/skeleton_based_action_recognition)
+            - [skeleton_based_action_recognition Demo](/projects/python/perception/skeleton_based_action_recognition)
         - fall detection:
-            - [fall_detection Demo](/projects/perception/fall_detection.md)
-        - [full_map_posterior_slam Module](/projects/perception/slam/full_map_posterior_gmapping)
+            - [fall_detection Demo](/projects/python/perception/fall_detection.md)
+        - [full_map_posterior_slam Module](/projects/python/perception/slam/full_map_posterior_gmapping)
     - `simulation` Module
-        - [SMPL+D Human Models Dataset](/projects/simulation/SMPL%2BD_human_models)
-        - [Human-Data-Generation-Framework](/projects/simulation/human_dataset_generation)
-        - [Human Model Generation Demos](/projects/simulation/human_dataset_generation)
+        - [SMPL+D Human Models Dataset](/projects/python/simulation/SMPL%2BD_human_models)
+        - [Human-Data-Generation-Framework](/projects/python/simulation/human_dataset_generation)
+        - [Human Model Generation Demos](/projects/python/simulation/human_dataset_generation)
     - `utils` Module
-        - [Hyperparameter Tuning Module](/projects/utils/hyperparameter_tuner)
+        - [Hyperparameter Tuning Module](/projects/python/utils/hyperparameter_tuner)
+- [Known Issues](/docs/reference/issues.md)
diff --git a/docs/reference/installation.md b/docs/reference/installation.md
index d546c9ebf8c1c68712139f104eb10e5ab9609378..97465d10c16caef9297578cef738ddc1601cc360 100644
--- a/docs/reference/installation.md
+++ b/docs/reference/installation.md
@@ -64,6 +64,10 @@ pip install mxnet-cu112==1.8.0post0
 pip install opendr-toolkit-engine
 pip install opendr-toolkit
 ```
+If you encounter any issues installing the latest version of detectron2, then you can try installing a previous commit:
+```bash
+pip install 'git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13'
+```
 
 ## Installing only a *particular* tool using *pip* (CPU/GPU)
 
@@ -119,18 +123,18 @@ source bin/activate.sh
 If you want to display GTK-based applications from the Docker container (e.g., visualize results using OpenCV `imshow()`), then you should mount the X server socket inside the container, e.g.,
 ```bash
 xhost +local:root
-sudo docker run -it -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$DISPLAY opendr/opendr-toolkit:cpu_latest /bin/bash
+sudo docker run -it -v /tmp/.X11-unix:/tmp/.X11-unix -e DISPLAY=unix$DISPLAY opendr/opendr-toolkit:cpu_v1.1.1 /bin/bash
 ```
 
 ## GPU docker
 If you want to use a CUDA-enabled container please install [nvidia-docker](https://github.com/NVIDIA/nvidia-docker).
 Then, you can directly run the latest image with the command:
 ```bash
-sudo docker run --gpus all -p 8888:8888 opendr/opendr-toolkit:cuda_latest
+sudo docker run --gpus all -p 8888:8888 opendr/opendr-toolkit:cuda_v1.1.1
 ```
 or, for an interactive session:
 ```bash
-sudo docker run --gpus all -it opendr/opendr-toolkit:cuda_latest /bin/bash
+sudo docker run --gpus all -it opendr/opendr-toolkit:cuda_v1.1.1 /bin/bash
 ```
 In this case, do not forget to enable the virtual environment with:
 ```bash
diff --git a/docs/reference/issues.md b/docs/reference/issues.md
new file mode 100644
index 0000000000000000000000000000000000000000..3520eaef9b693f078968c89d616eec8c2e48a1dd
--- /dev/null
+++ b/docs/reference/issues.md
@@ -0,0 +1,39 @@
+# Known Issues
+
+This page includes known issues, compatibility issues as well as possible workarounds.
+
+
+## Issue: Some ROS nodes have a noticeable lag
+
+You should make sure that the queue size is set to 1 and the buffer size is large enough to hold the input message.
+Even though we have set the appropriate default values for topics in order to avoid this issue, this also depends on your system configuration (e.g., the size of the messages published in the input topics).
+Be sure to check the discussion and explanation of this behavior in [#275](https://github.com/opendr-eu/opendr/issues/275).
+Essentially, due to the way ROS handles messages, a latency of at least 2 frames is expected.
+
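+As a minimal sketch of the subscriber settings discussed above (the topic name, message type and buffer size are illustrative, not OpenDR defaults):
+```python
+import rospy
+from sensor_msgs.msg import Image as ROSImage
+
+def callback(msg):
+    pass  # process the incoming frame here
+
+rospy.init_node("example_listener")
+# queue_size=1 keeps only the newest frame; buff_size must be large enough to hold a full message
+rospy.Subscriber("/usb_cam/image_raw", ROSImage, callback,
+                 queue_size=1, buff_size=10000000)
+rospy.spin()
+```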
+
+## Issue: Docker images do not fit my embedded device
+
+This can affect several embedded devices, such as NX and TX2, which have limited storage on board.
+The easiest solution to this issue is to use external storage (e.g., an SD card or an external SSD).
+You can also check the [customization](customize.md) instructions on how you can manually build a docker image that can fit your device.
+
+## Issue: I am trying to install the toolkit on Ubuntu 18.04/20.10/XX.XX, WSL, or any other Linux distribution and it doesn't work.
+
+The OpenDR toolkit targets native installation on Ubuntu 20.04.
+For any other system, you are advised to use the docker images, which are expected to work out-of-the-box on any configuration and operating system.
+
+
+## Issue: I cannot install the toolkit using `pip` / I cannot install the toolkit on Colab
+
+The OpenDR toolkit officially targets Ubuntu 20.04.
+For other systems, slight modifications might be needed in order to ensure that all dependencies are in place.
+Most parts of the toolkit can probably be installed without any issue on Colab or any other Ubuntu-like system.
+However, the behavior of `pip`'s dependency resolver might cause issues (e.g., endless loops when trying to resolve dependencies).
+In this case, it is suggested to remove any package that could cause a conflict, e.g.:
+```
+pip uninstall -y torch torchaudio fastai torchvision torchtext torchsummary kapre google-cloud-bigquery-storage yellowbrick tensorflow-metadata tensorflow-datasets numba imbalanced-learn googleapis-common-protos google-api-core  imageio tensorboard
+```
+and then install the toolkit using the `--use-deprecated=legacy-resolver` flag, e.g.:
+```
+DISABLE_BCOLZ_AVX2=true pip install opendr-toolkit --use-deprecated=legacy-resolver
+```
diff --git a/docs/reference/mobile-manipulation.md b/docs/reference/mobile-manipulation.md
index b40fe513e129ad339527e88650e8e9ade849e708..310ff1e60d905ad9ef2a2c014e758e4d37f16242 100644
--- a/docs/reference/mobile-manipulation.md
+++ b/docs/reference/mobile-manipulation.md
@@ -130,7 +130,7 @@ The dependencies for this module automatically set up and compile a catkin works
 To start required ROS nodes, please run the following before using the `MobileRLLearner` class:
 
 ```sh
-source ${OPENDR_HOME}/projects/control/mobile_manipulation/mobile_manipulation_ws/devel/setup.bash
+source ${OPENDR_HOME}/projects/python/control/mobile_manipulation/mobile_manipulation_ws/devel/setup.bash
 roslaunch mobile_manipulation_rl [pr2,tiago]_analytical.launch
 ````
 
diff --git a/docs/reference/nanodet.md b/docs/reference/nanodet.md
new file mode 100644
index 0000000000000000000000000000000000000000..765f2106730dd3da2173f6643db1db7784c3469f
--- /dev/null
+++ b/docs/reference/nanodet.md
@@ -0,0 +1,289 @@
+## nanodet module
+
+The *nanodet* module contains the *NanodetLearner* class, which inherits from the abstract class *Learner*.
+
+### Class NanodetLearner
+Bases: `engine.learners.Learner`
+
+The *NanodetLearner* class is a wrapper of the Nanodet object detection algorithms based on the original
+[Nanodet implementation](https://github.com/RangiLyu/nanodet).
+It can be used to perform object detection on images (inference) and to train all predefined Nanodet object detection models, as well as new modular models defined by the user.
+
+The [NanodetLearner](../../src/opendr/perception/object_detection_2d/nanodet/nanodet_learner.py) class has the
+following public methods:
+
+#### `NanodetLearner` constructor
+```python
+NanodetLearner(self, model_to_use, iters, lr, batch_size, checkpoint_after_iter, checkpoint_load_iter, temp_path, device,
+               weight_decay, warmup_steps, warmup_ratio, lr_schedule_T_max, lr_schedule_eta_min, grad_clip)
+```
+
+Constructor parameters:
+
+- **model_to_use**: *{"EfficientNet_Lite0_320", "EfficientNet_Lite1_416", "EfficientNet_Lite2_512", "RepVGG_A0_416",
+  "t", "g", "m", "m_416", "m_0.5x", "m_1.5x", "m_1.5x_416", "plus_m_320", "plus_m_1.5x_320", "plus_m_416",
+  "plus_m_1.5x_416", "custom"}, default=plus_m_1.5x_416*\
+  Specifies the model to use and the config file that contains all hyperparameters for training, evaluation and inference, as in the original
+  [Nanodet implementation](https://github.com/RangiLyu/nanodet). If you want to overwrite some of these parameters, you can
+  pass them as parameters to the learner constructor.
+- **iters**: *int, default=None*\
+  Specifies the number of epochs the training should run for.
+- **lr**: *float, default=None*\
+  Specifies the initial learning rate to be used during training.
+- **batch_size**: *int, default=None*\
+  Specifies the number of images to be bundled up in a batch during training.
+  This heavily affects memory usage, adjust according to your system.
+- **checkpoint_after_iter**: *int, default=None*\
+  Specifies per how many training iterations a checkpoint should be saved.
+  If it is set to 0 no checkpoints will be saved.
+- **checkpoint_load_iter**: *int, default=None*\
+  Specifies which checkpoint should be loaded.
+  If it is set to 0, no checkpoints will be loaded.
+- **temp_path**: *str, default=''*\
+  Specifies a path where the algorithm saves the checkpoints along with the logging files. If *''*, the `cfg.save_dir` will be used instead.
+- **device**: *{'cpu', 'cuda'}, default='cuda'*\
+  Specifies the device to be used.
+- **weight_decay**: *float, default=None*
+- **warmup_steps**: *int, default=None*
+- **warmup_ratio**: *float, default=None*
+- **lr_schedule_T_max**: *int, default=None*
+- **lr_schedule_eta_min**: *float, default=None*
+- **grad_clip**: *int, default=None*
+
+#### `NanodetLearner.fit`
+```python
+NanodetLearner.fit(self, dataset, val_dataset, logging_path, verbose, seed)
+```
+
+This method is used for training the algorithm on a train dataset and validating on a val dataset.
+
+Parameters:
+
+- **dataset**: *ExternalDataset*\
+  Object that holds the training dataset.
+  Can be of type `ExternalDataset`.
+- **val_dataset** : *ExternalDataset, default=None*\
+  Object that holds the validation dataset.
+  Can be of type `ExternalDataset`.
+- **logging_path** : *str, default=''*\
+  Subdirectory in temp_path to save log files and TensorBoard.
+- **verbose** : *bool, default=True*\
+  Enables the maximum verbosity and the logger.
+- **seed** : *int, default=123*\
+  Seed for repeatability.
+
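+As a minimal training sketch (the dataset path and the hyperparameter values below are illustrative):
+```python
+from opendr.engine.datasets import ExternalDataset
+from opendr.perception.object_detection_2d import NanodetLearner
+
+# COCO-style dataset; see the Examples section below for the expected directory layout
+train_dataset = ExternalDataset("./data", "coco")
+val_dataset = ExternalDataset("./data", "coco")
+
+learner = NanodetLearner(model_to_use="plus_m_1.5x_416", iters=300, batch_size=4,
+                         checkpoint_after_iter=50, device="cuda")
+learner.fit(dataset=train_dataset, val_dataset=val_dataset, logging_path="./logs")
+learner.save("./saved_model")
+```
+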
+#### `NanodetLearner.eval`
+```python
+NanodetLearner.eval(self, dataset, verbose)
+```
+
+This method is used to evaluate a trained model on an evaluation dataset.
+Saves a txt logger file containing stats regarding evaluation.
+
+Parameters:
+
+- **dataset** : *ExternalDataset*\
+  Object that holds the evaluation dataset.
+- **verbose**: *bool, default=True*\
+  Enables the maximum verbosity and logger.
+
+#### `NanodetLearner.infer`
+```python
+NanodetLearner.infer(self, input, threshold, verbose)
+```
+
+This method is used to perform object detection on an image.
+Returns an `engine.target.BoundingBoxList` object, which contains bounding boxes that are described by their top-left corner and
+their width and height, or an empty list if no detections were made on the input image.
+
+Parameters:
+- **input** : *Image*\
+  Image type object to perform inference on.
+- **threshold**: *float, default=0.35*\
+  Specifies the threshold for object detection inference.
+  An object is detected if the confidence of the output is higher than the specified threshold.
+- **verbose**: *bool, default=True*\
+  Enables the maximum verbosity and logger.
+
+#### `NanodetLearner.save`
+```python
+NanodetLearner.save(self, path, verbose)
+```
+
+This method is used to save a trained model with its metadata.
+Provided with the path, it creates the "path" directory, if it does not already exist.
+Inside this folder, the model is saved as *"nanodet_{model_name}.pth"* and a metadata file *"nanodet_{model_name}.json"*.
+If the directory already exists, the *"nanodet_{model_name}.pth"* and *"nanodet_{model_name}.json"* files are overwritten.
+
+Parameters:
+
+- **path**: *str, default=None*\
+  Path to save the model; if None, it will be the `temp_path` or the `cfg.save_dir` of the learner.
+- **verbose**: *bool, default=True*\
+  Enables the maximum verbosity and logger.
+
+#### `NanodetLearner.load`
+```python
+NanodetLearner.load(self, path, verbose)
+```
+
+This method is used to load a previously saved model from its saved folder.
+Loads the model from inside the directory of the path provided, using the metadata .json file included.
+
+Parameters:
+
+- **path**: *str, default=None*\
+  Path of the model to be loaded.
+- **verbose**: *bool, default=True*\
+  Enables the maximum verbosity and logger.
+
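+A minimal save/load round trip is sketched below, assuming the learner already holds a model (freshly initialized from its config, previously loaded or trained); the *'./saved_models/nanodet'* directory is a placeholder:
+
+```python
+from opendr.perception.object_detection_2d import NanodetLearner
+
+nanodet = NanodetLearner(model_to_use='plus-m-1.5x_416', device='cpu')
+# Writes nanodet_{model_name}.pth and nanodet_{model_name}.json inside the given directory
+nanodet.save(path='./saved_models/nanodet', verbose=True)
+
+new_learner = NanodetLearner(model_to_use='plus-m-1.5x_416', device='cpu')
+new_learner.load(path='./saved_models/nanodet', verbose=True)
+```
+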
+#### `NanodetLearner.download`
+```python
+NanodetLearner.download(self, path, mode, model, verbose, url)
+```
+
+Downloads data needed for the various functions of the learner, e.g., pretrained models as well as test data.
+
+Parameters:
+
+- **path**: *str, default=None*\
+  Specifies the folder where data will be downloaded. If *None*, the *self.temp_path* directory is used instead.
+- **mode**: *{'pretrained', 'images', 'test_data'}, default='pretrained'*\
+  If *'pretrained'*, downloads a pretrained detector model for the *model_to_use* architecture which was chosen at learner initialization.
+  If *'images'*, downloads an image to perform inference on. If *'test_data'*, downloads a dummy dataset for testing purposes.
+- **verbose**: *bool, default=False*\
+  Enables the maximum verbosity and logger.
+- **url**: *str, default=OpenDR FTP URL*\
+  URL of the FTP server.
+
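+The snippet below sketches the three download modes; the target folder *"./predefined_examples"* follows the Examples section, but any writable directory can be used:
+
+```python
+from opendr.perception.object_detection_2d import NanodetLearner
+
+nanodet = NanodetLearner(model_to_use='plus-m-1.5x_416', device='cpu')
+# Pretrained weights for the architecture chosen at initialization
+nanodet.download(path="./predefined_examples", mode="pretrained")
+# A sample image to perform inference on
+nanodet.download(path="./predefined_examples", mode="images")
+# A dummy dataset for testing purposes
+nanodet.download(path="./predefined_examples", mode="test_data")
+```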
+
+#### Tutorials and Demos
+
+A tutorial on performing inference is available.
+Furthermore, demos on performing [training](../../projects/perception/object_detection_2d/nanodet/train_demo.py),
+[evaluation](../../projects/perception/object_detection_2d/nanodet/eval_demo.py) and
+[inference](../../projects/perception/object_detection_2d/nanodet/inference_demo.py) are also available.
+
+
+
+#### Examples
+
+* **Training example using an `ExternalDataset`.**
+
+  To train properly, the architecture weights must be downloaded into a predefined directory before `fit` is called; in this case the directory is named *"predefined_examples"*.
+  The default architecture is *'plus-m-1.5x_416'*.
+  The training and evaluation dataset root should be present in the path provided, along with the annotation files.
+  The default COCO 2017 training data can be found [here](https://cocodataset.org/#download) (train, val, annotations).
+  All training parameters (optimizer, lr schedule, losses, model parameters etc.) can be changed in the model config file
+  in the [config directory](../../src/opendr/perception/object_detection_2d/nanodet/algorithm/config).
+  You can find more information in the [config file detail](../../src/opendr/perception/object_detection_2d/nanodet/algorithm/config/config_file_detail.md) document.
+  For ease of use, the following parameters can be overridden through the NanodetLearner constructor:
+  (iters, lr, batch_size, checkpoint_after_iter, checkpoint_load_iter, temp_path, device, weight_decay, warmup_steps,
+  warmup_ratio, lr_schedule_T_max, lr_schedule_eta_min, grad_clip)
+  
+  **Note**
+  
+  The Nanodet tool can be used with any PASCAL VOC or COCO-like dataset. The only thing needed is to provide the correct root folder and dataset type.
+  
+  If *'voc'* is chosen for *dataset*, the directory must look like this:
+  
+  - root folder
+    - train
+      - Annotations
+        - image1.xml
+        - image2.xml
+        - ...
+      - JPEGImages
+        - image1.jpg
+        - image2.jpg
+        - ...
+    - val
+      - Annotations
+        - image1.xml
+        - image2.xml
+        - ...
+      - JPEGImages
+        - image1.jpg
+        - image2.jpg
+        - ...
+
+  On the other hand, if *'coco'* is chosen for *dataset*, the directory must look like this:
+  
+  - root folder
+    - train2017
+      - image1.jpg
+      - image2.jpg
+      - ...
+    - val2017
+      - image1.jpg
+      - image2.jpg
+      - ...
+    - annotations
+      - instances_train2017.json 
+      - instances_val2017.json
+   
+  You can change the default annotation and image directories in [dataset](../../src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/__init__.py).
+
+  ```python
+  import argparse
+
+  from opendr.engine.datasets import ExternalDataset
+  from opendr.perception.object_detection_2d import NanodetLearner
+  
+  
+  if __name__ == '__main__':
+      parser = argparse.ArgumentParser()
+      parser.add_argument("--dataset", help="Dataset to train on", type=str, default="coco", choices=["voc", "coco"])
+      parser.add_argument("--data-root", help="Dataset root folder", type=str)
+      parser.add_argument("--model", help="Model that config file will be used", type=str)
+      parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+      parser.add_argument("--batch-size", help="Batch size to use for training", type=int, default=6)
+      parser.add_argument("--lr", help="Learning rate to use for training", type=float, default=5e-4)
+      parser.add_argument("--checkpoint-freq", help="Frequency in-between checkpoint saving and evaluations", type=int, default=50)
+      parser.add_argument("--n-epochs", help="Number of total epochs", type=int, default=300)
+      parser.add_argument("--resume-from", help="Epoch to load checkpoint file and resume training from", type=int, default=0)
+  
+      args = parser.parse_args()
+  
+      if args.dataset == 'voc':
+          dataset = ExternalDataset(args.data_root, 'voc')
+          val_dataset = ExternalDataset(args.data_root, 'voc')
+      elif args.dataset == 'coco':
+          dataset = ExternalDataset(args.data_root, 'coco')
+          val_dataset = ExternalDataset(args.data_root, 'coco')
+  
+      nanodet = NanodetLearner(model_to_use=args.model, iters=args.n_epochs, lr=args.lr, batch_size=args.batch_size,
+                               checkpoint_after_iter=args.checkpoint_freq, checkpoint_load_iter=args.resume_from,
+                               device=args.device)
+  
+      nanodet.download("./predefined_examples", mode="pretrained")
+      nanodet.load("./predefined_examples/nanodet-{}/nanodet-{}.ckpt".format(args.model, args.model), verbose=True)
+      nanodet.fit(dataset, val_dataset)
+      nanodet.save()
+  ```
+  
+* **Inference and result drawing example on a test image.**
+
+  This example shows how to perform inference on an image and draw the resulting bounding boxes using a nanodet model that is pretrained on the COCO dataset.
+  Moreover, inference can also be performed on all images in a folder, on the frames of a video, or on a webcam feed with the provided *mode*:
+  the *path* parameter can point to a folder or a video file, and if *'webcam'* is used as *mode*, the *camid* parameter must be set
+  to select the webcam device of your machine.
+  In this example, a pretrained model is first downloaded, as in the training example, and then an image to perform inference on.
+  
+  ```python
+  import argparse
+  from opendr.engine.data import Image
+  from opendr.perception.object_detection_2d import NanodetLearner
+  
+  if __name__ == '__main__':
+      parser = argparse.ArgumentParser()
+      parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+      parser.add_argument("--model", help="Model that config file will be used", type=str)
+      args = parser.parse_args()
+  
+      nanodet = NanodetLearner(model_to_use=args.model, device=args.device)
+  
+      nanodet.download("./predefined_examples", mode="pretrained")
+      nanodet.load("./predefined_examples/nanodet-{}/nanodet-{}.ckpt".format(args.model, args.model), verbose=True)
+      nanodet.download("./predefined_examples", mode="images")
+      img = Image.open("./predefined_examples/000000000036.jpg")
+      boxes = nanodet.infer(input=img)
+  ```
\ No newline at end of file
diff --git a/docs/reference/object-detection-2d-nms-seq2seq_nms.md b/docs/reference/object-detection-2d-nms-seq2seq_nms.md
index 513233c833350e5e434001719be29e4dda35c5ca..c1269c108f61963e004af3c48981f4acc5f0888d 100644
--- a/docs/reference/object-detection-2d-nms-seq2seq_nms.md
+++ b/docs/reference/object-detection-2d-nms-seq2seq_nms.md
@@ -262,7 +262,7 @@ Parameters:
   ssd = SingleShotDetectorLearner(device='cuda')
   ssd.download(".", mode="pretrained")
   ssd.load("./ssd_default_person", verbose=True)
-  img = Image.open(OPENDR_HOME + '/projects/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
+  img = Image.open(OPENDR_HOME + '/projects/python/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
   if not isinstance(img, Image):
       img = Image(img)
   boxes = ssd.infer(img, threshold=0.25, custom_nms=seq2SeqNMSLearner)
diff --git a/docs/reference/rosbridge.md b/docs/reference/rosbridge.md
index 6e19acbc51e37d8206f199b61828d40b4a10863d..d0c155e4d7d8184d78706154925ba85aaeaad284 100755
--- a/docs/reference/rosbridge.md
+++ b/docs/reference/rosbridge.md
@@ -59,25 +59,28 @@ ROSBridge.from_ros_pose(self,
                         ros_pose)
 ```
 
-Converts a ROS pose into an OpenDR pose.
+Converts an OpenDRPose2D message into an OpenDR Pose.
 
 Parameters:
 
-- **message**: *ros_bridge.msg.Pose*\
-  ROS pose to be converted into an OpenDR pose.
+- **ros_pose**: *ros_bridge.msg.OpenDRPose2D*\
+  ROS pose to be converted into an OpenDR Pose.
 
 #### `ROSBridge.to_ros_pose`
 
 ```python
 ROSBridge.to_ros_pose(self,
-                      ros_pose)
+                      pose)
 ```
-Converts an OpenDR pose into a ROS pose.
+Converts an OpenDR Pose into an OpenDRPose2D msg that can carry the same information, i.e. a list of keypoints,
+the pose detection confidence and the pose id.
+Each keypoint is represented as an OpenDRPose2DKeypoint with x, y pixel position on input image with (0, 0) 
+being the top-left corner.
 
 Parameters:
 
-- **message**: *engine.target.Pose*\
-  OpenDR pose to be converted to ROS pose.
+- **pose**: *engine.target.Pose*\
+  OpenDR Pose to be converted to ROS OpenDRPose2D.
 
 
 #### `ROSBridge.to_ros_category`
diff --git a/docs/reference/single-demonstration-grasping.md b/docs/reference/single-demonstration-grasping.md
index 7332a0adb0dc5d62cdae27302e9ebcb5753c94d1..a4d8f67dad315dda6bb44d935b393565c1912de6 100644
--- a/docs/reference/single-demonstration-grasping.md
+++ b/docs/reference/single-demonstration-grasping.md
@@ -113,7 +113,7 @@ $ make install_runtime_dependencies
 after installing dependencies, the user must source the workspace in the shell in order to detect the packages:
 
 ```
-$ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+$ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 ```
 
 ## Demos
@@ -125,7 +125,7 @@ Three different nodes must be launched consecutively in order to properly run th
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 4. $ export WEBOTS_HOME=/usr/local/webots
 5. $ roslaunch single_demo_grasping_demo panda_sim.launch
 ```
@@ -134,7 +134,7 @@ Three different nodes must be launched consecutively in order to properly run th
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 4. $ roslaunch single_demo_grasping_demo camera_stream_inference.launch
 ```
 
@@ -142,7 +142,7 @@ Three different nodes must be launched consecutively in order to properly run th
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 4. $ roslaunch single_demo_grasping_demo panda_sim_control.launch
 ```
 
@@ -150,14 +150,14 @@ Three different nodes must be launched consecutively in order to properly run th
 
 You can find an example on how to use the learner class to run inference and see the result in the following directory:
 ```
-$ cd projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
+$ cd projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
 ```
 simply run:
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
-4. $ cd projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
+4. $ cd projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
 5. $ ./single_demo_inference.py
 ```
 
diff --git a/docs/reference/smpld_models.md b/docs/reference/smpld_models.md
index b5c741847273c4c005eb329e779d009f85c4a37e..e0bba4dff7b650917f41d3618e746d74027cdbcb 100644
--- a/docs/reference/smpld_models.md
+++ b/docs/reference/smpld_models.md
@@ -6,10 +6,10 @@ This folder contains code for:
 <br> 
 
 <p float="left">
-  <img src="./../../projects/simulation/SMPL+D_human_models/examples/model_1.png" width=150 />
-  <img src="./../../projects/simulation/SMPL+D_human_models/examples/model_4.png" width=150 />
-  <img src="./../../projects/simulation/SMPL+D_human_models/examples/model_3.png" width=150 />
-  <img src="./../../projects/simulation/SMPL+D_human_models/examples/model_2.png" width=150 />
+  <img src="./../../projects/python/simulation/SMPL+D_human_models/examples/model_1.png" width=150 />
+  <img src="./../../projects/python/simulation/SMPL+D_human_models/examples/model_4.png" width=150 />
+  <img src="./../../projects/python/simulation/SMPL+D_human_models/examples/model_3.png" width=150 />
+  <img src="./../../projects/python/simulation/SMPL+D_human_models/examples/model_2.png" width=150 />
 </p>
 
 ### Download the raw SMPL+D models only (≈12.5Gb)
diff --git a/projects/README.md b/projects/README.md
index 6cf05ca17af0999c86f463e4531aeb91d3a62d1b..d755cc679459a2578f61fe8e6b2e1915625b156b 100644
--- a/projects/README.md
+++ b/projects/README.md
@@ -1,3 +1,8 @@
 # Projects
-
 This folder contains sample applications demonstrating the OpenDR toolkit functionalities.
+
+This includes:
+- [Python usage examples and tutorials](python)
+- [C_API usage examples](c_api)
+- [ROS 1 nodes](opendr_ws)
+- [ROS 2 nodes](opendr_ws_2)
diff --git a/projects/opendr_ws/README.md b/projects/opendr_ws/README.md
index 2985a9f062f161c9ab5b8a4eccff073f2e1f5524..2fabf14d5dcd9f7ce4fa8390553d8eddcaf8e661 100755
--- a/projects/opendr_ws/README.md
+++ b/projects/opendr_ws/README.md
@@ -37,23 +37,30 @@ source devel/setup.bash
 ```
 ## Structure
 
-Currently, apart from tools, opendr_ws contains the following ROS nodes:
+Currently, apart from tools, opendr_ws contains the following ROS nodes (categorized according to the input they receive):
 
 ### [Perception](src/perception/README.md)
-1. Pose Estimation
-2. Fall Detection
-3. 2D Object Detection
-4. Face Detection
-5. Panoptic Segmentation
-6. Face Recognition
-7. Semantic Segmentation
-8. RGBD Hand Gesture Recognition
-9. Heart Anomaly Detection
-10. Video Human Activity Recognition
-11. Landmark-based Facial Expression Recognition
-12. Skeleton-based Human Action Recognition
-13. Speech Command Recognition
-14. Voxel Object Detection 3D
-15. AB3DMOT Object Tracking 3D
-16. FairMOT Object Tracking 2D
-17. Deep Sort Object Tracking 2D
+## RGB input
+1. [Pose Estimation](src/perception/README.md#pose-estimation-ros-node)
+2. [Fall Detection](src/perception/README.md#fall-detection-ros-node)
+3. [Face Recognition](src/perception/README.md#face-recognition-ros-node)
+4. [2D Object Detection](src/perception/README.md#2d-object-detection-ros-nodes)
+5. [Face Detection](src/perception/README.md#face-detection-ros-node)
+6. [Panoptic Segmentation](src/perception/README.md#panoptic-segmentation-ros-node)
+7. [Semantic Segmentation](src/perception/README.md#semantic-segmentation-ros-node)
+8. [Video Human Activity Recognition](src/perception/README.md#human-action-recognition-ros-node)
+9. [Landmark-based Facial Expression Recognition](src/perception/README.md#landmark-based-facial-expression-recognition-ros-node)
+10. [Deep Sort Object Tracking 2D](src/perception/README.md#deep-sort-object-tracking-2d-ros-node)
+11. [Skeleton-based Human Action Recognition](src/perception/README.md#skeleton-based-human-action-recognition-ros-node)
+## Point cloud input
+1. [Voxel Object Detection 3D](src/perception/README.md#voxel-object-detection-3d-ros-node)
+2. [AB3DMOT Object Tracking 3D](src/perception/README.md#ab3dmot-object-tracking-3d-ros-node)
+3. [FairMOT Object Tracking 2D](src/perception/README.md#fairmot-object-tracking-2d-ros-node)
+## RGB + Infrared input
+1. [End-to-End Multi-Modal Object Detection (GEM)](src/perception/README.md#gem-ros-node)
+## RGBD input nodes
+1. [RGBD Hand Gesture Recognition](src/perception/README.md#rgbd-hand-gesture-recognition-ros-node)
+## Biosignal input
+1. [Heart Anomaly Detection](src/perception/README.md#heart-anomaly-detection-ros-node)
+## Audio input
+1. [Speech Command Recognition](src/perception/README.md#speech-command-recognition-ros-node)
diff --git a/projects/opendr_ws/src/data_generation/package.xml b/projects/opendr_ws/src/data_generation/package.xml
index cd332807fba0a2d1ced61bb94cfab4339dfd7143..57d1e6e1f797efa75ce442b3e89a3df029499dbf 100644
--- a/projects/opendr_ws/src/data_generation/package.xml
+++ b/projects/opendr_ws/src/data_generation/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <package format="2">
   <name>data_generation</name>
-  <version>1.0.0</version>
+  <version>1.1.1</version>
   <description>OpenDR's ROS nodes for data generation package</description>
   <maintainer email="tefas@csd.auth.gr">OpenDR Project Coordinator</maintainer>
   <license>Apache License v2.0 </license>
diff --git a/projects/opendr_ws/src/perception/CMakeLists.txt b/projects/opendr_ws/src/perception/CMakeLists.txt
index a47f5f9c4bfd99b8c77051a1a3a15d7ed57055f2..682fbb2d8a25719c7cc59c110085d9632cad7e69 100644
--- a/projects/opendr_ws/src/perception/CMakeLists.txt
+++ b/projects/opendr_ws/src/perception/CMakeLists.txt
@@ -30,6 +30,7 @@ include_directories(
 catkin_install_python(PROGRAMS
    scripts/pose_estimation.py
    scripts/fall_detection.py
+   scripts/object_detection_2d_nanodet.py
    scripts/object_detection_2d_detr.py
    scripts/object_detection_2d_gem.py
    scripts/semantic_segmentation_bisenet.py
diff --git a/projects/opendr_ws/src/perception/README.md b/projects/opendr_ws/src/perception/README.md
index ba0ab81059be781933458a5f43473eaf7f98327d..eebb580b8673569c46d6affea28919eeea2b1765 100755
--- a/projects/opendr_ws/src/perception/README.md
+++ b/projects/opendr_ws/src/perception/README.md
@@ -31,14 +31,25 @@ Assuming that you have already [activated the OpenDR environment](../../../../do
 rosrun usb_cam usb_cam_node
 ```
 
-2. You are then ready to start the pose detection node
+2. You are then ready to start the pose detection node (use `-h` to print out help for various arguments)
 
 ```shell
 rosrun perception pose_estimation.py
 ```
 
 3. You can examine the annotated image stream using `rqt_image_view` (select the topic `/opendr/image_pose_annotated`) or
-   `rostopic echo /opendr/poses`
+   `rostopic echo /opendr/poses`. 
+
+Note that to use the pose messages properly, you need to create an appropriate subscriber that will convert the ROS pose messages back to OpenDR poses which you can access as described in the [documentation](https://github.com/opendr-eu/opendr/blob/master/docs/reference/engine-target.md#posekeypoints-confidence):
+```python
+        ... 
+        rospy.Subscriber("opendr/poses", Detection2DArray, self.callback)
+        ...
+        def callback(self, data):
+            opendr_pose = self.bridge.from_ros_pose(data)
+            print(opendr_pose)
+            print(opendr_pose['r_eye'])
+```
 
 ## Fall Detection ROS Node
 Assuming that you have already [activated the OpenDR environment](../../../../docs/reference/installation.md), [built your workspace](../../README.md) and started roscore (i.e., just run `roscore`), then you can
@@ -90,15 +101,22 @@ Reference images should be placed in a defined structure like:
 under `/opendr/face_recognition_id`.
 
 ## 2D Object Detection ROS Nodes
-ROS nodes are implemented for the SSD, YOLOv3, CenterNet and DETR generic object detectors. Steps 1, 2 from above must run first.
-Then, to initiate the SSD detector node, run:
+ROS nodes are implemented for the SSD, YOLOv3, CenterNet, DETR and Nanodet generic object detectors.
+Assuming that you have already [activated the OpenDR environment](../../../../docs/reference/installation.md), [built your workspace](../../README.md) and started roscore (i.e., just run `roscore`), proceed as follows:
+
+1. Start the node responsible for publishing images. If you have a USB camera, then you can use the corresponding node (assuming you have installed the corresponding package):
+```shell
+rosrun usb_cam usb_cam_node
+```
+
+2. Then, to initiate the SSD detector node, run:
 
 ```shell
 rosrun perception object_detection_2d_ssd.py
 ```
 The annotated image stream can be viewed using `rqt_image_view`, and the default topic name is
 `/opendr/image_boxes_annotated`. The bounding boxes alone are also published as `/opendr/objects`.
-Similarly, the YOLOv3, CenterNet and DETR detector nodes can be run with:
+Similarly, the YOLOv3, CenterNet, DETR and Nanodet detector nodes can be run with:
 ```shell
 rosrun perception object_detection_2d_yolov3.py
 ```
@@ -110,6 +128,10 @@ or
 ```shell
 rosrun perception object_detection_2d_detr.py
 ```
+or
+```shell
+rosrun perception object_detection_2d_nanodet.py
+```
 respectively.
 
 ## Face Detection ROS Node
@@ -153,15 +175,16 @@ rosrun perception object_detection_2d_gem.py
 A ROS node for performing panoptic segmentation on a specified RGB image stream using the [EfficientPS](../../../../src/opendr/perception/panoptic_segmentation/README.md) network.
 Assuming that the OpenDR catkin workspace has been sourced, the node can be started with:
 ```shell
-rosrun perception panoptic_segmentation_efficient_ps.py CHECKPOINT IMAGE_TOPIC
+rosrun perception panoptic_segmentation_efficient_ps.py
 ```
-with `CHECKPOINT` pointing to the path to the trained model weights and `IMAGE_TOPIC` specifying the ROS topic, to which the node will subscribe.
 
-Additionally, the following optional arguments are available:
+The following optional arguments are available:
 - `-h, --help`: show a help message and exit
-- `--heamap_topic HEATMAP_TOPIC`: publish the semantic and instance maps on `HEATMAP_TOPIC`
-- `--visualization_topic VISUALIZATION_TOPIC`: publish the panoptic segmentation map as an RGB image on `VISUALIZATION_TOPIC` or a more detailed overview if using the `--detailed_visualization` flag
-- `--detailed_visualization`: generate a combined overview of the input RGB image and the semantic, instance, and panoptic segmentation maps
+- `--input_rgb_image_topic INPUT_RGB_IMAGE_TOPIC` : listen to RGB images on this topic (default=`/usb_cam/image_raw`)
+- `--checkpoint CHECKPOINT` : download pretrained models [cityscapes, kitti] or load from the provided path (default=`cityscapes`)
+- `--output_rgb_image_topic OUTPUT_RGB_IMAGE_TOPIC`: publish the semantic and instance maps on this topic as `OUTPUT_HEATMAP_TOPIC/semantic` and `OUTPUT_HEATMAP_TOPIC/instance` (default=`/opendr/panoptic`)
+- `--visualization_topic VISUALIZATION_TOPIC`: publish the panoptic segmentation map as an RGB image on `VISUALIZATION_TOPIC` or a more detailed overview if using the `--detailed_visualization` flag (default=`/opendr/panoptic/rgb_visualization`)
+- `--detailed_visualization`: generate a combined overview of the input RGB image and the semantic, instance, and panoptic segmentation maps and publish it on `OUTPUT_RGB_IMAGE_TOPIC` (default=deactivated)
 
 
 ## Semantic Segmentation ROS Node
diff --git a/projects/opendr_ws/src/perception/package.xml b/projects/opendr_ws/src/perception/package.xml
index db7c42d2f7fa5d8d3ceed221410b6ba56e8fbb34..7b7c0e00c92105453ba62d04ca56ac4913e13c20 100644
--- a/projects/opendr_ws/src/perception/package.xml
+++ b/projects/opendr_ws/src/perception/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <package format="2">
   <name>perception</name>
-  <version>1.0.0</version>
+  <version>1.1.1</version>
   <description>OpenDR's ROS nodes for perception package</description>
   <maintainer email="tefas@csd.auth.gr">OpenDR Project Coordinator</maintainer>
   <license>Apache License v2.0 </license>
diff --git a/projects/opendr_ws/src/perception/scripts/audiovisual_emotion_recognition.py b/projects/opendr_ws/src/perception/scripts/audiovisual_emotion_recognition.py
index c4fe3e126a665955105d03520bcc0a559686e6db..8c0ad8e53597cffbcb79d043fade8710981abf28 100644
--- a/projects/opendr_ws/src/perception/scripts/audiovisual_emotion_recognition.py
+++ b/projects/opendr_ws/src/perception/scripts/audiovisual_emotion_recognition.py
@@ -19,6 +19,7 @@ import argparse
 import numpy as np
 import torch
 import librosa
+import cv2
 
 import rospy
 import message_filters
@@ -35,28 +36,25 @@ from opendr.engine.data import Video, Timeseries
 class AudiovisualEmotionNode:
 
     def __init__(self, input_video_topic="/usb_cam/image_raw", input_audio_topic="/audio/audio",
-                 annotations_topic="/opendr/audiovisual_emotion", buffer_size=3.6, device="cuda"):
+                 output_emotions_topic="/opendr/audiovisual_emotion", buffer_size=3.6, device="cuda"):
         """
         Creates a ROS Node for audiovisual emotion recognition
         :param input_video_topic: Topic from which we are reading the input video. Expects detected face of size 224x224
         :type input_video_topic: str
         :param input_audio_topic: Topic from which we are reading the input audio
         :type input_audio_topic: str
-        :param annotations_topic: Topic to which we are publishing the predicted class
-        :type annotations_topic: str
+        :param output_emotions_topic: Topic to which we are publishing the predicted class
+        :type output_emotions_topic: str
         :param buffer_size: length of audio and video in sec
         :type buffer_size: float
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         """
 
-        self.publisher = rospy.Publisher(annotations_topic, Classification2D, queue_size=10)
+        self.publisher = rospy.Publisher(output_emotions_topic, Classification2D, queue_size=10)
 
-        video_sub = message_filters.Subscriber(input_video_topic, ROS_Image)
-        audio_sub = message_filters.Subscriber(input_audio_topic, AudioData)
-        # synchronize video and audio data topics
-        ts = message_filters.ApproximateTimeSynchronizer([video_sub, audio_sub], 10, 0.1, allow_headerless=True)
-        ts.registerCallback(self.callback)
+        self.input_video_topic = input_video_topic
+        self.input_audio_topic = input_audio_topic
 
         self.bridge = ROSBridge()
 
@@ -78,20 +76,30 @@ class AudiovisualEmotionNode:
         Start the node and begin processing input data
         """
         rospy.init_node('opendr_audiovisualemotion_recognition', anonymous=True)
+
+        video_sub = message_filters.Subscriber(self.input_video_topic, ROS_Image)
+        audio_sub = message_filters.Subscriber(self.input_audio_topic, AudioData)
+        # synchronize video and audio data topics
+        ts = message_filters.ApproximateTimeSynchronizer([video_sub, audio_sub], 10, 0.1, allow_headerless=True)
+        ts.registerCallback(self.callback)
+
         rospy.loginfo("Audiovisual emotion recognition node started!")
         rospy.spin()
 
     def callback(self, image_data, audio_data):
         """
         Callback that process the input data and publishes to the corresponding topics
-        :param image_data: input image message, face image of size 224x224
+        :param image_data: input image message, face image
         :type image_data: sensor_msgs.msg.Image
         :param audio_data: input audio message, speech
         :type audio_data: audio_common_msgs.msg.AudioData
         """
         audio_data = np.reshape(np.frombuffer(audio_data.data, dtype=np.int16)/32768.0, (1, -1))
         self.data_buffer = np.append(self.data_buffer, audio_data)
+
         image_data = self.bridge.from_ros_image(image_data, encoding='bgr8').convert(format='channels_last')
+        image_data = cv2.resize(image_data, (224, 224))
+
         self.video_buffer = np.append(self.video_buffer, np.expand_dims(image_data.data, 0), axis=0)
 
         if self.data_buffer.shape[0] > 16000*self.buffer_size:
@@ -116,16 +124,36 @@ class AudiovisualEmotionNode:
 
 def select_distributed(m, n): return [i*n//m + n//(2*m) for i in range(m)]
 
-if __name__ == '__main__':
-    device = 'cuda' if torch.cuda.is_available() else 'cpu'
 
+if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('--video_topic', type=str, help='listen to video input data on this topic')
-    parser.add_argument('--audio_topic', type=str, help='listen to audio input data on this topic')
-    parser.add_argument('--buffer_size', type=float, default=3.6, help='size of the audio buffer in seconds')
+    parser.add_argument("--input_video_topic", type=str, default="/usb_cam/image_raw",
+                        help="Listen to video input data on this topic")
+    parser.add_argument("--input_audio_topic", type=str, default="/audio/audio",
+                        help="Listen to audio input data on this topic")
+    parser.add_argument("--output_emotions_topic", type=str, default="/opendr/audiovisual_emotion",
+                        help="Topic name for output emotions recognition")
+    parser.add_argument("--buffer_size", type=float, default=3.6,
+                        help="Size of the audio buffer in seconds")
+    parser.add_argument("--device", type=str, default="cuda",
+                        help="Device to use (cpu, cuda)", choices=["cuda", "cpu"])
     args = parser.parse_args()
 
-    avnode = AudiovisualEmotionNode(input_video_topic=args.video_topic, input_audio_topic=args.audio_topic,
-                                    annotations_topic="/opendr/audiovisual_emotion",
+    # Select the device for running
+    try:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
+            print("GPU not found. Using CPU instead.")
+            device = "cpu"
+        else:
+            print("Using CPU")
+            device = "cpu"
+    except:
+        print("Using CPU")
+        device = "cpu"
+
+    avnode = AudiovisualEmotionNode(input_video_topic=args.input_video_topic, input_audio_topic=args.input_audio_topic,
+                                    output_emotions_topic=args.output_emotions_topic,
                                     buffer_size=args.buffer_size, device=device)
     avnode.listen()
diff --git a/projects/opendr_ws/src/perception/scripts/face_detection_retinaface.py b/projects/opendr_ws/src/perception/scripts/face_detection_retinaface.py
index 7227951b1757c1e4ad040334b53d3ad0d052924d..6588e749253051de934157bd0be6dcb8faa28b8c 100755
--- a/projects/opendr_ws/src/perception/scripts/face_detection_retinaface.py
+++ b/projects/opendr_ws/src/perception/scripts/face_detection_retinaface.py
@@ -13,115 +13,132 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import rospy
+import argparse
 import mxnet as mx
+
+import rospy
 from vision_msgs.msg import Detection2DArray
 from sensor_msgs.msg import Image as ROS_Image
 from opendr_bridge import ROSBridge
+
+from opendr.engine.data import Image
 from opendr.perception.object_detection_2d import RetinaFaceLearner
 from opendr.perception.object_detection_2d import draw_bounding_boxes
-from opendr.engine.data import Image
 
 
 class FaceDetectionNode:
-    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_boxes_annotated",
-                 face_detections_topic="/opendr/faces", device="cuda", backbone="resnet"):
+
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_faces_annotated", detections_topic="/opendr/faces",
+                 device="cuda", backbone="resnet"):
         """
-        Creates a ROS Node for face detection
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
-        annotated image)
-        :type output_image_topic: str
-        :param face_detections_topic: Topic to which we are publishing the annotations (if None, we are not publishing
-        annotated pose annotations)
-        :type face_detections_topic:  str
+        Creates a ROS Node for face detection with Retinaface.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no face detection message
+        is published)
+        :type detections_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
-        :param backbone: retinaface backbone, options are ('mnet' and 'resnet'), where 'mnet' detects masked faces as well
+        :param backbone: retinaface backbone, options are either 'mnet' or 'resnet',
+        where 'mnet' detects masked faces as well
         :type backbone: str
         """
+        self.input_rgb_image_topic = input_rgb_image_topic
 
-        # Initialize the face detector
-        self.face_detector = RetinaFaceLearner(backbone=backbone, device=device)
-        self.face_detector.download(path=".", verbose=True)
-        self.face_detector.load("retinaface_{}".format(backbone))
-        self.class_names = ["face", "masked_face"]
-
-        # Initialize OpenDR ROSBridge object
-        self.bridge = ROSBridge()
-
-        # setup communications
-        if output_image_topic is not None:
-            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
         else:
             self.image_publisher = None
 
-        if face_detections_topic is not None:
-            self.face_publisher = rospy.Publisher(face_detections_topic, Detection2DArray, queue_size=10)
+        if detections_topic is not None:
+            self.face_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=1)
         else:
             self.face_publisher = None
 
-        rospy.Subscriber(input_image_topic, ROS_Image, self.callback)
+        self.bridge = ROSBridge()
+
+        # Initialize the face detector
+        self.face_detector = RetinaFaceLearner(backbone=backbone, device=device)
+        self.face_detector.download(path=".", verbose=True)
+        self.face_detector.load("retinaface_{}".format(backbone))
+        self.class_names = ["face", "masked_face"]
+
+    def listen(self):
+        """
+        Start the node and begin processing input data.
+        """
+        rospy.init_node('face_detection_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Face detection RetinaFace node started.")
+        rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
-        :param data: input message
+        Callback that processes the input data and publishes to the corresponding topics.
+        :param data: Input image message
         :type data: sensor_msgs.msg.Image
         """
-
         # Convert sensor_msgs.msg.Image into OpenDR Image
         image = self.bridge.from_ros_image(data, encoding='bgr8')
 
-        # Run pose estimation
+        # Run face detection
         boxes = self.face_detector.infer(image)
 
-        # Get an OpenCV image back
-        image = image.opencv()
-
-        # Convert detected boxes to ROS type and publish
-        ros_boxes = self.bridge.to_ros_boxes(boxes)
+        # Publish detections in ROS message
+        ros_boxes = self.bridge.to_ros_boxes(boxes)  # Convert to ROS boxes
         if self.face_publisher is not None:
             self.face_publisher.publish(ros_boxes)
-            rospy.loginfo("Published face boxes")
 
-        # Annotate image and publish result
-        # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,
-        # only used to test the corresponding bridge methods
-        odr_boxes = self.bridge.from_ros_boxes(ros_boxes)
-        image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)
         if self.image_publisher is not None:
-            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
-            self.image_publisher.publish(message)
-            rospy.loginfo("Published annotated image")
+            # Get an OpenCV image back
+            image = image.opencv()
+            # Annotate image with face detection boxes
+            image = draw_bounding_boxes(image, boxes, class_names=self.class_names)
+            # Convert the annotated OpenDR image to ROS2 image message using bridge and publish it
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/image_faces_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/faces")
+    parser.add_argument("--device", help="Device to use, either \"cpu\" or \"cuda\", defaults to \"cuda\"",
+                        type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--backbone",
+                        help="Retinaface backbone, options are either 'mnet' or 'resnet', where 'mnet' detects "
+                             "masked faces as well",
+                        type=str, default="resnet", choices=["resnet", "mnet"])
+    args = parser.parse_args()
 
-
-if __name__ == '__main__':
-    # Automatically run on GPU/CPU
     try:
-        if mx.context.num_gpus() > 0:
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and mx.context.num_gpus() > 0:
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
+        print("Using CPU.")
+        device = "cpu"
 
-    # initialize ROS node
-    rospy.init_node('opendr_face_detection', anonymous=True)
-    rospy.loginfo("Face detection node started!")
+    face_detection_node = FaceDetectionNode(device=device, backbone=args.backbone,
+                                            input_rgb_image_topic=args.input_rgb_image_topic,
+                                            output_rgb_image_topic=args.output_rgb_image_topic,
+                                            detections_topic=args.detections_topic)
+    face_detection_node.listen()
 
-    # get network backbone ("mnet" detects masked faces as well)
-    backbone = rospy.get_param("~backbone", "resnet")
-    input_image_topic = rospy.get_param("~input_image_topic", "/videofile/image_raw")
 
-    rospy.loginfo("Using backbone: {}".format(backbone))
-    assert backbone in ["resnet", "mnet"], "backbone should be one of ['resnet', 'mnet']"
-
-    # created node object
-    face_detection_node = FaceDetectionNode(device=device, backbone=backbone,
-                                            input_image_topic=input_image_topic)
-    # begin ROS communications
-    rospy.spin()
+if __name__ == '__main__':
+    main()
diff --git a/projects/opendr_ws/src/perception/scripts/face_recognition.py b/projects/opendr_ws/src/perception/scripts/face_recognition.py
index 9bbe783f33fbde30f532f8eee1aaf51968698970..ba17aac74c87e1986f22748781982d952cd85ed8 100755
--- a/projects/opendr_ws/src/perception/scripts/face_recognition.py
+++ b/projects/opendr_ws/src/perception/scripts/face_recognition.py
@@ -13,14 +13,17 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import argparse
+import cv2
+import torch
 
 import rospy
-import torch
-from vision_msgs.msg import ObjectHypothesis
 from std_msgs.msg import String
+from vision_msgs.msg import ObjectHypothesis
 from sensor_msgs.msg import Image as ROS_Image
 from opendr_bridge import ROSBridge
 
+from opendr.engine.data import Image
 from opendr.perception.face_recognition import FaceRecognitionLearner
 from opendr.perception.object_detection_2d import RetinaFaceLearner
 from opendr.perception.object_detection_2d.datasets.transforms import BoundingBoxListToNumpyArray
@@ -28,24 +31,48 @@ from opendr.perception.object_detection_2d.datasets.transforms import BoundingBo
 
 class FaceRecognitionNode:
 
-    def __init__(self, input_image_topic="/usb_cam/image_raw",
-                 face_recognition_topic="/opendr/face_recognition",
-                 face_id_topic="/opendr/face_recognition_id",
-                 database_path="./database", device="cuda",
-                 backbone='mobilefacenet'):
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_face_reco_annotated",
+                 detections_topic="/opendr/face_recognition", detections_id_topic="/opendr/face_recognition_id",
+                 database_path="./database", device="cuda", backbone="mobilefacenet"):
         """
-        Creates a ROS Node for face recognition
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param face_recognition_topic: Topic to which we are publishing the recognized face info
-        (if None, we are not publishing the info)
-        :type face_recognition_topic: str
-        :param face_id_topic: Topic to which we are publishing the ID of the recognized person
-         (if None, we are not publishing the ID)
-        :type face_id_topic:  str
-        :param device: device on which we are running inference ('cpu' or 'cuda')
+        Creates a ROS Node for face recognition.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the recognized face information (if None,
+        no face recognition message is published)
+        :type detections_topic:  str
+        :param detections_id_topic: Topic to which we are publishing the ID of the recognized person (if None,
+        no ID message is published)
+        :type detections_id_topic:  str
+        :param device: Device on which we are running inference ('cpu' or 'cuda')
         :type device: str
+        :param backbone: Backbone network
+        :type backbone: str
+        :param database_path: Path of the directory where the images of the faces to be recognized are stored
+        :type database_path: str
         """
+        self.input_rgb_image_topic = input_rgb_image_topic
+
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
+        else:
+            self.image_publisher = None
+
+        if detections_topic is not None:
+            self.face_publisher = rospy.Publisher(detections_topic, ObjectHypothesis, queue_size=1)
+        else:
+            self.face_publisher = None
+
+        if detections_id_topic is not None:
+            self.face_id_publisher = rospy.Publisher(detections_id_topic, String, queue_size=1)
+        else:
+            self.face_id_publisher = None
+
+        self.bridge = ROSBridge()
 
         # Initialize the face recognizer
         self.recognizer = FaceRecognitionLearner(device=device, mode='backbone_only', backbone=backbone)
@@ -59,27 +86,24 @@ class FaceRecognitionNode:
         self.face_detector.load("retinaface_{}".format('mnet'))
         self.class_names = ["face", "masked_face"]
 
-        if face_recognition_topic is not None:
-            self.face_publisher = rospy.Publisher(face_recognition_topic, ObjectHypothesis, queue_size=10)
-        else:
-            self.face_publisher = None
-
-        if face_id_topic is not None:
-            self.face_id_publisher = rospy.Publisher(face_id_topic, String, queue_size=10)
-        else:
-            self.face_id_publisher = None
-
-        self.bridge = ROSBridge()
-        rospy.Subscriber(input_image_topic, ROS_Image, self.callback)
+    def listen(self):
+        """
+        Start the node and begin processing input data.
+        """
+        rospy.init_node('face_recognition_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Face recognition node started.")
+        rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
-        :param data: input message
+        Callback that processes the input data and publishes to the corresponding topics.
+        :param data: Input image message
         :type data: sensor_msgs.msg.Image
         """
         # Convert sensor_msgs.msg.Image into OpenDR Image
-        image = self.bridge.from_ros_image(data)
+        image = self.bridge.from_ros_image(data, encoding='bgr8')
+        # Get an OpenCV image back
         image = image.opencv()
 
         # Run face detection and recognition
@@ -90,59 +114,74 @@ class FaceRecognitionNode:
                 boxes = bounding_boxes[:, :4]
                 for idx, box in enumerate(boxes):
                     (startX, startY, endX, endY) = int(box[0]), int(box[1]), int(box[2]), int(box[3])
-                    img = image[startY:endY, startX:endX]
-                    result = self.recognizer.infer(img)
-
-                    if result.data is not None:
-                        if self.face_publisher is not None:
-                            ros_face = self.bridge.to_ros_face(result)
-                            self.face_publisher.publish(ros_face)
-
-                        if self.face_id_publisher is not None:
-                            ros_face_id = self.bridge.to_ros_face_id(result)
-                            self.face_id_publisher.publish(ros_face_id.data)
-
-                    else:
-                        result.description = "Unknown"
-                        if self.face_publisher is not None:
-                            ros_face = self.bridge.to_ros_face(result)
-                            self.face_publisher.publish(ros_face)
+                    frame = image[startY:endY, startX:endX]
+                    result = self.recognizer.infer(frame)
+
+                    # Publish face information and ID
+                    if self.face_publisher is not None:
+                        self.face_publisher.publish(self.bridge.to_ros_face(result))
+
+                    if self.face_id_publisher is not None:
+                        self.face_id_publisher.publish(self.bridge.to_ros_face_id(result))
+
+                    if self.image_publisher is not None:
+                        if result.description != 'Not found':
+                            color = (0, 255, 0)
+                        else:
+                            color = (0, 0, 255)
+                        # Annotate image with face detection/recognition boxes
+                        cv2.rectangle(image, (startX, startY), (endX, endY), color, thickness=2)
+                        cv2.putText(image, result.description, (startX, endY - 10), cv2.FONT_HERSHEY_SIMPLEX,
+                                    1, color, 2, cv2.LINE_AA)
+
+            if self.image_publisher is not None:
+                # Convert the annotated OpenDR image to ROS2 image message using bridge and publish it
+                self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/image_face_reco_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/face_recognition")
+    parser.add_argument("-id", "--detections_id_topic", help="Topic name for detection ID messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/face_recognition_id")
+    parser.add_argument("--device", help="Device to use, either \"cpu\" or \"cuda\", defaults to \"cuda\"",
+                        type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--backbone", help="Backbone network, defaults to mobilefacenet",
+                        type=str, default="mobilefacenet", choices=["mobilefacenet"])
+    parser.add_argument("--dataset_path",
+                        help="Path of the directory where the images of the faces to be recognized are stored, "
+                             "defaults to \"./database\"",
+                        type=str, default="./database")
+    args = parser.parse_args()
 
-                        if self.face_id_publisher is not None:
-                            ros_face_id = self.bridge.to_ros_face_id(result)
-                            self.face_id_publisher.publish(ros_face_id.data)
+    try:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
+            print("GPU not found. Using CPU instead.")
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
+    except:
+        print("Using CPU.")
+        device = "cpu"
 
-                # We get can the data back using self.bridge.from_ros_face(ros_face)
-                # e.g.
-                # face = self.bridge.from_ros_face(ros_face)
-                # face.description = self.recognizer.database[face.id][0]
+    face_recognition_node = FaceRecognitionNode(device=device, backbone=args.backbone, database_path=args.dataset_path,
+                                                input_rgb_image_topic=args.input_rgb_image_topic,
+                                                output_rgb_image_topic=args.output_rgb_image_topic,
+                                                detections_topic=args.detections_topic,
+                                                detections_id_topic=args.detections_id_topic)
+    face_recognition_node.listen()
 
 
 if __name__ == '__main__':
-    # Select the device for running the
-    try:
-        if torch.cuda.is_available():
-            print("GPU found.")
-            device = 'cuda'
-        else:
-            print("GPU not found. Using CPU instead.")
-            device = 'cpu'
-    except:
-        device = 'cpu'
-
-    # initialize ROS node
-    rospy.init_node('opendr_face_recognition', anonymous=True)
-    rospy.loginfo("Face recognition node started!")
-
-    # get network backbone
-    backbone = rospy.get_param("~backbone", "mobilefacenet")
-    input_image_topic = rospy.get_param("~input_image_topic", "/usb_cam/image_raw")
-    database_path = rospy.get_param('~database_path', './')
-    rospy.loginfo("Using backbone: {}".format(backbone))
-    assert backbone in ["mobilefacenet", "ir_50"], "backbone should be one of ['mobilefacenet', 'ir_50']"
-
-    face_recognition_node = FaceRecognitionNode(device=device, backbone=backbone,
-                                                input_image_topic=input_image_topic,
-                                                database_path=database_path)
-    # begin ROS communications
-    rospy.spin()
+    main()
diff --git a/projects/opendr_ws/src/perception/scripts/fall_detection.py b/projects/opendr_ws/src/perception/scripts/fall_detection.py
index ef456d2ec8051145d1673400a2e3ca7997f359a6..40fde10400a925b3c237bfb42c132cb026e9058d 100644
--- a/projects/opendr_ws/src/perception/scripts/fall_detection.py
+++ b/projects/opendr_ws/src/perception/scripts/fall_detection.py
@@ -13,76 +13,89 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import cv2
+import argparse
+import torch
 
 import rospy
-import torch
-import cv2
 from vision_msgs.msg import Detection2DArray
 from sensor_msgs.msg import Image as ROS_Image
 from opendr_bridge import ROSBridge
+
+from opendr.engine.data import Image
+from opendr.engine.target import BoundingBox, BoundingBoxList
 from opendr.perception.pose_estimation import get_bbox
 from opendr.perception.pose_estimation import LightweightOpenPoseLearner
 from opendr.perception.fall_detection import FallDetectorLearner
-from opendr.engine.data import Image
-from opendr.engine.target import BoundingBox, BoundingBoxList
 
 
 class FallDetectionNode:
 
-    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_fall_annotated",
-                 fall_annotations_topic="/opendr/falls", device="cuda"):
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_fallen_annotated", detections_topic="/opendr/fallen",
+                 device="cuda", num_refinement_stages=2, use_stride=False, half_precision=False):
         """
-        Creates a ROS Node for fall detection
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
-        annotated image)
-        :type output_image_topic: str
-        :param fall_annotations_topic: Topic to which we are publishing the annotations (if None, we are not publishing
-        annotated fall annotations)
-        :type fall_annotations_topic:  str
+        Creates a ROS Node for rule-based fall detection based on Lightweight OpenPose.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no fall detection message
+        is published)
+        :type detections_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
+        :param num_refinement_stages: Specifies the number of pose estimation refinement stages are added on the
+        model's head, including the initial stage. Can be 0, 1 or 2, with more stages meaning slower and more accurate
+        inference
+        :type num_refinement_stages: int
+        :param use_stride: Whether to add a stride value in the model, which reduces accuracy but increases
+        inference speed
+        :type use_stride: bool
+        :param half_precision: Enables inference using half (fp16) precision instead of single (fp32) precision.
+        Valid only for GPU-based inference
+        :type half_precision: bool
         """
-        if output_image_topic is not None:
-            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
+        self.input_rgb_image_topic = input_rgb_image_topic
+
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
         else:
             self.image_publisher = None
 
-        if fall_annotations_topic is not None:
-            self.fall_publisher = rospy.Publisher(fall_annotations_topic, Detection2DArray, queue_size=10)
+        if detections_topic is not None:
+            self.fall_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=1)
         else:
             self.fall_publisher = None
 
-        self.input_image_topic = input_image_topic
-
         self.bridge = ROSBridge()
 
-        # Initialize the pose estimation
-        self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=2,
-                                                         mobilenet_use_stride=False,
-                                                         half_precision=False)
+        # Initialize the pose estimation learner
+        self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=num_refinement_stages,
+                                                         mobilenet_use_stride=use_stride,
+                                                         half_precision=half_precision)
         self.pose_estimator.download(path=".", verbose=True)
         self.pose_estimator.load("openpose_default")
 
+        # Initialize the fall detection learner
         self.fall_detector = FallDetectorLearner(self.pose_estimator)
 
     def listen(self):
         """
-        Start the node and begin processing input data
+        Start the node and begin processing input data.
         """
-        rospy.init_node('opendr_fall_detection', anonymous=True)
-        rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)
-        rospy.loginfo("Fall detection node started!")
+        rospy.init_node('fall_detection_node', anonymous=True)
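+        # queue_size=1 and a large buff_size keep only the latest frame so the callback does not lag behind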
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Fall detection node started.")
         rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
-        :param data: input message
+        Callback that processes the input data and publishes to the corresponding topics.
+        :param data: Input image message
         :type data: sensor_msgs.msg.Image
         """
-
         # Convert sensor_msgs.msg.Image into OpenDR Image
         image = self.bridge.from_ros_image(data, encoding='bgr8')
 
@@ -93,41 +106,78 @@ class FallDetectionNode:
         image = image.opencv()
 
         bboxes = BoundingBoxList([])
+        fallen_pose_id = 0
         for detection in detections:
             fallen = detection[0].data
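+            # A value of 1 marks a detected fall; any other value is treated as not fallen and skipped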
-            pose = detection[2]
 
             if fallen == 1:
-                color = (0, 0, 255)
+                pose = detection[2]
                 x, y, w, h = get_bbox(pose)
-                bbox = BoundingBox(left=x, top=y, width=w, height=h, name=0)
-                bboxes.data.append(bbox)
-
-                cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
-                cv2.putText(image, "Detected fallen person", (5, 55), cv2.FONT_HERSHEY_SIMPLEX,
-                            0.75, color, 1, cv2.LINE_AA)
+                if self.image_publisher is not None:
+                    # Paint person bounding box inferred from pose
+                    color = (0, 0, 255)
+                    cv2.rectangle(image, (x, y), (x + w, y + h), color, 2)
+                    cv2.putText(image, "Fallen person", (x, y + h - 10), cv2.FONT_HERSHEY_SIMPLEX,
+                                1, color, 2, cv2.LINE_AA)
 
-                # Convert detected boxes to ROS type and publish
-                ros_boxes = self.bridge.to_ros_boxes(bboxes)
                 if self.fall_publisher is not None:
-                    self.fall_publisher.publish(ros_boxes)
+                    # Convert detected boxes to ROS type and add to list
+                    bboxes.data.append(BoundingBox(left=x, top=y, width=w, height=h, name=fallen_pose_id))
+                    fallen_pose_id += 1
 
-        if self.image_publisher is not None:
-            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
-            self.image_publisher.publish(message)
+        if self.fall_publisher is not None:
+            if len(bboxes) > 0:
+                self.fall_publisher.publish(self.bridge.to_ros_boxes(bboxes))
 
+        if self.image_publisher is not None:
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/image_fallen_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/fallen")
+    parser.add_argument("--device", help="Device to use, either \"cpu\" or \"cuda\", defaults to \"cuda\"",
+                        type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--accelerate", help="Enables acceleration flags (e.g., stride)", default=False,
+                        action="store_true")
+    args = parser.parse_args()
 
-if __name__ == '__main__':
-    # Select the device for running the
     try:
-        if torch.cuda.is_available():
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
-
-    fall_detection_node = FallDetectionNode(device=device)
+        print("Using CPU.")
+        device = "cpu"
+
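+    # --accelerate trades accuracy for speed: enable stride and half precision, skip refinement stages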
+    if args.accelerate:
+        stride = True
+        stages = 0
+        half_prec = True
+    else:
+        stride = False
+        stages = 2
+        half_prec = False
+
+    fall_detection_node = FallDetectionNode(device=device,
+                                            input_rgb_image_topic=args.input_rgb_image_topic,
+                                            output_rgb_image_topic=args.output_rgb_image_topic,
+                                            detections_topic=args.detections_topic,
+                                            num_refinement_stages=stages, use_stride=stride, half_precision=half_prec)
     fall_detection_node.listen()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/projects/opendr_ws/src/perception/scripts/heart_anomaly_detection.py b/projects/opendr_ws/src/perception/scripts/heart_anomaly_detection.py
index 4e72471b9d5df26fadc44bc7263f5a76f04c9723..b36ecbdbdba63234f237716d0b3e813d60a8ea62 100755
--- a/projects/opendr_ws/src/perception/scripts/heart_anomaly_detection.py
+++ b/projects/opendr_ws/src/perception/scripts/heart_anomaly_detection.py
@@ -25,22 +25,23 @@ from opendr.perception.heart_anomaly_detection import GatedRecurrentUnitLearner,
 
 class HeartAnomalyNode:
 
-    def __init__(self, input_topic="/ecg/ecg", prediction_topic="/opendr/heartanomaly", device="cuda", model='anbof'):
+    def __init__(self, input_ecg_topic="/ecg/ecg", output_heart_anomaly_topic="/opendr/heart_anomaly",
+                 device="cuda", model="anbof"):
         """
         Creates a ROS Node for heart anomaly (atrial fibrillation) detection from ecg data
-        :param input_topic: Topic from which we are reading the input array data
-        :type input_topic: str
-        :param prediction_topic: Topic to which we are publishing the predicted class
-        :type prediction_topic: str
+        :param input_ecg_topic: Topic from which we are reading the input array data
+        :type input_ecg_topic: str
+        :param output_heart_anomaly_topic: Topic to which we are publishing the predicted class
+        :type output_heart_anomaly_topic: str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         :param model: model to use: anbof or gru
         :type model: str
         """
 
-        self.publisher = rospy.Publisher(prediction_topic, Classification2D, queue_size=10)
+        self.publisher = rospy.Publisher(output_heart_anomaly_topic, Classification2D, queue_size=10)
 
-        rospy.Subscriber(input_topic, Float32MultiArray, self.callback)
+        rospy.Subscriber(input_ecg_topic, Float32MultiArray, self.callback)
 
         self.bridge = ROSBridge()
 
@@ -70,8 +71,8 @@ class HeartAnomalyNode:
     def callback(self, msg_data):
         """
         Callback that process the input data and publishes to the corresponding topics
-        :param data: input message
-        :type data: std_msgs.msg.Float32MultiArray
+        :param msg_data: input message
+        :type msg_data: std_msgs.msg.Float32MultiArray
         """
         # Convert Float32MultiArray to OpenDR Timeseries
         data = self.bridge.from_rosarray_to_timeseries(msg_data, self.channels, self.series_length)
@@ -83,17 +84,34 @@ class HeartAnomalyNode:
         ros_class = self.bridge.from_category_to_rosclass(class_pred)
         self.publisher.publish(ros_class)
 
-if __name__ == '__main__':
-    # Select the device for running
-    try:
-        device = 'cuda' if torch.cuda.is_available() else 'cpu'
-    except:
-        device = 'cpu'
 
+if __name__ == '__main__':
     parser = argparse.ArgumentParser()
-    parser.add_argument('input_topic', type=str, help='listen to input data on this topic')
-    parser.add_argument('model', type=str, help='model to be used for prediction: anbof or gru')
+    parser.add_argument("--input_ecg_topic", type=str, default="/ecg/ecg",
+                        help="listen to input ECG data on this topic")
+    parser.add_argument("--model", type=str, default="anbof", help="model to be used for prediction: anbof or gru",
+                        choices=["anbof", "gru"])
+    parser.add_argument("--output_heart_anomaly_topic", type=str, default="/opendr/heart_anomaly",
+                        help="Topic name for heart anomaly detection topic")
+    parser.add_argument("--device", type=str, default="cuda", help="Device to use (cpu, cuda)",
+                        choices=["cuda", "cpu"])
+
     args = parser.parse_args()
 
-    gesture_node = HeartAnomalyNode(input_topic=args.input_topic, model=args.model, device=device)
+    try:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
+            print("GPU not found. Using CPU instead.")
+            device = "cpu"
+        else:
+            print("Using CPU")
+            device = "cpu"
+    except:
+        print("Using CPU")
+        device = "cpu"
+
+    gesture_node = HeartAnomalyNode(input_ecg_topic=args.input_ecg_topic,
+                                    output_heart_anomaly_topic=args.output_heart_anomaly_topic,
+                                    model=args.model, device=device)
     gesture_node.listen()
diff --git a/projects/opendr_ws/src/perception/scripts/object_detection_2d_centernet.py b/projects/opendr_ws/src/perception/scripts/object_detection_2d_centernet.py
index c1615f99a7dd00b7a70707ea9b07a1c99ba96a91..db64dd199baa289d37e75f2679768327114b1772 100755
--- a/projects/opendr_ws/src/perception/scripts/object_detection_2d_centernet.py
+++ b/projects/opendr_ws/src/perception/scripts/object_detection_2d_centernet.py
@@ -13,110 +13,127 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import rospy
+import argparse
 import mxnet as mx
-import numpy as np
+
+import rospy
 from vision_msgs.msg import Detection2DArray
 from sensor_msgs.msg import Image as ROS_Image
-from opendr.engine.data import Image
 from opendr_bridge import ROSBridge
+
+from opendr.engine.data import Image
 from opendr.perception.object_detection_2d import CenterNetDetectorLearner
 from opendr.perception.object_detection_2d import draw_bounding_boxes
 
 
 class ObjectDetectionCenterNetNode:
-    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_boxes_annotated",
-                 detections_topic="/opendr/objects", device="cuda", backbone="resnet50_v1b"):
+
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_objects_annotated", detections_topic="/opendr/objects",
+                 device="cuda", backbone="resnet50_v1b"):
         """
-        Creates a ROS Node for face detection
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
-        annotated image)
-        :type output_image_topic: str
-        :param detections_topic: Topic to which we are publishing the annotations (if None, we are not publishing
-        annotated pose annotations)
+        Creates a ROS Node for object detection with CenterNet.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no object detection message
+        is published)
         :type detections_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         :param backbone: backbone network
         :type backbone: str
         """
+        self.input_rgb_image_topic = input_rgb_image_topic
 
-        # Initialize the face detector
-        self.object_detector = CenterNetDetectorLearner(backbone=backbone, device=device)
-        self.object_detector.download(path=".", verbose=True)
-        self.object_detector.load("centernet_default")
-        self.class_names = self.object_detector.classes
-
-        # Initialize OpenDR ROSBridge object
-        self.bridge = ROSBridge()
-
-        # setup communications
-        if output_image_topic is not None:
-            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
         else:
             self.image_publisher = None
 
         if detections_topic is not None:
-            self.bbox_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=10)
+            self.object_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=1)
         else:
-            self.bbox_publisher = None
+            self.object_publisher = None
+
+        self.bridge = ROSBridge()
 
-        rospy.Subscriber(input_image_topic, ROS_Image, self.callback)
+        # Initialize the object detector
+        self.object_detector = CenterNetDetectorLearner(backbone=backbone, device=device)
+        self.object_detector.download(path=".", verbose=True)
+        self.object_detector.load("centernet_default")
+
+    def listen(self):
+        """
+        Start the node and begin processing input data.
+        """
+        rospy.init_node('object_detection_centernet_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Object detection Centernet node started.")
+        rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
+        Callback that processes the input data and publishes to the corresponding topics.
         :param data: input message
         :type data: sensor_msgs.msg.Image
         """
-
         # Convert sensor_msgs.msg.Image into OpenDR Image
         image = self.bridge.from_ros_image(data, encoding='bgr8')
 
-        # Run pose estimation
+        # Run object detection
         boxes = self.object_detector.infer(image, threshold=0.45, keep_size=False)
 
-        # Get an OpenCV image back
-        image = np.float32(image.opencv())
-
-        # Convert detected boxes to ROS type and publish
-        ros_boxes = self.bridge.to_ros_boxes(boxes)
-        if self.bbox_publisher is not None:
-            self.bbox_publisher.publish(ros_boxes)
-            rospy.loginfo("Published face boxes")
+        # Publish detections in ROS message
+        ros_boxes = self.bridge.to_ros_boxes(boxes)  # Convert to ROS boxes
+        if self.object_publisher is not None:
+            self.object_publisher.publish(ros_boxes)
 
-        # Annotate image and publish result
-        # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,
-        # only used to test the corresponding bridge methods
-        odr_boxes = self.bridge.from_ros_boxes(ros_boxes)
-        image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)
         if self.image_publisher is not None:
-            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
-            self.image_publisher.publish(message)
-            rospy.loginfo("Published annotated image")
+            # Get an OpenCV image back
+            image = image.opencv()
+            # Annotate image with object detection boxes
+            image = draw_bounding_boxes(image, boxes, class_names=self.object_detector.classes)
+            # Convert the annotated OpenDR image to a ROS image message using the bridge and publish it
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/image_objects_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/objects")
+    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--backbone", help="Backbone network, defaults to \"resnet50_v1b\"",
+                        type=str, default="resnet50_v1b", choices=["resnet50_v1b"])
+    args = parser.parse_args()
 
-
-if __name__ == '__main__':
-    # Automatically run on GPU/CPU
     try:
-        if mx.context.num_gpus() > 0:
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and mx.context.num_gpus() > 0:
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
+        print("Using CPU.")
+        device = "cpu"
 
-    # initialize ROS node
-    rospy.init_node('opendr_object_detection', anonymous=True)
-    rospy.loginfo("Object detection node started!")
+    object_detection_centernet_node = ObjectDetectionCenterNetNode(device=device, backbone=args.backbone,
+                                                                   input_rgb_image_topic=args.input_rgb_image_topic,
+                                                                   output_rgb_image_topic=args.output_rgb_image_topic,
+                                                                   detections_topic=args.detections_topic)
+    object_detection_centernet_node.listen()
 
-    input_image_topic = rospy.get_param("~input_image_topic", "/videofile/image_raw")
 
-    # created node object
-    object_detection_node = ObjectDetectionCenterNetNode(device=device, input_image_topic=input_image_topic)
-    # begin ROS communications
-    rospy.spin()
+if __name__ == '__main__':
+    main()
diff --git a/projects/opendr_ws/src/perception/scripts/object_detection_2d_detr.py b/projects/opendr_ws/src/perception/scripts/object_detection_2d_detr.py
index ec98c4ddf0b9f8651c06104e1d5c1fa9bc30daa7..42f7000966580569347ea5f35ca0fd274f6b30b6 100644
--- a/projects/opendr_ws/src/perception/scripts/object_detection_2d_detr.py
+++ b/projects/opendr_ws/src/perception/scripts/object_detection_2d_detr.py
@@ -14,46 +14,48 @@
 # limitations under the License.
 
 
-import rospy
+import argparse
 import torch
-import numpy as np
+
+import rospy
 from vision_msgs.msg import Detection2DArray
 from sensor_msgs.msg import Image as ROS_Image
-from opendr.engine.data import Image
 from opendr_bridge import ROSBridge
-from opendr.perception.object_detection_2d.detr.algorithm.util.draw import draw
+
+from opendr.engine.data import Image
 from opendr.perception.object_detection_2d import DetrLearner
+from opendr.perception.object_detection_2d.detr.algorithm.util.draw import draw
 
 
-class DetrNode:
+class ObjectDetectionDetrNode:
 
-    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_boxes_annotated",
-                 detection_annotations_topic="/opendr/objects", device="cuda"):
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_objects_annotated", detections_topic="/opendr/objects",
+                 device="cuda"):
         """
-        Creates a ROS Node for object detection with DETR
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
-        annotated image)
-        :type output_image_topic: str
-        :param detection_annotations_topic: Topic to which we are publishing the annotations (if None, we are not publishing
-        annotations)
-        :type detection_annotations_topic:  str
+        Creates a ROS Node for object detection with DETR.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no object detection message
+        is published)
+        :type detections_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         """
+        self.input_rgb_image_topic = input_rgb_image_topic
 
-        if output_image_topic is not None:
-            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
         else:
             self.image_publisher = None
 
-        if detection_annotations_topic is not None:
-            self.detection_publisher = rospy.Publisher(detection_annotations_topic, Detection2DArray, queue_size=10)
+        if detections_topic is not None:
+            self.object_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=1)
         else:
-            self.detection_publisher = None
-
-        rospy.Subscriber(input_image_topic, ROS_Image, self.callback)
+            self.object_publisher = None
 
         self.bridge = ROSBridge()
 
@@ -63,52 +65,71 @@ class DetrNode:
 
     def listen(self):
         """
-        Start the node and begin processing input data
+        Start the node and begin processing input data.
         """
-        rospy.init_node('detr', anonymous=True)
-        rospy.loginfo("DETR node started!")
+        rospy.init_node('object_detection_detr_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Object detection DETR node started.")
         rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
+        Callback that processes the input data and publishes to the corresponding topics.
         :param data: input message
         :type data: sensor_msgs.msg.Image
         """
-
         # Convert sensor_msgs.msg.Image into OpenDR Image
         image = self.bridge.from_ros_image(data, encoding='bgr8')
 
-        # Run detection estimation
+        # Run object detection
         boxes = self.detr_learner.infer(image)
 
         # Get an OpenCV image back
-        image = np.float32(image.opencv())
+        image = image.opencv()
 
-        #  Annotate image and publish results:
-        if self.detection_publisher is not None:
-            ros_detection = self.bridge.to_ros_bounding_box_list(boxes)
-            self.detection_publisher.publish(ros_detection)
-            # We get can the data back using self.bridge.from_ros_bounding_box_list(ros_detection)
-            # e.g., opendr_detection = self.bridge.from_ros_bounding_box_list(ros_detection)
+        # Publish detections in ROS message
+        ros_boxes = self.bridge.to_ros_bounding_box_list(boxes)  # Convert to ROS bounding_box_list
+        if self.object_publisher is not None:
+            self.object_publisher.publish(ros_boxes)
 
         if self.image_publisher is not None:
+            # Annotate image with object detection boxes
             image = draw(image, boxes)
-            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
-            self.image_publisher.publish(message)
-
+            # Convert the annotated OpenDR image to ROS2 image message using bridge and publish it
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=str, default="/opendr/image_objects_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=str, default="/opendr/objects")
+    parser.add_argument("--device", help="Device to use, either \"cpu\" or \"cuda\", defaults to \"cuda\"",
+                        type=str, default="cuda", choices=["cuda", "cpu"])
+    args = parser.parse_args()
 
-if __name__ == '__main__':
-    # Select the device for running the
     try:
-        if torch.cuda.is_available():
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
+        print("Using CPU.")
+        device = "cpu"
+
+    object_detection_detr_node = ObjectDetectionDetrNode(device=device,
+                                                         input_rgb_image_topic=args.input_rgb_image_topic,
+                                                         output_rgb_image_topic=args.output_rgb_image_topic,
+                                                         detections_topic=args.detections_topic)
+    object_detection_detr_node.listen()
 
-    detection_estimation_node = DetrNode(device=device)
-    detection_estimation_node.listen()
+
+if __name__ == '__main__':
+    main()
diff --git a/projects/opendr_ws/src/perception/scripts/object_detection_2d_nanodet.py b/projects/opendr_ws/src/perception/scripts/object_detection_2d_nanodet.py
new file mode 100644
index 0000000000000000000000000000000000000000..b1ea62c9143f9e44515954b2f47d13177c214106
--- /dev/null
+++ b/projects/opendr_ws/src/perception/scripts/object_detection_2d_nanodet.py
@@ -0,0 +1,137 @@
+#!/usr/bin/env python
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+import torch
+
+import rospy
+from vision_msgs.msg import Detection2DArray
+from sensor_msgs.msg import Image as ROS_Image
+from opendr_bridge import ROSBridge
+
+from opendr.engine.data import Image
+from opendr.perception.object_detection_2d import NanodetLearner
+from opendr.perception.object_detection_2d import draw_bounding_boxes
+
+
+class ObjectDetectionNanodetNode:
+
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_objects_annotated", detections_topic="/opendr/objects",
+                 device="cuda", model="plus_m_1.5x_416"):
+        """
+        Creates a ROS Node for object detection with Nanodet.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no object detection message
+        is published)
+        :type detections_topic:  str
+        :param device: device on which we are running inference ('cpu' or 'cuda')
+        :type device: str
+        :param model: the name of the model whose config file we want to load
+        :type model: str
+        """
+        self.input_rgb_image_topic = input_rgb_image_topic
+
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
+        else:
+            self.image_publisher = None
+
+        if detections_topic is not None:
+            self.object_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=1)
+        else:
+            self.object_publisher = None
+
+        self.bridge = ROSBridge()
+
+        # Initialize the object detector
+        self.object_detector = NanodetLearner(model_to_use=model, device=device)
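+        # Pretrained weights are downloaded into the working directory and loaded from ./nanodet_<model>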
+        self.object_detector.download(path=".", mode="pretrained", verbose=True)
+        self.object_detector.load("./nanodet_{}".format(model))
+
+    def listen(self):
+        """
+        Start the node and begin processing input data.
+        """
+        rospy.init_node('object_detection_nanodet_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Object detection Nanodet node started.")
+        rospy.spin()
+
+    def callback(self, data):
+        """
+        Callback that processes the input data and publishes to the corresponding topics.
+        :param data: input message
+        :type data: sensor_msgs.msg.Image
+        """
+        # Convert sensor_msgs.msg.Image into OpenDR Image
+        image = self.bridge.from_ros_image(data, encoding='bgr8')
+
+        # Run object detection
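+        # The threshold argument discards low-confidence detections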
+        boxes = self.object_detector.infer(image, threshold=0.35)
+
+        # Get an OpenCV image back
+        image = image.opencv()
+
+        # Publish detections in ROS message
+        ros_boxes = self.bridge.to_ros_boxes(boxes)  # Convert to ROS boxes
+        if self.object_publisher is not None:
+            self.object_publisher.publish(ros_boxes)
+
+        if self.image_publisher is not None:
+            # Annotate image with object detection boxes
+            image = draw_bounding_boxes(image, boxes, class_names=self.object_detector.classes)
+            # Convert the annotated OpenDR image to a ROS image message using the bridge and publish it
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=str, default="/opendr/image_objects_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=str, default="/opendr/objects")
+    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--model", help="Model that config file will be used", type=str, default="plus_m_1.5x_416")
+    args = parser.parse_args()
+
+    try:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
+            print("GPU not found. Using CPU instead.")
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
+    except:
+        print("Using CPU.")
+        device = "cpu"
+
+    object_detection_nanodet_node = ObjectDetectionNanodetNode(device=device, model=args.model,
+                                                               input_rgb_image_topic=args.input_rgb_image_topic,
+                                                               output_rgb_image_topic=args.output_rgb_image_topic,
+                                                               detections_topic=args.detections_topic)
+    object_detection_nanodet_node.listen()
+
+
+if __name__ == '__main__':
+    main()
diff --git a/projects/opendr_ws/src/perception/scripts/object_detection_2d_ssd.py b/projects/opendr_ws/src/perception/scripts/object_detection_2d_ssd.py
index f0dd7ca1d3c13cf204ae7b2f4c46facc7314ac9b..aa9ed03a2ee28114beda8d7671797ee2e38184e5 100755
--- a/projects/opendr_ws/src/perception/scripts/object_detection_2d_ssd.py
+++ b/projects/opendr_ws/src/perception/scripts/object_detection_2d_ssd.py
@@ -13,12 +13,14 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import rospy
+import argparse
 import mxnet as mx
-import numpy as np
+
+import rospy
 from vision_msgs.msg import Detection2DArray
 from sensor_msgs.msg import Image as ROS_Image
 from opendr_bridge import ROSBridge
+
 from opendr.engine.data import Image
 from opendr.perception.object_detection_2d import SingleShotDetectorLearner
 from opendr.perception.object_detection_2d import draw_bounding_boxes
@@ -26,114 +28,139 @@ from opendr.perception.object_detection_2d import Seq2SeqNMSLearner, SoftNMS, Fa
 
 
 class ObjectDetectionSSDNode:
-    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_boxes_annotated",
-                 detections_topic="/opendr/objects", device="cuda", backbone="vgg16_atrous", nms_type='default'):
+
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_objects_annotated", detections_topic="/opendr/objects",
+                 device="cuda", backbone="vgg16_atrous", nms_type='default'):
         """
-        Creates a ROS Node for face detection
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
-        annotated image)
-        :type output_image_topic: str
-        :param detections_topic: Topic to which we are publishing the annotations (if None, we are not publishing
-        annotated pose annotations)
+        Creates a ROS Node for object detection with SSD.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no object detection message
+        is published)
         :type detections_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         :param backbone: backbone network
         :type backbone: str
-        :param ms_type: type of NMS method
+        :param nms_type: type of NMS method
         :type nms_type: str
         """
+        self.input_rgb_image_topic = input_rgb_image_topic
+
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
+        else:
+            self.image_publisher = None
 
-        # Initialize the face detector
+        if detections_topic is not None:
+            self.object_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=1)
+        else:
+            self.object_publisher = None
+
+        self.bridge = ROSBridge()
+
+        # Initialize the object detector
         self.object_detector = SingleShotDetectorLearner(backbone=backbone, device=device)
         self.object_detector.download(path=".", verbose=True)
         self.object_detector.load("ssd_default_person")
-        self.class_names = self.object_detector.classes
         self.custom_nms = None
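+        # custom_nms stays None for the default NMS; otherwise it is passed to infer() in the callback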
 
-        # Initialize Seq2Seq-NMS if selected
+        # Initialize NMS if selected
         if nms_type == 'seq2seq-nms':
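+            # Seq2Seq-NMS is a learned NMS method; its pretrained model is downloaded and loaded below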
             self.custom_nms = Seq2SeqNMSLearner(fmod_map_type='EDGEMAP', iou_filtering=0.8,
-                                                app_feats='fmod', device=self.device)
-            self.custom_nms.download(model_name='seq2seq_pets_jpd', path='.')
-            self.custom_nms.load('./seq2seq_pets_jpd/', verbose=True)
+                                                app_feats='fmod', device=device)
+            self.custom_nms.download(model_name='seq2seq_pets_jpd_fmod', path='.')
+            self.custom_nms.load('./seq2seq_pets_jpd_fmod/', verbose=True)
+            rospy.loginfo("Object Detection 2D SSD node seq2seq-nms initialized.")
         elif nms_type == 'soft-nms':
-            self.custom_nms = SoftNMS(nms_thres=0.45, device=self.device)
+            self.custom_nms = SoftNMS(nms_thres=0.45, device=device)
+            rospy.loginfo("Object Detection 2D SSD node soft-nms initialized.")
         elif nms_type == 'fast-nms':
-            self.custom_nms = FastNMS(nms_thres=0.45, device=self.device)
+            self.custom_nms = FastNMS(device=device)
+            rospy.loginfo("Object Detection 2D SSD node fast-nms initialized.")
         elif nms_type == 'cluster-nms':
-            self.custom_nms = ClusterNMS(nms_thres=0.45, device=self.device)
-
-        # Initialize OpenDR ROSBridge object
-        self.bridge = ROSBridge()
-
-        # setup communications
-        if output_image_topic is not None:
-            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
+            self.custom_nms = ClusterNMS(device=device)
+            rospy.loginfo("Object Detection 2D SSD node cluster-nms initialized.")
         else:
-            self.image_publisher = None
+            rospy.loginfo("Object Detection 2D SSD node using default NMS.")
 
-        if detections_topic is not None:
-            self.bbox_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=10)
-        else:
-            self.bbox_publisher = None
-
-        rospy.Subscriber(input_image_topic, ROS_Image, self.callback)
+    def listen(self):
+        """
+        Start the node and begin processing input data.
+        """
+        rospy.init_node('object_detection_ssd_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Object detection SSD node started.")
+        rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
+        Callback that processes the input data and publishes to the corresponding topics.
         :param data: input message
         :type data: sensor_msgs.msg.Image
         """
-
         # Convert sensor_msgs.msg.Image into OpenDR Image
         image = self.bridge.from_ros_image(data, encoding='bgr8')
 
-        # Run pose estimation
+        # Run object detection
         boxes = self.object_detector.infer(image, threshold=0.45, keep_size=False, custom_nms=self.custom_nms)
 
-        # Get an OpenCV image back
-        image = np.float32(image.opencv())
-
-        # Convert detected boxes to ROS type and publish
-        ros_boxes = self.bridge.to_ros_boxes(boxes)
-        if self.bbox_publisher is not None:
-            self.bbox_publisher.publish(ros_boxes)
-            rospy.loginfo("Published face boxes")
+        # Publish detections in ROS message
+        ros_boxes = self.bridge.to_ros_boxes(boxes)  # Convert to ROS boxes
+        if self.object_publisher is not None:
+            self.object_publisher.publish(ros_boxes)
 
-        # Annotate image and publish result
-        # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,
-        # only used to test the corresponding bridge methods
-        odr_boxes = self.bridge.from_ros_boxes(ros_boxes)
-        image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)
         if self.image_publisher is not None:
-            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
-            self.image_publisher.publish(message)
-            rospy.loginfo("Published annotated image")
+            # Get an OpenCV image back
+            image = image.opencv()
+            # Annotate image with object detection boxes
+            image = draw_bounding_boxes(image, boxes, class_names=self.object_detector.classes)
+            # Convert the annotated OpenDR image to ROS2 image message using bridge and publish it
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/image_objects_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/objects")
+    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--backbone", help="Backbone network, defaults to vgg16_atrous",
+                        type=str, default="vgg16_atrous", choices=["vgg16_atrous"])
+    parser.add_argument("--nms_type", help="Non-Maximum Suppression type, defaults to \"default\", options are "
+                                           "\"seq2seq-nms\", \"soft-nms\", \"fast-nms\", \"cluster-nms\"",
+                        type=str, default="default",
+                        choices=["default", "seq2seq-nms", "soft-nms", "fast-nms", "cluster-nms"])
+    args = parser.parse_args()
 
-
-if __name__ == '__main__':
-    # Automatically run on GPU/CPU
     try:
-        if mx.context.num_gpus() > 0:
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and mx.context.num_gpus() > 0:
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
+        print("Using CPU.")
+        device = "cpu"
 
-    # initialize ROS node
-    rospy.init_node('opendr_object_detection', anonymous=True)
-    rospy.loginfo("Object detection node started!")
+    object_detection_ssd_node = ObjectDetectionSSDNode(device=device, backbone=args.backbone, nms_type=args.nms_type,
+                                                       input_rgb_image_topic=args.input_rgb_image_topic,
+                                                       output_rgb_image_topic=args.output_rgb_image_topic,
+                                                       detections_topic=args.detections_topic)
+    object_detection_ssd_node.listen()
 
-    input_image_topic = rospy.get_param("~input_image_topic", "/videofile/image_raw")
 
-    # created node object
-    object_detection_node = ObjectDetectionSSDNode(device=device, input_image_topic=input_image_topic)
-    # begin ROS communications
-    rospy.spin()
+if __name__ == '__main__':
+    main()
diff --git a/projects/opendr_ws/src/perception/scripts/object_detection_2d_yolov3.py b/projects/opendr_ws/src/perception/scripts/object_detection_2d_yolov3.py
index 93155f148bc60c76242197d94ce8c3215f60aed7..9c3309b45435385a9bf51f00f5a18b91f8a39ead 100755
--- a/projects/opendr_ws/src/perception/scripts/object_detection_2d_yolov3.py
+++ b/projects/opendr_ws/src/perception/scripts/object_detection_2d_yolov3.py
@@ -13,111 +13,128 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-import rospy
+import argparse
 import mxnet as mx
-import numpy as np
+
+import rospy
 from vision_msgs.msg import Detection2DArray
 from sensor_msgs.msg import Image as ROS_Image
 from opendr_bridge import ROSBridge
+
 from opendr.engine.data import Image
 from opendr.perception.object_detection_2d import YOLOv3DetectorLearner
 from opendr.perception.object_detection_2d import draw_bounding_boxes
 
 
 class ObjectDetectionYOLONode:
-    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_boxes_annotated",
-                 detections_topic="/opendr/objects", device="cuda", backbone="darknet53"):
+
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_objects_annotated", detections_topic="/opendr/objects",
+                 device="cuda", backbone="darknet53"):
         """
-        Creates a ROS Node for face detection
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
-        annotated image)
-        :type output_image_topic: str
-        :param detections_topic: Topic to which we are publishing the annotations (if None, we are not publishing
-        annotated pose annotations)
+        Creates a ROS Node for object detection with YOLOv3.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no object detection message
+        is published)
         :type detections_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         :param backbone: backbone network
         :type backbone: str
         """
+        self.input_rgb_image_topic = input_rgb_image_topic
 
-        # Initialize the face detector
-        self.object_detector = YOLOv3DetectorLearner(backbone=backbone, device=device)
-        self.object_detector.download(path=".", verbose=True)
-        self.object_detector.load("yolo_default")
-        self.class_names = self.object_detector.classes
-
-        # Initialize OpenDR ROSBridge object
-        self.bridge = ROSBridge()
-
-        # setup communications
-        if output_image_topic is not None:
-            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
         else:
             self.image_publisher = None
 
         if detections_topic is not None:
-            self.bbox_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=10)
+            self.object_publisher = rospy.Publisher(detections_topic, Detection2DArray, queue_size=1)
         else:
-            self.bbox_publisher = None
+            self.object_publisher = None
+
+        self.bridge = ROSBridge()
 
-        rospy.Subscriber(input_image_topic, ROS_Image, self.callback)
+        # Initialize the object detector
+        self.object_detector = YOLOv3DetectorLearner(backbone=backbone, device=device)
+        self.object_detector.download(path=".", verbose=True)
+        self.object_detector.load("yolo_default")
+
+    def listen(self):
+        """
+        Start the node and begin processing input data.
+        """
+        rospy.init_node('object_detection_yolov3_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Object detection YOLOV3 node started.")
+        rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
+        Callback that processes the input data and publishes to the corresponding topics.
         :param data: input message
         :type data: sensor_msgs.msg.Image
         """
-
         # Convert sensor_msgs.msg.Image into OpenDR Image
         image = self.bridge.from_ros_image(data, encoding='bgr8')
-        rospy.loginfo("image info: {}".format(image.numpy().shape))
 
-        # Run pose estimation
+        # Run object detection
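+        # The 0.1 threshold is quite permissive; raise it to filter out low-confidence detections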
         boxes = self.object_detector.infer(image, threshold=0.1, keep_size=False)
 
-        # Get an OpenCV image back
-        image = np.float32(image.opencv())
+        # Publish detections in ROS message
+        ros_boxes = self.bridge.to_ros_bounding_box_list(boxes)  # Convert to ROS bounding_box_list
+        if self.object_publisher is not None:
+            self.object_publisher.publish(ros_boxes)
 
-        # Convert detected boxes to ROS type and publish
-        ros_boxes = self.bridge.to_ros_boxes(boxes)
-        if self.bbox_publisher is not None:
-            self.bbox_publisher.publish(ros_boxes)
-            rospy.loginfo("Published face boxes")
-
-        # Annotate image and publish result
-        # NOTE: converting back to OpenDR BoundingBoxList is unnecessary here,
-        # only used to test the corresponding bridge methods
-        odr_boxes = self.bridge.from_ros_boxes(ros_boxes)
-        image = draw_bounding_boxes(image, odr_boxes, class_names=self.class_names)
         if self.image_publisher is not None:
-            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
-            self.image_publisher.publish(message)
-            rospy.loginfo("Published annotated image")
-
+            # Get an OpenCV image back
+            image = image.opencv()
+            # Annotate image with object detection boxes
+            image = draw_bounding_boxes(image, boxes, class_names=self.object_detector.classes)
+            # Convert the annotated OpenDR image to ROS2 image message using bridge and publish it
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/image_objects_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/objects")
+    parser.add_argument("--device", help="Device to use, either \"cpu\" or \"cuda\", defaults to \"cuda\"",
+                        type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--backbone", help="Backbone network, defaults to \"darknet53\"",
+                        type=str, default="darknet53", choices=["darknet53"])
+    args = parser.parse_args()
 
-if __name__ == '__main__':
-    # Automatically run on GPU/CPU
     try:
-        if mx.context.num_gpus() > 0:
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and mx.context.num_gpus() > 0:
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
+        print("Using CPU.")
+        device = "cpu"
 
-    # initialize ROS node
-    rospy.init_node('opendr_object_detection', anonymous=True)
-    rospy.loginfo("Object detection node started!")
+    object_detection_yolov3_node = ObjectDetectionYOLONode(device=device, backbone=args.backbone,
+                                                           input_rgb_image_topic=args.input_rgb_image_topic,
+                                                           output_rgb_image_topic=args.output_rgb_image_topic,
+                                                           detections_topic=args.detections_topic)
+    object_detection_yolov3_node.listen()
 
-    input_image_topic = rospy.get_param("~input_image_topic", "/videofile/image_raw")
 
-    # created node object
-    object_detection_node = ObjectDetectionYOLONode(device=device, input_image_topic=input_image_topic)
-    # begin ROS communications
-    rospy.spin()
+if __name__ == '__main__':
+    main()
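
Several of the updated nodes share the same argparse idiom for disabling a publisher from the command line: the topic argument's type is a callable that maps the literal string "none" to None. A minimal, self-contained sketch of that convention (the flag and default mirror the node above; the parse_args list is a hypothetical invocation):

import argparse

def optional_topic(value):
    # "None"/"none" on the command line disables the corresponding publisher
    return value if value.lower() != "none" else None

parser = argparse.ArgumentParser()
parser.add_argument("-o", "--output_rgb_image_topic", type=optional_topic,
                    default="/opendr/image_objects_annotated")
args = parser.parse_args(["-o", "None"])
assert args.output_rgb_image_topic is None  # the node will then skip creating the image publisher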
diff --git a/projects/opendr_ws/src/perception/scripts/panoptic_segmentation_efficient_ps.py b/projects/opendr_ws/src/perception/scripts/panoptic_segmentation_efficient_ps.py
index bce86e46ea3a33dec21c95e5e70a3e47e453369e..33392e316b7c28fd62a258251a16d14cd6565949 100755
--- a/projects/opendr_ws/src/perception/scripts/panoptic_segmentation_efficient_ps.py
+++ b/projects/opendr_ws/src/perception/scripts/panoptic_segmentation_efficient_ps.py
@@ -13,6 +13,8 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import sys
+from pathlib import Path
 import argparse
 from typing import Optional
 
@@ -29,27 +31,31 @@ matplotlib.use('Agg')
 
 class EfficientPsNode:
     def __init__(self,
+                 input_rgb_image_topic: str,
                  checkpoint: str,
-                 input_image_topic: str,
                  output_heatmap_topic: Optional[str] = None,
-                 output_visualization_topic: Optional[str] = None,
+                 output_rgb_visualization_topic: Optional[str] = None,
                  detailed_visualization: bool = False
                  ):
         """
         Initialize the EfficientPS ROS node and create an instance of the respective learner class.
-        :param checkpoint: Path to a saved model
+        :param checkpoint: This is either a path to a saved model or one of [cityscapes, kitti] to download
+            pre-trained model weights.
         :type checkpoint: str
-        :param input_image_topic: ROS topic for the input image stream
-        :type input_image_topic: str
+        :param input_rgb_image_topic: ROS topic for the input image stream
+        :type input_rgb_image_topic: str
         :param output_heatmap_topic: ROS topic for the predicted semantic and instance maps
         :type output_heatmap_topic: str
-        :param output_visualization_topic: ROS topic for the generated visualization of the panoptic map
-        :type output_visualization_topic: str
+        :param output_rgb_visualization_topic: ROS topic for the generated visualization of the panoptic map
+        :type output_rgb_visualization_topic: str
+        :param detailed_visualization: if True, generate a combined overview of the input RGB image and the
+            semantic, instance, and panoptic segmentation maps and publish it on output_rgb_visualization_topic
+        :type detailed_visualization: bool
         """
+        self.input_rgb_image_topic = input_rgb_image_topic
         self.checkpoint = checkpoint
-        self.input_image_topic = input_image_topic
         self.output_heatmap_topic = output_heatmap_topic
-        self.output_visualization_topic = output_visualization_topic
+        self.output_rgb_visualization_topic = output_rgb_visualization_topic
         self.detailed_visualization = detailed_visualization
 
         # Initialize all ROS related things
@@ -59,14 +65,27 @@ class EfficientPsNode:
         self._visualization_publisher = None
 
         # Initialize the panoptic segmentation network
-        self._learner = EfficientPsLearner()
+        config_file = Path(sys.modules[
+                               EfficientPsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_cityscapes.py'
+        self._learner = EfficientPsLearner(str(config_file))
+
+        # Other
+        self._tmp_folder = Path(__file__).parent.parent / 'tmp' / 'efficientps'
+        self._tmp_folder.mkdir(exist_ok=True, parents=True)
 
     def _init_learner(self) -> bool:
         """
-        Load the weights from the specified checkpoint file.
+        The model can be initialized via
+        1. downloading pre-trained weights for Cityscapes or KITTI.
+        2. passing a path to an existing checkpoint file.
 
         This has not been done in the __init__() function since logging is available only once the node is registered.
         """
+        if self.checkpoint in ['cityscapes', 'kitti']:
+            file_path = EfficientPsLearner.download(str(self._tmp_folder),
+                                                    trained_on=self.checkpoint)
+            self.checkpoint = file_path
+
         if self._learner.load(self.checkpoint):
             rospy.loginfo('Successfully loaded the checkpoint.')
             return True
@@ -78,19 +97,20 @@ class EfficientPsNode:
         """
         Subscribe to all relevant topics.
         """
-        rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
 
     def _init_publisher(self):
         """
         Set up the publishers as requested by the user.
         """
         if self.output_heatmap_topic is not None:
-            self._instance_heatmap_publisher = rospy.Publisher(f'{self.output_heatmap_topic}/instance', ROS_Image,
-                                                               queue_size=10)
-            self._semantic_heatmap_publisher = rospy.Publisher(f'{self.output_heatmap_topic}/semantic', ROS_Image,
-                                                               queue_size=10)
-        if self.output_visualization_topic is not None:
-            self._visualization_publisher = rospy.Publisher(self.output_visualization_topic, ROS_Image, queue_size=10)
+            self._instance_heatmap_publisher = rospy.Publisher(
+                f'{self.output_heatmap_topic}/instance', ROS_Image, queue_size=10)
+            self._semantic_heatmap_publisher = rospy.Publisher(
+                f'{self.output_heatmap_topic}/semantic', ROS_Image, queue_size=10)
+        if self.output_rgb_visualization_topic is not None:
+            self._visualization_publisher = rospy.Publisher(self.output_rgb_visualization_topic,
+                                                            ROS_Image, queue_size=10)
 
     def listen(self):
         """
@@ -128,26 +148,31 @@ class EfficientPsNode:
             if self._semantic_heatmap_publisher is not None and self._semantic_heatmap_publisher.get_num_connections() > 0:
                 self._semantic_heatmap_publisher.publish(self._bridge.to_ros_image(prediction[1]))
 
-        except Exception:
-            rospy.logwarn('Failed to generate prediction.')
+        except Exception as e:
+            rospy.logwarn(f'Failed to generate prediction: {e}')
 
 
 if __name__ == '__main__':
-    parser = argparse.ArgumentParser()
-    parser.add_argument('checkpoint', type=str, help='load the model weights from the provided path')
-    parser.add_argument('image_topic', type=str, help='listen to images on this topic')
-    parser.add_argument('--heatmap_topic', type=str, help='publish the semantic and instance maps on this topic')
-    parser.add_argument('--visualization_topic', type=str,
+    parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
+    parser.add_argument('input_rgb_image_topic', type=str, nargs='?', default='/usb_cam/image_raw',
+                        help='listen to RGB images on this topic')
+    parser.add_argument('--checkpoint', type=str, default='cityscapes',
+                        help='download pretrained models [cityscapes, kitti] or load from the provided path')
+    parser.add_argument('--output_heatmap_topic', type=str, default='/opendr/panoptic',
+                        help='publish the semantic and instance maps on this topic as "OUTPUT_HEATMAP_TOPIC/semantic" \
+                             and "OUTPUT_HEATMAP_TOPIC/instance"')
+    parser.add_argument('--output_rgb_image_topic', type=str,
+                        default='/opendr/panoptic/rgb_visualization',
                         help='publish the panoptic segmentation map as an RGB image on this topic or a more detailed \
                               overview if using the --detailed_visualization flag')
     parser.add_argument('--detailed_visualization', action='store_true',
                         help='generate a combined overview of the input RGB image and the semantic, instance, and \
-                              panoptic segmentation maps')
+                              panoptic segmentation maps and publish it on OUTPUT_RGB_IMAGE_TOPIC')
     args = parser.parse_args()
 
-    efficient_ps_node = EfficientPsNode(args.checkpoint,
-                                        args.image_topic,
-                                        args.heatmap_topic,
-                                        args.visualization_topic,
+    efficient_ps_node = EfficientPsNode(args.input_rgb_image_topic,
+                                        args.checkpoint,
+                                        args.output_heatmap_topic,
+                                        args.output_rgb_image_topic,
                                         args.detailed_visualization)
     efficient_ps_node.listen()
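
The checkpoint handling added to _init_learner() can be summarized in a small helper. This is only a sketch, assuming the EfficientPsLearner import path below is the toolkit's public one and that download() returns the path of the fetched weights, as in the hunk above:

from pathlib import Path
from opendr.perception.panoptic_segmentation import EfficientPsLearner  # import path is an assumption

def resolve_checkpoint(checkpoint: str, tmp_folder: Path) -> str:
    # 'cityscapes'/'kitti' triggers a download of pre-trained weights; anything else is treated as a path
    if checkpoint in ['cityscapes', 'kitti']:
        return EfficientPsLearner.download(str(tmp_folder), trained_on=checkpoint)
    return checkpoint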
diff --git a/projects/opendr_ws/src/perception/scripts/pose_estimation.py b/projects/opendr_ws/src/perception/scripts/pose_estimation.py
index 855ada40cf28c9f0a2076bf4e970f8d6c4b769a9..87bf71693e938de9145c118a9539da4aaff16229 100644
--- a/projects/opendr_ws/src/perception/scripts/pose_estimation.py
+++ b/projects/opendr_ws/src/perception/scripts/pose_estimation.py
@@ -13,104 +13,150 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import argparse
+import torch
 
 import rospy
-import torch
-from vision_msgs.msg import Detection2DArray
 from sensor_msgs.msg import Image as ROS_Image
+from ros_bridge.msg import OpenDRPose2D
 from opendr_bridge import ROSBridge
+
+from opendr.engine.data import Image
 from opendr.perception.pose_estimation import draw
 from opendr.perception.pose_estimation import LightweightOpenPoseLearner
-from opendr.engine.data import Image
 
 
 class PoseEstimationNode:
 
-    def __init__(self, input_image_topic="/usb_cam/image_raw", output_image_topic="/opendr/image_pose_annotated",
-                 pose_annotations_topic="/opendr/poses", device="cuda"):
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw",
+                 output_rgb_image_topic="/opendr/image_pose_annotated", detections_topic="/opendr/poses", device="cuda",
+                 num_refinement_stages=2, use_stride=False, half_precision=False):
         """
-        Creates a ROS Node for pose detection
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_image_topic: Topic to which we are publishing the annotated image (if None, we are not publishing
-        annotated image)
-        :type output_image_topic: str
-        :param pose_annotations_topic: Topic to which we are publishing the annotations (if None, we are not publishing
-        annotated pose annotations)
-        :type pose_annotations_topic:  str
+        Creates a ROS Node for pose estimation with Lightweight OpenPose.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the annotated image (if None, no annotated
+        image is published)
+        :type output_rgb_image_topic: str
+        :param detections_topic: Topic to which we are publishing the annotations (if None, no pose detection message
+        is published)
+        :type detections_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
+        :param num_refinement_stages: Specifies the number of pose estimation refinement stages added to the
+        model's head, including the initial stage. Can be 0, 1 or 2, with more stages meaning slower but more
+        accurate inference
+        :type num_refinement_stages: int
+        :param use_stride: Whether to add a stride value in the model, which reduces accuracy but increases
+        inference speed
+        :type use_stride: bool
+        :param half_precision: Enables inference using half (fp16) precision instead of single (fp32) precision.
+        Valid only for GPU-based inference
+        :type half_precision: bool
         """
-        if output_image_topic is not None:
-            self.image_publisher = rospy.Publisher(output_image_topic, ROS_Image, queue_size=10)
+        self.input_rgb_image_topic = input_rgb_image_topic
+
+        if output_rgb_image_topic is not None:
+            self.image_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
         else:
             self.image_publisher = None
 
-        if pose_annotations_topic is not None:
-            self.pose_publisher = rospy.Publisher(pose_annotations_topic, Detection2DArray, queue_size=10)
+        if detections_topic is not None:
+            self.pose_publisher = rospy.Publisher(detections_topic, OpenDRPose2D, queue_size=1)
         else:
             self.pose_publisher = None
 
-        self.input_image_topic = input_image_topic
-
         self.bridge = ROSBridge()
 
-        # Initialize the pose estimation
-        self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=0,
-                                                         mobilenet_use_stride=False,
-                                                         half_precision=False)
+        # Initialize the pose estimation learner
+        self.pose_estimator = LightweightOpenPoseLearner(device=device, num_refinement_stages=num_refinement_stages,
+                                                         mobilenet_use_stride=use_stride,
+                                                         half_precision=half_precision)
         self.pose_estimator.download(path=".", verbose=True)
         self.pose_estimator.load("openpose_default")
 
     def listen(self):
         """
-        Start the node and begin processing input data
+        Start the node and begin processing input data.
         """
-        rospy.init_node('opendr_pose_estimation', anonymous=True)
-        rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)
-        rospy.loginfo("Pose estimation node started!")
+        rospy.init_node('pose_estimation_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Pose estimation node started.")
         rospy.spin()
 
     def callback(self, data):
         """
-        Callback that process the input data and publishes to the corresponding topics
-        :param data: input message
+        Callback that processes the input data and publishes to the corresponding topics.
+        :param data: Input image message
         :type data: sensor_msgs.msg.Image
         """
-
         # Convert sensor_msgs.msg.Image into OpenDR Image
         image = self.bridge.from_ros_image(data, encoding='bgr8')
 
         # Run pose estimation
         poses = self.pose_estimator.infer(image)
 
-        # Get an OpenCV image back
-        image = image.opencv()
-        #  Annotate image and publish results
-        for pose in poses:
-            if self.pose_publisher is not None:
-                ros_pose = self.bridge.to_ros_pose(pose)
-                self.pose_publisher.publish(ros_pose)
-                # We get can the data back using self.bridge.from_ros_pose(ros_pose)
-                # e.g., opendr_pose = self.bridge.from_ros_pose(ros_pose)
-                draw(image, pose)
+        # Publish detections as ROS messages
+        if self.pose_publisher is not None:
+            for pose in poses:
+                # Convert OpenDR pose to ROS pose message using bridge and publish it
+                self.pose_publisher.publish(self.bridge.to_ros_pose(pose))
 
         if self.image_publisher is not None:
-            message = self.bridge.to_ros_image(Image(image), encoding='bgr8')
-            self.image_publisher.publish(message)
-
+            # Get an OpenCV image back
+            image = image.opencv()
+            # Annotate image with poses
+            for pose in poses:
+                draw(image, pose)
+            # Convert the annotated OpenDR image to ROS image message using bridge and publish it
+            self.image_publisher.publish(self.bridge.to_ros_image(Image(image), encoding='bgr8'))
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_rgb_image_topic", help="Topic name for output annotated rgb image",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/image_pose_annotated")
+    parser.add_argument("-d", "--detections_topic", help="Topic name for detection messages",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/poses")
+    parser.add_argument("--device", help="Device to use, either \"cpu\" or \"cuda\", defaults to \"cuda\"",
+                        type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--accelerate", help="Enables acceleration flags (e.g., stride)", default=False,
+                        action="store_true")
+    args = parser.parse_args()
 
-if __name__ == '__main__':
-    # Select the device for running the
     try:
-        if torch.cuda.is_available():
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
+        print("Using CPU.")
+        device = "cpu"
+
+    if args.accelerate:
+        stride = True
+        stages = 0
+        half_prec = True
+    else:
+        stride = False
+        stages = 2
+        half_prec = False
+
+    pose_estimator_node = PoseEstimationNode(device=device,
+                                             input_rgb_image_topic=args.input_rgb_image_topic,
+                                             output_rgb_image_topic=args.output_rgb_image_topic,
+                                             detections_topic=args.detections_topic,
+                                             num_refinement_stages=stages, use_stride=stride, half_precision=half_prec)
+    pose_estimator_node.listen()
 
-    pose_estimation_node = PoseEstimationNode(device=device)
-    pose_estimation_node.listen()
+
+if __name__ == '__main__':
+    main()
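
For reference, a minimal ROS-free sketch of what the new --accelerate flag corresponds to in the learner configuration (the input and output file names are placeholders; half precision is kept off here because, as the docstring notes, it is only valid on CUDA):

import cv2
from opendr.engine.data import Image
from opendr.perception.pose_estimation import LightweightOpenPoseLearner, draw

# --accelerate maps to num_refinement_stages=0, mobilenet_use_stride=True, half_precision=True (CUDA only);
# we stay on CPU here for portability.
learner = LightweightOpenPoseLearner(device="cpu", num_refinement_stages=0,
                                     mobilenet_use_stride=True, half_precision=False)
learner.download(path=".", verbose=True)
learner.load("openpose_default")

frame = cv2.imread("frame.jpg")          # hypothetical input image
poses = learner.infer(Image(frame))
for pose in poses:
    draw(frame, pose)                    # annotate in place, as the node does
cv2.imwrite("frame_annotated.jpg", frame)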
diff --git a/projects/opendr_ws/src/perception/scripts/rgbd_hand_gesture_recognition.py b/projects/opendr_ws/src/perception/scripts/rgbd_hand_gesture_recognition.py
index 69150856ad9eb6e9683da6f86c06b138cd547cc4..a21f10974c4ebbd5f66f945e4782494d5fbb9e1d 100755
--- a/projects/opendr_ws/src/perception/scripts/rgbd_hand_gesture_recognition.py
+++ b/projects/opendr_ws/src/perception/scripts/rgbd_hand_gesture_recognition.py
@@ -14,43 +14,44 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
+import argparse
+import os
+import cv2
+import numpy as np
+import torch
 
 import rospy
-import torch
-import numpy as np
+import message_filters
 from sensor_msgs.msg import Image as ROS_Image
-from opendr_bridge import ROSBridge
-import os
-from opendr.perception.multimodal_human_centric import RgbdHandGestureLearner
-from opendr.engine.data import Image
 from vision_msgs.msg import Classification2D
-import message_filters
-import cv2
+
+from opendr.engine.data import Image
+from opendr.perception.multimodal_human_centric import RgbdHandGestureLearner
+from opendr_bridge import ROSBridge
 
 
 class RgbdHandGestureNode:
 
-    def __init__(self, input_image_topic="/usb_cam/image_raw", input_depth_image_topic="/usb_cam/image_raw",
-                 gesture_annotations_topic="/opendr/gestures", device="cuda"):
+    def __init__(self, input_rgb_image_topic="/kinect2/qhd/image_color_rect",
+                 input_depth_image_topic="/kinect2/qhd/image_depth_rect",
+                 output_gestures_topic="/opendr/gestures", device="cuda"):
         """
-        Creates a ROS Node for gesture recognition from RGBD
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
+        Creates a ROS Node for gesture recognition from RGBD. It is assumed that the following drivers have been
+        installed: https://github.com/OpenKinect/libfreenect2 and https://github.com/code-iai/iai_kinect2.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
         :param input_depth_image_topic: Topic from which we are reading the input depth image
         :type input_depth_image_topic: str
-        :param gesture_annotations_topic: Topic to which we are publishing the predicted gesture class
-        :type gesture_annotations_topic: str
+        :param output_gestures_topic: Topic to which we are publishing the predicted gesture class
+        :type output_gestures_topic: str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         """
 
-        self.gesture_publisher = rospy.Publisher(gesture_annotations_topic, Classification2D, queue_size=10)
+        self.input_rgb_image_topic = input_rgb_image_topic
+        self.input_depth_image_topic = input_depth_image_topic
 
-        image_sub = message_filters.Subscriber(input_image_topic, ROS_Image)
-        depth_sub = message_filters.Subscriber(input_depth_image_topic, ROS_Image)
-        # synchronize image and depth data topics
-        ts = message_filters.TimeSynchronizer([image_sub, depth_sub], 10)
-        ts.registerCallback(self.callback)
+        self.gesture_publisher = rospy.Publisher(output_gestures_topic, Classification2D, queue_size=10)
 
         self.bridge = ROSBridge()
 
@@ -70,23 +71,30 @@ class RgbdHandGestureNode:
         Start the node and begin processing input data
         """
         rospy.init_node('opendr_gesture_recognition', anonymous=True)
+
+        image_sub = message_filters.Subscriber(self.input_rgb_image_topic, ROS_Image, queue_size=1, buff_size=10000000)
+        depth_sub = message_filters.Subscriber(self.input_depth_image_topic, ROS_Image, queue_size=1, buff_size=10000000)
+        # synchronize image and depth data topics
+        ts = message_filters.TimeSynchronizer([image_sub, depth_sub], 10)
+        ts.registerCallback(self.callback)
+
         rospy.loginfo("RGBD gesture recognition node started!")
         rospy.spin()
 
-    def callback(self, image_data, depth_data):
+    def callback(self, rgb_data, depth_data):
         """
         Callback that process the input data and publishes to the corresponding topics
-        :param image_data: input image message
-        :type image_data: sensor_msgs.msg.Image
+        :param rgb_data: input image message
+        :type rgb_data: sensor_msgs.msg.Image
         :param depth_data: input depth image message
         :type depth_data: sensor_msgs.msg.Image
         """
 
         # Convert sensor_msgs.msg.Image into OpenDR Image and preprocess
-        image = self.bridge.from_ros_image(image_data, encoding='bgr8')
+        rgb_image = self.bridge.from_ros_image(rgb_data, encoding='bgr8')
         depth_data.encoding = 'mono16'
         depth_image = self.bridge.from_ros_image_to_depth(depth_data, encoding='mono16')
-        img = self.preprocess(image, depth_image)
+        img = self.preprocess(rgb_image, depth_image)
 
         # Run gesture recognition
         gesture_class = self.gesture_learner.infer(img)
@@ -95,37 +103,58 @@ class RgbdHandGestureNode:
         ros_gesture = self.bridge.from_category_to_rosclass(gesture_class)
         self.gesture_publisher.publish(ros_gesture)
 
-    def preprocess(self, image, depth_img):
-        '''
-        Preprocess image, depth_image and concatenate them
-        :param image_data: input image
-        :type image_data: engine.data.Image
-        :param depth_data: input depth image
-        :type depth_data: engine.data.Image
-        '''
-        image = image.convert(format='channels_last') / (2**8 - 1)
-        depth_img = depth_img.convert(format='channels_last') / (2**16 - 1)
+    def preprocess(self, rgb_image, depth_image):
+        """
+        Preprocess rgb_image, depth_image and concatenate them
+        :param rgb_image: input RGB image
+        :type rgb_image: engine.data.Image
+        :param depth_image: input depth image
+        :type depth_image: engine.data.Image
+        """
+        rgb_image = rgb_image.convert(format='channels_last') / (2**8 - 1)
+        depth_image = depth_image.convert(format='channels_last') / (2**16 - 1)
 
         # resize the images to 224x224
-        image = cv2.resize(image, (224, 224))
-        depth_img = cv2.resize(depth_img, (224, 224))
+        rgb_image = cv2.resize(rgb_image, (224, 224))
+        depth_image = cv2.resize(depth_image, (224, 224))
 
         # concatenate and standardize
-        img = np.concatenate([image, np.expand_dims(depth_img, axis=-1)], axis=-1)
+        img = np.concatenate([rgb_image, np.expand_dims(depth_image, axis=-1)], axis=-1)
         img = (img - self.mean) / self.std
         img = Image(img, dtype=np.float32)
         return img
 
+
 if __name__ == '__main__':
+    # Default topics are according to the Kinect v2 drivers at https://github.com/OpenKinect/libfreenect2
+    # and https://github.com/code-iai/iai_kinect2
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/kinect2/qhd/image_color_rect")
+    parser.add_argument("--input_depth_image_topic", help="Topic name for input depth image",
+                        type=str, default="/kinect2/qhd/image_depth_rect")
+    parser.add_argument("--output_gestures_topic", help="Topic name for predicted gesture class",
+                        type=str, default="/opendr/gestures")
+    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda",
+                        choices=["cuda", "cpu"])
+
+    args = parser.parse_args()
+
     # Select the device for running
     try:
-        device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
+            print("GPU not found. Using CPU instead.")
+            device = "cpu"
+        else:
+            print("Using CPU")
+            device = "cpu"
     except:
-        device = 'cpu'
+        print("Using CPU")
+        device = "cpu"
 
-    # default topics are according to kinectv2 drivers at https://github.com/OpenKinect/libfreenect2
-    # and https://github.com/code-iai-iai_kinect2
-    depth_topic = "/kinect2/qhd/image_depth_rect"
-    image_topic = "/kinect2/qhd/image_color_rect"
-    gesture_node = RgbdHandGestureNode(input_image_topic=image_topic, input_depth_image_topic=depth_topic, device=device)
+    gesture_node = RgbdHandGestureNode(input_rgb_image_topic=args.input_rgb_image_topic,
+                                       input_depth_image_topic=args.input_depth_image_topic,
+                                       output_gestures_topic=args.output_gestures_topic, device=device)
     gesture_node.listen()
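
The preprocessing performed by preprocess() boils down to per-modality normalization, resizing to 224x224, channel-wise concatenation and standardization. A standalone sketch with NumPy arrays (the mean/std statistics are kept as parameters because their exact values are not shown in this hunk):

import cv2
import numpy as np

def preprocess(rgb, depth, mean, std):
    # rgb: HxWx3 uint8, depth: HxW uint16; mean/std are the 4-channel statistics the node keeps
    # in self.mean / self.std.
    rgb = cv2.resize(rgb.astype(np.float32) / (2 ** 8 - 1), (224, 224))
    depth = cv2.resize(depth.astype(np.float32) / (2 ** 16 - 1), (224, 224))
    img = np.concatenate([rgb, np.expand_dims(depth, axis=-1)], axis=-1)
    return ((img - mean) / std).astype(np.float32)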
diff --git a/projects/opendr_ws/src/perception/scripts/semantic_segmentation_bisenet.py b/projects/opendr_ws/src/perception/scripts/semantic_segmentation_bisenet.py
index 32390c91578e59bf157190349257f726397ec00d..3795c48993c3b4e6b120e19e0e9b7c1be8c71ead 100644
--- a/projects/opendr_ws/src/perception/scripts/semantic_segmentation_bisenet.py
+++ b/projects/opendr_ws/src/perception/scripts/semantic_segmentation_bisenet.py
@@ -14,98 +14,180 @@
 # limitations under the License.
 
 import argparse
+import numpy as np
 import torch
+import cv2
+import colorsys
+
 import rospy
 from sensor_msgs.msg import Image as ROS_Image
 from opendr_bridge import ROSBridge
+
 from opendr.engine.data import Image
+from opendr.engine.target import Heatmap
 from opendr.perception.semantic_segmentation import BisenetLearner
-import numpy as np
-import cv2
 
 
 class BisenetNode:
-    def __init__(self,
-                 input_image_topic,
-                 output_heatmap_topic=None,
-                 device="cuda"
-                 ):
+
+    def __init__(self, input_rgb_image_topic="/usb_cam/image_raw", output_heatmap_topic="/opendr/heatmap",
+                 output_rgb_image_topic="/opendr/heatmap_visualization", device="cuda"):
         """
-        Initialize the Bisenet ROS node and create an instance of the respective learner class.
-        :param input_image_topic: ROS topic for the input image
-        :type input_image_topic: str
-        :param output_heatmap_topic: ROS topic for the predicted heatmap
+        Creates a ROS Node for semantic segmentation with BiSeNet.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_heatmap_topic: Topic to which we are publishing the heatmap in the form of a ROS image containing
+        class ids
         :type output_heatmap_topic: str
+        :param output_rgb_image_topic: Topic to which we are publishing the heatmap image blended with the
+        input image and a class legend for visualization purposes
+        :type output_rgb_image_topic: str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
         """
-        self.input_image_topic = input_image_topic
-        self.output_heatmap_topic = output_heatmap_topic
+        self.input_rgb_image_topic = input_rgb_image_topic
 
-        if self.output_heatmap_topic is not None:
-            self._heatmap_publisher = rospy.Publisher(f'{self.output_heatmap_topic}/semantic', ROS_Image, queue_size=10)
+        if output_heatmap_topic is not None:
+            self.heatmap_publisher = rospy.Publisher(output_heatmap_topic, ROS_Image, queue_size=1)
         else:
-            self._heatmap_publisher = None
+            self.heatmap_publisher = None
 
-        rospy.Subscriber(self.input_image_topic, ROS_Image, self.callback)
+        if output_rgb_image_topic is not None:
+            self.visualization_publisher = rospy.Publisher(output_rgb_image_topic, ROS_Image, queue_size=1)
+        else:
+            self.visualization_publisher = None
 
-        # Initialize OpenDR ROSBridge object
-        self._bridge = ROSBridge()
+        self.bridge = ROSBridge()
 
         # Initialize the semantic segmentation model
-        self._learner = BisenetLearner(device=device)
-        self._learner.download(path="bisenet_camvid")
-        self._learner.load("bisenet_camvid")
+        self.learner = BisenetLearner(device=device)
+        self.learner.download(path="bisenet_camvid")
+        self.learner.load("bisenet_camvid")
 
-        self._colors = np.random.randint(0, 256, (256, 3), dtype=np.uint8)
+        self.class_names = ["Bicyclist", "Building", "Car", "Column Pole", "Fence", "Pedestrian", "Road", "Sidewalk",
+                            "Sign Symbol", "Sky", "Tree", "Unknown"]
+        self.colors = self.getDistinctColors(len(self.class_names))  # Generate n distinct colors
 
     def listen(self):
         """
-        Start the node and begin processing input data
+        Start the node and begin processing input data.
         """
-        rospy.init_node('bisenet', anonymous=True)
-        rospy.loginfo("Bisenet node started!")
+        rospy.init_node('semantic_segmentation_bisenet_node', anonymous=True)
+        rospy.Subscriber(self.input_rgb_image_topic, ROS_Image, self.callback, queue_size=1, buff_size=10000000)
+        rospy.loginfo("Semantic segmentation BiSeNet node started.")
         rospy.spin()
 
-    def callback(self, data: ROS_Image):
+    def callback(self, data):
         """
-        Predict the heatmap from the input image and publish the results.
+        Callback that processes the input data and publishes to the corresponding topics.
         :param data: Input image message
         :type data: sensor_msgs.msg.Image
         """
-        # Convert sensor_msgs.msg.Image to OpenDR Image
-        image = self._bridge.from_ros_image(data)
+        # Convert sensor_msgs.msg.Image into OpenDR Image
+        image = self.bridge.from_ros_image(data, encoding='bgr8')
 
         try:
-            # Retrieve the OpenDR heatmap
-            prediction = self._learner.infer(image)
-
-            if self._heatmap_publisher is not None and self._heatmap_publisher.get_num_connections() > 0:
-                heatmap_np = prediction.numpy()
-                heatmap_o = self._colors[heatmap_np]
-                heatmap_o = cv2.resize(np.uint8(heatmap_o), (960, 720))
-                self._heatmap_publisher.publish(self._bridge.to_ros_image(Image(heatmap_o), encoding='bgr8'))
-
-        except Exception:
+            # Run semantic segmentation to retrieve the OpenDR heatmap
+            heatmap = self.learner.infer(image)
+
+            # Publish heatmap in the form of an image containing class ids
+            if self.heatmap_publisher is not None:
+                heatmap = Heatmap(heatmap.data.astype(np.uint8))  # Convert to uint8
+                self.heatmap_publisher.publish(self.bridge.to_ros_image(heatmap))
+
+            # Publish heatmap color visualization blended with the input image and a class color legend
+            if self.visualization_publisher is not None:
+                heatmap_colors = Image(self.colors[heatmap.numpy()])
+                image = Image(cv2.resize(image.convert("channels_last", "bgr"), (960, 720)))
+                alpha = 0.4  # 1.0 means full input image, 0.0 means full heatmap
+                beta = (1.0 - alpha)
+                image_blended = cv2.addWeighted(image.opencv(), alpha, heatmap_colors.opencv(), beta, 0.0)
+                # Add a legend
+                image_blended = self.addLegend(image_blended, np.unique(heatmap.data))
+
+                self.visualization_publisher.publish(self.bridge.to_ros_image(Image(image_blended),
+                                                                              encoding='bgr8'))
+        except Exception as e:
+            print(e)
             rospy.logwarn('Failed to generate prediction.')
 
+    def addLegend(self, image, unique_class_ints):
+        # Text setup
+        origin_x, origin_y = 5, 5  # Text origin x, y
+        color_rectangle_size = 25
+        font_size = 1.0
+        font_thickness = 2
+        w_max = 0
+        for i in range(len(unique_class_ints)):
+            text = self.class_names[unique_class_ints[i]]  # Class name
+            x, y = origin_x, origin_y + i * color_rectangle_size  # Text position
+            # Determine class color and convert to regular integers
+            color = (int(self.colors[unique_class_ints[i]][0]),
+                     int(self.colors[unique_class_ints[i]][1]),
+                     int(self.colors[unique_class_ints[i]][2]))
+            # Get text width and height
+            (w, h), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, font_size, font_thickness)
+            if w >= w_max:
+                w_max = w
+            # Draw partial background rectangle
+            image = cv2.rectangle(image, (x - origin_x, y),
+                                  (x + origin_x + color_rectangle_size + w_max,
+                                   y + color_rectangle_size),
+                                  (255, 255, 255, 0.5), -1)
+            # Draw color rectangle
+            image = cv2.rectangle(image, (x, y),
+                                  (x + color_rectangle_size, y + color_rectangle_size), color, -1)
+            # Draw class name text
+            image = cv2.putText(image, text, (x + color_rectangle_size + 2, y + h),
+                                cv2.FONT_HERSHEY_SIMPLEX, font_size, (0, 0, 0), font_thickness)
+        return image
+
+    @staticmethod
+    def HSVToRGB(h, s, v):
+        (r, g, b) = colorsys.hsv_to_rgb(h, s, v)
+        return np.array([int(255 * r), int(255 * g), int(255 * b)])
+
+    def getDistinctColors(self, n):
+        huePartition = 1.0 / (n + 1)
+        return np.array([self.HSVToRGB(huePartition * value, 1.0, 1.0) for value in range(0, n)]).astype(np.uint8)
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("-i", "--input_rgb_image_topic", help="Topic name for input rgb image",
+                        type=str, default="/usb_cam/image_raw")
+    parser.add_argument("-o", "--output_heatmap_topic", help="Topic to which we are publishing the heatmap in the form "
+                                                             "of a ROS image containing class ids",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/heatmap")
+    parser.add_argument("-v", "--output_rgb_image_topic", help="Topic to which we are publishing the heatmap image "
+                                                               "blended with the input image and a class legend for "
+                                                               "visualization purposes",
+                        type=lambda value: value if value.lower() != "none" else None,
+                        default="/opendr/heatmap_visualization")
+    parser.add_argument("--device", help="Device to use, either \"cpu\" or \"cuda\", defaults to \"cuda\"",
+                        type=str, default="cuda", choices=["cuda", "cpu"])
+    args = parser.parse_args()
 
-if __name__ == '__main__':
-    # Select the device for running the
     try:
-        if torch.cuda.is_available():
-            print("GPU found.")
+        if args.device == "cuda" and torch.cuda.is_available():
             device = "cuda"
-        else:
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
             device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
+        print("Using CPU.")
         device = "cpu"
 
-    parser = argparse.ArgumentParser()
-    parser.add_argument('image_topic', type=str, help='listen to images on this topic')
-    parser.add_argument('--heatmap_topic', type=str, help='publish the heatmap on this topic')
-    args = parser.parse_args()
-
-    bisenet_node = BisenetNode(device=device, input_image_topic=args.image_topic, output_heatmap_topic=args.heatmap_topic)
+    bisenet_node = BisenetNode(device=device,
+                               input_rgb_image_topic=args.input_rgb_image_topic,
+                               output_heatmap_topic=args.output_heatmap_topic,
+                               output_rgb_image_topic=args.output_rgb_image_topic)
     bisenet_node.listen()
+
+
+if __name__ == '__main__':
+    main()
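
Since the heatmap topic now carries raw class ids rather than colors, a downstream consumer can map them back to the CamVid class names listed above. A small hedged sketch (the input array is assumed to be the decoded class-id heatmap):

import numpy as np

CLASS_NAMES = ["Bicyclist", "Building", "Car", "Column Pole", "Fence", "Pedestrian", "Road", "Sidewalk",
               "Sign Symbol", "Sky", "Tree", "Unknown"]

def classes_in_heatmap(class_ids: np.ndarray):
    # class_ids: HxW array of CamVid class ids, as published on the heatmap topic
    return [CLASS_NAMES[int(i)] for i in np.unique(class_ids) if int(i) < len(CLASS_NAMES)]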
diff --git a/projects/opendr_ws/src/perception/scripts/speech_command_recognition.py b/projects/opendr_ws/src/perception/scripts/speech_command_recognition.py
index 4726b478a140d638cf297abcb00ed942acbbe954..7acd13578dfc6564f2b84c3a1e9821598ee0161d 100755
--- a/projects/opendr_ws/src/perception/scripts/speech_command_recognition.py
+++ b/projects/opendr_ws/src/perception/scripts/speech_command_recognition.py
@@ -28,26 +28,26 @@ from opendr.perception.speech_recognition import MatchboxNetLearner, EdgeSpeechN
 
 class SpeechRecognitionNode:
 
-    def __init__(self, input_topic='/audio/audio', prediction_topic="/opendr/speech_recognition",
-                 buffer_size=1.5, model='matchboxnet', model_path=None, device='cuda'):
+    def __init__(self, input_audio_topic="/audio/audio", output_speech_command_topic="/opendr/speech_recognition",
+                 buffer_size=1.5, model="matchboxnet", model_path=None, device="cuda"):
         """
         Creates a ROS Node for speech command recognition
-        :param input_topic: Topic from which the audio data is received
-        :type input_topic: str
-        :param prediction_topic: Topic to which the predictions are published
-        :type prediction_topic: str
+        :param input_audio_topic: Topic from which the audio data is received
+        :type input_audio_topic: str
+        :param output_speech_command_topic: Topic to which the predictions are published
+        :type output_speech_command_topic: str
         :param buffer_size: Length of the audio buffer in seconds
         :type buffer_size: float
         :param model: base speech command recognition model: matchboxnet or quad_selfonn
         :type model: str
-        :param device: device for inference ('cpu' or 'cuda')
+        :param device: device for inference ("cpu" or "cuda")
         :type device: str
 
         """
 
-        self.publisher = rospy.Publisher(prediction_topic, Classification2D, queue_size=10)
+        self.publisher = rospy.Publisher(output_speech_command_topic, Classification2D, queue_size=10)
 
-        rospy.Subscriber(input_topic, AudioData, self.callback)
+        rospy.Subscriber(input_audio_topic, AudioData, self.callback)
 
         self.bridge = ROSBridge()
 
@@ -59,17 +59,17 @@ class SpeechRecognitionNode:
         # Initialize the recognition model
         if model == "matchboxnet":
             self.learner = MatchboxNetLearner(output_classes_n=20, device=device)
-            load_path = './MatchboxNet'
+            load_path = "./MatchboxNet"
         elif model == "edgespeechnets":
             self.learner = EdgeSpeechNetsLearner(output_classes_n=20, device=device)
             assert model_path is not None, "No pretrained EdgeSpeechNets model available for download"
         elif model == "quad_selfonn":
             self.learner = QuadraticSelfOnnLearner(output_classes_n=20, device=device)
-            load_path = './QuadraticSelfOnn'
+            load_path = "./QuadraticSelfOnn"
 
         # Download the recognition model
         if model_path is None:
-            self.learner.download_pretrained(path='.')
+            self.learner.download_pretrained(path=".")
             self.learner.load(load_path)
         else:
             self.learner.load(model_path)
@@ -78,15 +78,15 @@ class SpeechRecognitionNode:
         """
         Start the node and begin processing input data
         """
-        rospy.init_node('opendr_speech_command_recognition', anonymous=True)
+        rospy.init_node("opendr_speech_command_recognition", anonymous=True)
         rospy.loginfo("Speech command recognition node started!")
         rospy.spin()
 
     def callback(self, msg_data):
         """
         Callback that processes the input data and publishes predictions to the output topic
-        :param data: incoming message
-        :type data: audio_common_msgs.msg.AudioData
+        :param msg_data: incoming message
+        :type msg_data: audio_common_msgs.msg.AudioData
         """
         # Accumulate data until the buffer is full
         data = np.reshape(np.frombuffer(msg_data.data, dtype=np.int16)/32768.0, (1, -1))
@@ -105,22 +105,37 @@ class SpeechRecognitionNode:
             self.data_buffer = np.zeros((1, 1))
 
 
-if __name__ == '__main__':
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--input_audio_topic", type=str, default="audio/audio",
+                        help="Listen to input data on this topic")
+    parser.add_argument("--output_speech_command_topic", type=str, default="/opendr/speech_recognition",
+                        help="Topic name for speech command output")
+    parser.add_argument("--buffer_size", type=float, default=1.5, help="Size of the audio buffer in seconds")
+    parser.add_argument("--model", default="matchboxnet", choices=["matchboxnet", "edgespeechnets", "quad_selfonn"],
+                        help="Model to be used for prediction: matchboxnet, edgespeechnets or quad_selfonn")
+    parser.add_argument("--model_path", type=str,
+                        help="Path to the model files, if not given, the pretrained model will be downloaded")
+    parser.add_argument("--device", type=str, default="cuda", choices=["cuda", "cpu"],
+                        help="Device to use (cpu, cuda)")
+    args = parser.parse_args()
+
     # Select the device for running
     try:
-        device = 'cuda' if torch.cuda.is_available() else 'cpu'
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
+            print("GPU not found. Using CPU instead.")
+            device = "cpu"
+        else:
+            print("Using CPU")
+            device = "cpu"
     except:
-        device = 'cpu'
-
-    parser = argparse.ArgumentParser()
-    parser.add_argument('input_topic', type=str, help='listen to input data on this topic')
-    parser.add_argument('--buffer_size', type=float, default=1.5, help='size of the audio buffer in seconds')
-    parser.add_argument('--model', choices=["matchboxnet", "edgespeechnets", "quad_selfonn"], default="matchboxnet",
-                        help='model to be used for prediction: matchboxnet or quad_selfonn')
-    parser.add_argument('--model_path', type=str,
-                        help='path to the model files, if not given, the pretrained model will be downloaded')
-    args = parser.parse_args()
+        print("Using CPU")
+        device = "cpu"
 
-    speech_node = SpeechRecognitionNode(input_topic=args.input_topic, buffer_size=args.buffer_size,
-                                        model=args.model, model_path=args.model_path, device=device)
+    speech_node = SpeechRecognitionNode(input_audio_topic=args.input_audio_topic,
+                                        output_speech_command_topic=args.output_speech_command_topic,
+                                        buffer_size=args.buffer_size, model=args.model, model_path=args.model_path,
+                                        device=device)
     speech_node.listen()
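
The callback converts the int16 payload of audio_common_msgs/AudioData into a normalized float row vector before accumulating it. A short sketch of that conversion with a dummy chunk (the sample rate used to relate buffer_size in seconds to a sample count is an assumption):

import numpy as np

SAMPLE_RATE = 16000      # assumed capture rate of the audio driver
buffer_size = 1.5        # seconds, the --buffer_size default

chunk = np.zeros(640, dtype=np.int16).tobytes()                      # stand-in for msg_data.data
data = np.reshape(np.frombuffer(chunk, dtype=np.int16) / 32768.0, (1, -1))
target_samples = int(SAMPLE_RATE * buffer_size)                      # assumed buffer-full criterion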
diff --git a/projects/opendr_ws/src/perception/scripts/video_activity_recognition.py b/projects/opendr_ws/src/perception/scripts/video_activity_recognition.py
index b79a462e3a56d5452d500195afd9af2b51bce54f..f6c6a9d4d4addb3f5506f2f3609ca954f73f3a98 100755
--- a/projects/opendr_ws/src/perception/scripts/video_activity_recognition.py
+++ b/projects/opendr_ws/src/perception/scripts/video_activity_recognition.py
@@ -13,12 +13,11 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-
+import argparse
 import rospy
 import torch
 import torchvision
 import cv2
-import numpy as np
 from pathlib import Path
 from std_msgs.msg import String
 from vision_msgs.msg import ObjectHypothesis
@@ -31,20 +30,19 @@ from opendr.perception.activity_recognition import X3DLearner
 
 
 class HumanActivityRecognitionNode:
-
     def __init__(
         self,
-        input_image_topic="/usb_cam/image_raw",
+        input_rgb_image_topic="/usb_cam/image_raw",
         output_category_topic="/opendr/human_activity_recognition",
         output_category_description_topic="/opendr/human_activity_recognition_description",
         device="cuda",
-        model='cox3d-m'
+        model="cox3d-m",
     ):
         """
-        Creates a ROS Node for face recognition
-        :param input_image_topic: Topic from which we are reading the input image
-        :type input_image_topic: str
-        :param output_category_topic: Topic to which we are publishing the recognized face info
+        Creates a ROS Node for video-based human activity recognition.
+        :param input_rgb_image_topic: Topic from which we are reading the input image
+        :type input_rgb_image_topic: str
+        :param output_category_topic: Topic to which we are publishing the recognized activity
         (if None, we are not publishing the info)
         :type output_category_topic: str
         :param output_category_description_topic: Topic to which we are publishing the ID of the recognized action
@@ -52,12 +50,20 @@ class HumanActivityRecognitionNode:
         :type output_category_description_topic:  str
         :param device: device on which we are running inference ('cpu' or 'cuda')
         :type device: str
-        :param model:  architecture to use for human activity recognition.
+        :param model:  Architecture to use for human activity recognition.
          (Options: 'cox3d-s', 'cox3d-m', 'cox3d-l', 'x3d-xs', 'x3d-s', 'x3d-m', 'x3d-l')
         :type model: str
         """
 
-        assert model in {"cox3d-s", "cox3d-m", "cox3d-l", "x3d-xs", "x3d-s", "x3d-m", "x3d-l"}
+        assert model in {
+            "cox3d-s",
+            "cox3d-m",
+            "cox3d-l",
+            "x3d-xs",
+            "x3d-s",
+            "x3d-m",
+            "x3d-l",
+        }
         model_name, model_size = model.split("-")
         Learner = {"cox3d": CoX3DLearner, "x3d": X3DLearner}[model_name]
 
@@ -68,7 +74,9 @@ class HumanActivityRecognitionNode:
 
         # Set up preprocessing
         if model_name == "cox3d":
-            self.preprocess = _image_preprocess(image_size=self.learner.model_hparams["image_size"])
+            self.preprocess = _image_preprocess(
+                image_size=self.learner.model_hparams["image_size"]
+            )
         else:  # == x3d
             self.preprocess = _video_preprocess(
                 image_size=self.learner.model_hparams["image_size"],
@@ -76,22 +84,32 @@ class HumanActivityRecognitionNode:
             )
 
         # Set up ROS topics and bridge
+        self.input_rgb_image_topic = input_rgb_image_topic
         self.hypothesis_publisher = (
-            rospy.Publisher(output_category_topic, ObjectHypothesis, queue_size=10) if output_category_topic else None
+            rospy.Publisher(output_category_topic, ObjectHypothesis, queue_size=1)
+            if output_category_topic
+            else None
         )
         self.string_publisher = (
-            rospy.Publisher(output_category_description_topic, String, queue_size=10) if output_category_topic else None
+            rospy.Publisher(output_category_description_topic, String, queue_size=1)
+            if output_category_description_topic
+            else None
         )
 
-        rospy.Subscriber(input_image_topic, ROS_Image, self.callback)
-
         self.bridge = ROSBridge()
 
     def listen(self):
         """
         Start the node and begin processing input data
         """
-        rospy.init_node('opendr_human_activity_recognition', anonymous=True)
+        rospy.init_node("opendr_human_activity_recognition", anonymous=True)
+        rospy.Subscriber(
+            self.input_rgb_image_topic,
+            ROS_Image,
+            self.callback,
+            queue_size=1,
+            buff_size=10000000,
+        )
         rospy.loginfo("Human activity recognition node started!")
         rospy.spin()
 
@@ -101,49 +119,43 @@ class HumanActivityRecognitionNode:
         :param data: input message
         :type data: sensor_msgs.msg.Image
         """
-        image = self.bridge.from_ros_image(data)
+        image = self.bridge.from_ros_image(data, encoding="rgb8")
         if image is None:
             return
 
-        x = self.preprocess(image.numpy())
+        x = self.preprocess(image.convert("channels_first", "rgb"))
 
         result = self.learner.infer(x)
         assert len(result) == 1
         category = result[0]
-        category.confidence = float(max(category.confidence.max()))  # Confidence for predicted class
+        category.confidence = float(category.confidence.max())  # Confidence for predicted class
         category.description = KINETICS400_CLASSES[category.data]  # Class name
 
         if self.hypothesis_publisher is not None:
             self.hypothesis_publisher.publish(self.bridge.to_ros_category(category))
 
         if self.string_publisher is not None:
-            self.string_publisher.publish(self.bridge.to_ros_category_description(category))
+            self.string_publisher.publish(
+                self.bridge.to_ros_category_description(category)
+            )
 
 
-def _resize(image, width=None, height=None, inter=cv2.INTER_AREA):
+def _resize(image, size=None, inter=cv2.INTER_AREA):
     # initialize the dimensions of the image to be resized and
     # grab the image size
     dim = None
     (h, w) = image.shape[:2]
 
-    # if both the width and height are None, then return the
-    # original image
-    if width is None and height is None:
-        return image
-
-    # check to see if the width is None
-    if width is None:
-        # calculate the ratio of the height and construct the
+    if h > w:
+        # calculate the ratio of the width and construct the
         # dimensions
-        r = height / float(h)
-        dim = (int(w * r), height)
-
-    # otherwise, the height is None
+        r = size / float(w)
+        dim = (size, int(h * r))
     else:
-        # calculate the ratio of the width and construct the
+        # calculate the ratio of the height and construct the
         # dimensions
-        r = width / float(w)
-        dim = (width, int(h * r))
+        r = size / float(h)
+        dim = (int(w * r), size)
 
     # resize the image
     resized = cv2.resize(image, dim, interpolation=inter)
@@ -160,11 +172,11 @@ def _image_preprocess(image_size: int):
     def wrapped(frame):
         nonlocal standardize
         frame = frame.transpose((1, 2, 0))  # C, H, W -> H, W, C
-        frame = _resize(frame, height=image_size, width=image_size)
+        frame = _resize(frame, size=image_size)
         frame = torch.tensor(frame).permute((2, 0, 1))  # H, W, C -> C, H, W
         frame = frame / 255.0  # [0, 255] -> [0.0, 1.0]
         frame = standardize(frame)
-        return Image(frame, dtype=np.float)
+        return Image(frame, dtype=float)
 
     return wrapped
 
@@ -179,7 +191,7 @@ def _video_preprocess(image_size: int, window_size: int):
     def wrapped(frame):
         nonlocal frames, standardize
         frame = frame.transpose((1, 2, 0))  # C, H, W -> H, W, C
-        frame = _resize(frame, height=image_size, width=image_size)
+        frame = _resize(frame, size=image_size)
         frame = torch.tensor(frame).permute((2, 0, 1))  # H, W, C -> C, H, W
         frame = frame / 255.0  # [0, 255] -> [0.0, 1.0]
         frame = standardize(frame)
@@ -194,17 +206,67 @@ def _video_preprocess(image_size: int, window_size: int):
     return wrapped
 
 
-if __name__ == '__main__':
-    # Select the device for running the
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument(
+        "-i",
+        "--input_rgb_image_topic",
+        help="Topic name for input rgb image",
+        type=str,
+        default="/usb_cam/image_raw",
+    )
+    parser.add_argument(
+        "-o",
+        "--output_category_topic",
+        help="Topic to which we are publishing the recognized activity",
+        type=lambda value: value if value.lower() != "none" else None,
+        default="/opendr/human_activity_recognition",
+    )
+    parser.add_argument(
+        "-od",
+        "--output_category_description_topic",
+        help="Topic to which we are publishing the ID of the recognized action",
+        type=lambda value: value if value.lower() != "none" else None,
+        default="/opendr/human_activity_recognition_description",
+    )
+    parser.add_argument(
+        "--device",
+        help='Device to use, either "cpu" or "cuda", defaults to "cuda"',
+        type=str,
+        default="cuda",
+        choices=["cuda", "cpu"],
+    )
+    parser.add_argument(
+        "--model",
+        help="Architecture to use for human activity recognition.",
+        type=str,
+        default="cox3d-m",
+        choices=["cox3d-s", "cox3d-m", "cox3d-l", "x3d-xs", "x3d-s", "x3d-m", "x3d-l"],
+    )
+    args = parser.parse_args()
+
     try:
-        if torch.cuda.is_available():
-            print("GPU found.")
-            device = 'cuda'
-        else:
+        if args.device == "cuda" and torch.cuda.is_available():
+            device = "cuda"
+        elif args.device == "cuda":
             print("GPU not found. Using CPU instead.")
-            device = 'cpu'
+            device = "cpu"
+        else:
+            print("Using CPU.")
+            device = "cpu"
     except:
-        device = 'cpu'
-
-    human_activity_recognition_node = HumanActivityRecognitionNode(device=device)
+        print("Using CPU.")
+        device = "cpu"
+
+    human_activity_recognition_node = HumanActivityRecognitionNode(
+        input_rgb_image_topic=args.input_rgb_image_topic,
+        output_category_topic=args.output_category_topic,
+        output_category_description_topic=args.output_category_description_topic,
+        device=device,
+        model=args.model,
+    )
     human_activity_recognition_node.listen()
+
+
+if __name__ == "__main__":
+    main()
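# Illustrative note (not part of the patch): the output topic arguments accept the literal
# string "None". A minimal sketch of what the argparse `type` callable does with it; the
# interpretation that a None topic disables the corresponding publisher is an assumption
# based on similar OpenDR ROS nodes:
```python
def to_topic(value):
    # Same logic as the lambda passed as argparse `type` above.
    return value if value.lower() != "none" else None


print(to_topic("/opendr/human_activity_recognition"))  # topic name passed through unchanged
print(to_topic("None"))                                 # None -> publisher presumably disabled
```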
diff --git a/projects/opendr_ws/src/ros_bridge/CMakeLists.txt b/projects/opendr_ws/src/ros_bridge/CMakeLists.txt
index b7ed470ae0003908ce38966898fecd1489304239..f66066c41f76fc202df6770f3a98e467e65f73eb 100644
--- a/projects/opendr_ws/src/ros_bridge/CMakeLists.txt
+++ b/projects/opendr_ws/src/ros_bridge/CMakeLists.txt
@@ -14,6 +14,12 @@ catkin_python_setup()
 ################################################
 ## Declare ROS messages, services and actions ##
 ################################################
+add_message_files(
+    DIRECTORY msg
+    FILES
+    OpenDRPose2DKeypoint.msg
+    OpenDRPose2D.msg
+)
 
 generate_messages(
     DEPENDENCIES
diff --git a/projects/opendr_ws/src/ros_bridge/msg/OpenDRPose2D.msg b/projects/opendr_ws/src/ros_bridge/msg/OpenDRPose2D.msg
new file mode 100644
index 0000000000000000000000000000000000000000..09b1443027f13af7376930779678f5887b948ffa
--- /dev/null
+++ b/projects/opendr_ws/src/ros_bridge/msg/OpenDRPose2D.msg
@@ -0,0 +1,26 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This message represents a full OpenDR human pose 2D as a list of keypoints
+
+Header header
+
+# The id of the pose
+int32 pose_id
+
+# The pose detection confidence of the model
+float32 conf
+
+# A list of the human 2D pose keypoints
+OpenDRPose2DKeypoint[] keypoint_list
diff --git a/projects/opendr_ws/src/ros_bridge/msg/OpenDRPose2DKeypoint.msg b/projects/opendr_ws/src/ros_bridge/msg/OpenDRPose2DKeypoint.msg
new file mode 100644
index 0000000000000000000000000000000000000000..72d14a19f2464a2068357c36ec53433b61072600
--- /dev/null
+++ b/projects/opendr_ws/src/ros_bridge/msg/OpenDRPose2DKeypoint.msg
@@ -0,0 +1,22 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+# This message contains all relevant information for an OpenDR human pose 2D keypoint
+
+# The kpt_name according to https://github.com/opendr-eu/opendr/blob/master/docs/reference/lightweight-open-pose.md#notes
+string kpt_name
+
+# x and y pixel position on the input image, (0, 0) is top-left corner of image
+int32 x
+int32 y
diff --git a/projects/opendr_ws/src/ros_bridge/package.xml b/projects/opendr_ws/src/ros_bridge/package.xml
index 845bd040966dd693f915de2a023d0140863e4182..e9cb01afb14cf4622d6c83b5d6768a1cc307ac2b 100644
--- a/projects/opendr_ws/src/ros_bridge/package.xml
+++ b/projects/opendr_ws/src/ros_bridge/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <package format="2">
   <name>ros_bridge</name>
-   <version>1.0.0</version>
+   <version>1.1.1</version>
   <description>OpenDR ros_bridge package. This package provides a way to translate ROS messages into OpenDR data types
       and vice versa.
   </description>
diff --git a/projects/opendr_ws/src/ros_bridge/src/opendr_bridge/bridge.py b/projects/opendr_ws/src/ros_bridge/src/opendr_bridge/bridge.py
index fe7e4171f2f8c051c26934966e039b76b43f7ff0..dcde5ec310c4f3395bad497ff437bb8146f2c300 100755
--- a/projects/opendr_ws/src/ros_bridge/src/opendr_bridge/bridge.py
+++ b/projects/opendr_ws/src/ros_bridge/src/opendr_bridge/bridge.py
@@ -28,6 +28,7 @@ from std_msgs.msg import ColorRGBA, String, Header
 from sensor_msgs.msg import Image as ImageMsg, PointCloud as PointCloudMsg, ChannelFloat32 as ChannelFloat32Msg
 import rospy
 from geometry_msgs.msg import Point32 as Point32Msg, Quaternion as QuaternionMsg
+from ros_bridge.msg import OpenDRPose2D, OpenDRPose2DKeypoint
 
 
 class ROSBridge:
@@ -69,51 +70,50 @@ class ROSBridge:
         message = self._cv_bridge.cv2_to_imgmsg(image.opencv(), encoding=encoding)
         return message
 
-    def to_ros_pose(self, pose):
+    def to_ros_pose(self, pose: Pose):
         """
-        Converts an OpenDR pose into a Detection2DArray msg that can carry the same information
-        Each keypoint is represented as a bbox centered at the keypoint with zero width/height. The subject id is also
-        embedded on each keypoint (stored in ObjectHypothesisWithPose).
-        :param pose: OpenDR pose to be converted
+        Converts an OpenDR Pose into an OpenDRPose2D msg that can carry the same information, i.e. a list of keypoints,
+        the pose detection confidence and the pose id.
+        Each keypoint is represented as an OpenDRPose2DKeypoint with x, y pixel position on input image with (0, 0)
+        being the top-left corner.
+        :param pose: OpenDR Pose to be converted to OpenDRPose2D
         :type pose: engine.target.Pose
         :return: ROS message with the pose
-        :rtype: vision_msgs.msg.Detection2DArray
+        :rtype: ros_bridge.msg.OpenDRPose2D
         """
         data = pose.data
-        keypoints = Detection2DArray()
-        for i in range(data.shape[0]):
-            keypoint = Detection2D()
-            keypoint.bbox = BoundingBox2D()
-            keypoint.results.append(ObjectHypothesisWithPose())
-            keypoint.bbox.center = Pose2D()
-            keypoint.bbox.center.x = data[i][0]
-            keypoint.bbox.center.y = data[i][1]
-            keypoint.bbox.size_x = 0
-            keypoint.bbox.size_y = 0
-            keypoint.results[0].id = pose.id
-            if pose.confidence:
-                keypoint.results[0].score = pose.confidence
-            keypoints.detections.append(keypoint)
-        return keypoints
+        # Set up the ROS pose message
+        ros_pose = OpenDRPose2D()
+        ros_pose.pose_id = int(pose.id)
+        if pose.confidence:
+            ros_pose.conf = pose.confidence
 
-    def from_ros_pose(self, ros_pose):
-        """
-        Converts a ROS message with pose payload into an OpenDR pose
-        :param ros_pose: the pose to be converted (represented as vision_msgs.msg.Detection2DArray)
-        :type ros_pose: vision_msgs.msg.Detection2DArray
-        :return: an OpenDR pose
+        # Add keypoints to pose
+        for i in range(data.shape[0]):
+            ros_keypoint = OpenDRPose2DKeypoint()
+            ros_keypoint.kpt_name = pose.kpt_names[i]
+            ros_keypoint.x = data[i][0]
+            ros_keypoint.y = data[i][1]
+            # Add keypoint to pose
+            ros_pose.keypoint_list.append(ros_keypoint)
+        return ros_pose
+
+    def from_ros_pose(self, ros_pose: OpenDRPose2D):
+        """
+        Converts an OpenDRPose2D message into an OpenDR Pose.
+        :param ros_pose: the ROS pose to be converted
+        :type ros_pose: ros_bridge.msg.OpenDRPose2D
+        :return: an OpenDR Pose
         :rtype: engine.target.Pose
         """
-        keypoints = ros_pose.detections
-        data = []
-        pose_id, confidence = None, None
+        ros_keypoints = ros_pose.keypoint_list
+        keypoints = []
+        pose_id, confidence = ros_pose.pose_id, ros_pose.conf
 
-        for keypoint in keypoints:
-            data.append(keypoint.bbox.center.x)
-            data.append(keypoint.bbox.center.y)
-            confidence = keypoint.results[0].score
-            pose_id = keypoint.results[0].id
-        data = np.asarray(data).reshape((-1, 2))
+        for ros_keypoint in ros_keypoints:
+            keypoints.append(int(ros_keypoint.x))
+            keypoints.append(int(ros_keypoint.y))
+        data = np.asarray(keypoints).reshape((-1, 2))
 
         pose = Pose(data, confidence)
         pose.id = pose_id
@@ -213,7 +213,7 @@ class ROSBridge:
             ros_box.bbox.center.y = box.top + box.height / 2.
             ros_box.bbox.size_x = box.width
             ros_box.bbox.size_y = box.height
-            ros_box.results[0].id = box.name
+            ros_box.results[0].id = int(box.name)
             if box.confidence:
                 ros_box.results[0].score = box.confidence
             ros_boxes.detections.append(ros_box)
@@ -235,8 +235,8 @@ class ROSBridge:
             height = box.bbox.size_y
             left = box.bbox.center.x - width / 2.
             top = box.bbox.center.y - height / 2.
-            id = box.results[0].id
-            bbox = BoundingBox(top=top, left=left, width=width, height=height, name=id)
+            _id = int(box.results[0].id)
+            bbox = BoundingBox(top=top, left=left, width=width, height=height, name=_id)
             bboxes.data.append(bbox)
 
         return bboxes
@@ -294,7 +294,7 @@ class ROSBridge:
             detection.bbox.center.y = bounding_box.top + bounding_box.height / 2.0
             detection.bbox.size_x = bounding_box.width
             detection.bbox.size_y = bounding_box.height
-            detection.results[0].id = bounding_box.name
+            detection.results[0].id = int(bounding_box.name)
             detection.results[0].score = bounding_box.confidence
             detections.detections.append(detection)
         return detections
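# Illustrative note (not part of the patch): a minimal round-trip sketch of the new pose
# conversions, assuming a built catkin workspace with the updated ros_bridge messages and
# the OpenDR Python packages on the path; the two-keypoint pose is fabricated for illustration:
```python
import numpy as np
from opendr.engine.target import Pose
from opendr_bridge import ROSBridge

bridge = ROSBridge()

# Fabricated two-keypoint pose, purely for illustration.
pose = Pose(np.array([[10, 20], [30, 40]]), 0.9)
pose.id = 0

ros_pose = bridge.to_ros_pose(pose)        # ros_bridge.msg.OpenDRPose2D
restored = bridge.from_ros_pose(ros_pose)  # back to engine.target.Pose
print(restored.id, restored.confidence, restored.data.shape)
```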
diff --git a/projects/opendr_ws/src/simulation/package.xml b/projects/opendr_ws/src/simulation/package.xml
index f55f2198ddd1e62eb7eb86617c13c5df13b69b7f..cd9795529bf1ce99a0d9d10cdeb9b38e07b2f293 100644
--- a/projects/opendr_ws/src/simulation/package.xml
+++ b/projects/opendr_ws/src/simulation/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <package format="2">
   <name>simulation</name>
-  <version>1.0.0</version>
+  <version>1.1.1</version>
   <description>OpenDR's ROS nodes for simulation package</description>
   <maintainer email="tefas@csd.auth.gr">OpenDR Project Coordinator</maintainer>
   <license>Apache License v2.0 </license>
diff --git a/projects/opendr_ws/src/simulation/scripts/human_model_generation_client.py b/projects/opendr_ws/src/simulation/scripts/human_model_generation_client.py
index 1f9470f9c6430f25e02f095ef3e56ccd035c8494..0b6fdacc34fc0fc6f8009ab3445bd9d68d79e996 100644
--- a/projects/opendr_ws/src/simulation/scripts/human_model_generation_client.py
+++ b/projects/opendr_ws/src/simulation/scripts/human_model_generation_client.py
@@ -25,9 +25,9 @@ from opendr.simulation.human_model_generation.utilities.model_3D import Model_3D
 
 
 if __name__ == '__main__':
-    rgb_img = cv2.imread(os.path.join(os.environ['OPENDR_HOME'], 'projects/simulation/'
+    rgb_img = cv2.imread(os.path.join(os.environ['OPENDR_HOME'], 'projects/python/simulation/'
                                       'human_model_generation/demos/imgs_input/rgb/result_0004.jpg'))
-    msk_img = cv2.imread(os.path.join(os.environ['OPENDR_HOME'], 'projects/simulation/'
+    msk_img = cv2.imread(os.path.join(os.environ['OPENDR_HOME'], 'projects/python/simulation/'
                                       'human_model_generation/demos/imgs_input/msk/result_0004.jpg'))
     bridge_cv = CvBridge()
     bridge_ros = ROSBridge()
@@ -46,6 +46,6 @@ if __name__ == '__main__':
         human_model = Model_3D(vertices, triangles, vertex_colors)
         human_model.save_obj_mesh('./human_model.obj')
         [out_imgs, human_pose_2D] = human_model.get_img_views(rotations=[30, 120], human_pose_3D=pose, plot_kps=True)
-        cv2.imwrite('./rendering.png', out_imgs[0].numpy())
+        cv2.imwrite('./rendering.png', out_imgs[0].opencv())
     except rospy.ServiceException as e:
         print("Service call failed: %s" % e)
diff --git a/projects/python/README.md b/projects/python/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..b1a72da80862fe5a4347456fe4cb4c607b00d414
--- /dev/null
+++ b/projects/python/README.md
@@ -0,0 +1,6 @@
+# Python usage examples and tutorials
+
+
+This folder contains several usage examples and tutorials that demonstrate the functionality of the OpenDR toolkit.
+The usage examples follow the same structure as the Python packages that are provided by OpenDR, i.e., they are provided separately for [perception](perception), [control](control) and [simulation](simulation) tools.
+Furthermore, usage examples of other utilities are provided in [utils](utils).
diff --git a/projects/control/eagerx/README.md b/projects/python/control/eagerx/README.md
similarity index 97%
rename from projects/control/eagerx/README.md
rename to projects/python/control/eagerx/README.md
index 26825812a6e1e4ee2b5a4311a30d8663b3cc7555..0a63adce4891d40392962ee4086e02e21595eda4 100644
--- a/projects/control/eagerx/README.md
+++ b/projects/python/control/eagerx/README.md
@@ -22,7 +22,7 @@ Specifically the following examples are provided:
    
 Example usage:
 ```bash
-cd $OPENDR_HOME/projects/control/eagerx/demos
+cd $OPENDR_HOME/projects/python/control/eagerx/demos
 python3 [demo_name]
 ```
 
diff --git a/projects/control/eagerx/data/with_actions.h5 b/projects/python/control/eagerx/data/with_actions.h5
similarity index 100%
rename from projects/control/eagerx/data/with_actions.h5
rename to projects/python/control/eagerx/data/with_actions.h5
diff --git a/__init__.py b/projects/python/control/eagerx/demos/__init__.py
similarity index 100%
rename from __init__.py
rename to projects/python/control/eagerx/demos/__init__.py
diff --git a/projects/control/eagerx/demos/demo_classifier.py b/projects/python/control/eagerx/demos/demo_classifier.py
similarity index 100%
rename from projects/control/eagerx/demos/demo_classifier.py
rename to projects/python/control/eagerx/demos/demo_classifier.py
diff --git a/projects/control/eagerx/demos/demo_full_state.py b/projects/python/control/eagerx/demos/demo_full_state.py
similarity index 100%
rename from projects/control/eagerx/demos/demo_full_state.py
rename to projects/python/control/eagerx/demos/demo_full_state.py
diff --git a/projects/control/eagerx/demos/demo_pid.py b/projects/python/control/eagerx/demos/demo_pid.py
similarity index 100%
rename from projects/control/eagerx/demos/demo_pid.py
rename to projects/python/control/eagerx/demos/demo_pid.py
diff --git a/projects/control/eagerx/dependencies.ini b/projects/python/control/eagerx/dependencies.ini
similarity index 100%
rename from projects/control/eagerx/dependencies.ini
rename to projects/python/control/eagerx/dependencies.ini
diff --git a/projects/control/mobile_manipulation/CMakeLists.txt b/projects/python/control/mobile_manipulation/CMakeLists.txt
similarity index 100%
rename from projects/control/mobile_manipulation/CMakeLists.txt
rename to projects/python/control/mobile_manipulation/CMakeLists.txt
diff --git a/projects/control/mobile_manipulation/README.md b/projects/python/control/mobile_manipulation/README.md
similarity index 100%
rename from projects/control/mobile_manipulation/README.md
rename to projects/python/control/mobile_manipulation/README.md
diff --git a/projects/control/mobile_manipulation/best_defaults.yaml b/projects/python/control/mobile_manipulation/best_defaults.yaml
similarity index 100%
rename from projects/control/mobile_manipulation/best_defaults.yaml
rename to projects/python/control/mobile_manipulation/best_defaults.yaml
diff --git a/projects/control/mobile_manipulation/mobile_manipulation_demo.py b/projects/python/control/mobile_manipulation/mobile_manipulation_demo.py
similarity index 100%
rename from projects/control/mobile_manipulation/mobile_manipulation_demo.py
rename to projects/python/control/mobile_manipulation/mobile_manipulation_demo.py
diff --git a/projects/control/mobile_manipulation/package.xml b/projects/python/control/mobile_manipulation/package.xml
similarity index 100%
rename from projects/control/mobile_manipulation/package.xml
rename to projects/python/control/mobile_manipulation/package.xml
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax.dae b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax.dae
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax.dae
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax.dae
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1.dae b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1.dae
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1.dae
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1.dae
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1_tex_0.jpg b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1_tex_0.jpg
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1_tex_0.jpg
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer1_tex_0.jpg
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2.dae b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2.dae
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2.dae
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2.dae
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2_tex_0.jpg b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2_tex_0.jpg
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2_tex_0.jpg
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/KallaxDrawer2_tex_0.jpg
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax_tex_0.jpg b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax_tex_0.jpg
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax_tex_0.jpg
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/meshes/Kallax_tex_0.jpg
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/model.config b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/model.config
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/model.config
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/model.config
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax/model.sdf b/projects/python/control/mobile_manipulation/robots_world/models/Kallax/model.sdf
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax/model.sdf
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax/model.sdf
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax.dae b/projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax.dae
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax.dae
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax.dae
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer.dae b/projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer.dae
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer.dae
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer.dae
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer_tex_0.jpg b/projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer_tex_0.jpg
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer_tex_0.jpg
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_Tuer_tex_0.jpg
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_tex_0.jpg b/projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_tex_0.jpg
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_tex_0.jpg
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax2/meshes/Kallax_tex_0.jpg
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax2/model.config b/projects/python/control/mobile_manipulation/robots_world/models/Kallax2/model.config
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax2/model.config
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax2/model.config
diff --git a/projects/control/mobile_manipulation/robots_world/models/Kallax2/model.sdf b/projects/python/control/mobile_manipulation/robots_world/models/Kallax2/model.sdf
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/Kallax2/model.sdf
rename to projects/python/control/mobile_manipulation/robots_world/models/Kallax2/model.sdf
diff --git a/projects/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli1_tex_0.jpg b/projects/python/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli1_tex_0.jpg
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli1_tex_0.jpg
rename to projects/python/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli1_tex_0.jpg
diff --git a/projects/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli2.dae b/projects/python/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli2.dae
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli2.dae
rename to projects/python/control/mobile_manipulation/robots_world/models/muesli2/meshes/muesli2.dae
diff --git a/projects/control/mobile_manipulation/robots_world/models/muesli2/model.config b/projects/python/control/mobile_manipulation/robots_world/models/muesli2/model.config
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/muesli2/model.config
rename to projects/python/control/mobile_manipulation/robots_world/models/muesli2/model.config
diff --git a/projects/control/mobile_manipulation/robots_world/models/muesli2/model.sdf b/projects/python/control/mobile_manipulation/robots_world/models/muesli2/model.sdf
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/muesli2/model.sdf
rename to projects/python/control/mobile_manipulation/robots_world/models/muesli2/model.sdf
diff --git a/projects/control/mobile_manipulation/robots_world/models/reemc_table_low/model.config b/projects/python/control/mobile_manipulation/robots_world/models/reemc_table_low/model.config
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/reemc_table_low/model.config
rename to projects/python/control/mobile_manipulation/robots_world/models/reemc_table_low/model.config
diff --git a/projects/control/mobile_manipulation/robots_world/models/reemc_table_low/table.sdf b/projects/python/control/mobile_manipulation/robots_world/models/reemc_table_low/table.sdf
similarity index 100%
rename from projects/control/mobile_manipulation/robots_world/models/reemc_table_low/table.sdf
rename to projects/python/control/mobile_manipulation/robots_world/models/reemc_table_low/table.sdf
diff --git a/projects/control/mobile_manipulation/rviz_config.rviz b/projects/python/control/mobile_manipulation/rviz_config.rviz
similarity index 100%
rename from projects/control/mobile_manipulation/rviz_config.rviz
rename to projects/python/control/mobile_manipulation/rviz_config.rviz
diff --git a/projects/control/single_demo_grasp/README.md b/projects/python/control/single_demo_grasp/README.md
similarity index 78%
rename from projects/control/single_demo_grasp/README.md
rename to projects/python/control/single_demo_grasp/README.md
index d28ef3d66183e11ffb7b02648421719322a629b7..0486c939e0282dcd82928113f2f689d735731518 100755
--- a/projects/control/single_demo_grasp/README.md
+++ b/projects/python/control/single_demo_grasp/README.md
@@ -26,7 +26,7 @@ $ make install_runtime_dependencies
 After installing dependencies, the user must source the workspace in the shell in order to detect the packages:
 
 ```
-$ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+$ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 ```
 
 ## Demos
@@ -38,7 +38,7 @@ three different nodes must be launched consecutively in order to properly run th
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 4. $ export WEBOTS_HOME=/usr/local/webots
 5. $ roslaunch single_demo_grasping_demo panda_sim.launch
 ```
@@ -47,7 +47,7 @@ three different nodes must be launched consecutively in order to properly run th
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 4. $ roslaunch single_demo_grasping_demo camera_stream_inference.launch
 ```
 
@@ -55,20 +55,20 @@ three different nodes must be launched consecutively in order to properly run th
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
 4. $ roslaunch single_demo_grasping_demo panda_sim_control.launch
 ```
 
 ## Examples
 You can find an example on how to use the learner class to run inference and see the result in the following directory:
 ```
-$ cd projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
+$ cd projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
 ```
 simply run:
 ```
 1. $ cd path/to/opendr/home # change accordingly
 2. $ source bin/setup.bash
-3. $ source projects/control/single_demo_grasp/simulation_ws/devel/setup.bash
-4. $ cd projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
+3. $ source projects/python/control/single_demo_grasp/simulation_ws/devel/setup.bash
+4. $ cd projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/
 5. $ ./single_demo_inference.py
 ```
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/CMakeLists.txt b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/CMakeLists.txt
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/CMakeLists.txt
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/CMakeLists.txt
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/mainpage.dox b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/mainpage.dox
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/mainpage.dox
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/mainpage.dox
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/finger.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/finger.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/finger.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/finger.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/hand.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/hand.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/hand.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/hand.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link0.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link0.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link0.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link0.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link1.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link1.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link1.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link1.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link2.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link2.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link2.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link2.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link3.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link3.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link3.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link3.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link4.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link4.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link4.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link4.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link5.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link5.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link5.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link5.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link6.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link6.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link6.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link6.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link7.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link7.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link7.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/meshes/visual/link7.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/package.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/package.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/package.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/package.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/dual_panda_example.urdf.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/dual_panda_example.urdf.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/dual_panda_example.urdf.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/dual_panda_example.urdf.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.urdf.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.urdf.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.urdf.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.urdf.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/hand.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.urdf.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.urdf.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.urdf.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.urdf.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm_hand.urdf.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm_hand.urdf.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm_hand.urdf.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/robots/panda_arm_hand.urdf.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/franka_description/rosdoc.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/rosdoc.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/franka_description/rosdoc.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/franka_description/rosdoc.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/.setup_assistant b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/.setup_assistant
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/.setup_assistant
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/.setup_assistant
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CHANGELOG.rst b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CHANGELOG.rst
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CHANGELOG.rst
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CHANGELOG.rst
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CMakeLists.txt b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CMakeLists.txt
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CMakeLists.txt
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/CMakeLists.txt
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/README.md b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/README.md
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/README.md
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/README.md
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/chomp_planning.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/chomp_planning.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/chomp_planning.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/chomp_planning.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/fake_controllers.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/fake_controllers.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/fake_controllers.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/fake_controllers.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/hand.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/hand.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/hand.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/hand.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/joint_limits.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/joint_limits.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/joint_limits.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/joint_limits.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/kinematics.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/kinematics.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/kinematics.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/kinematics.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/lerp_planning.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/lerp_planning.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/lerp_planning.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/lerp_planning.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/ompl_planning.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/ompl_planning.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/ompl_planning.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/ompl_planning.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.srdf.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.srdf.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.srdf.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.srdf.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm_hand.srdf.xacro b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm_hand.srdf.xacro
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm_hand.srdf.xacro
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_arm_hand.srdf.xacro
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_controllers.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_controllers.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_controllers.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_controllers.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_gripper_controllers.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_gripper_controllers.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_gripper_controllers.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/panda_gripper_controllers.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_depthmap.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_depthmap.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_depthmap.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_depthmap.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_pointcloud.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_pointcloud.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_pointcloud.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/sensors_kinect_pointcloud.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/stomp_planning.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/stomp_planning.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/stomp_planning.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/stomp_planning.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/trajopt_planning.yaml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/trajopt_planning.yaml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/trajopt_planning.yaml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/config/trajopt_planning.yaml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/chomp_planning_pipeline.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/chomp_planning_pipeline.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/chomp_planning_pipeline.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/chomp_planning_pipeline.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/default_warehouse_db.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/default_warehouse_db.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/default_warehouse_db.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/default_warehouse_db.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo_chomp.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo_chomp.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo_chomp.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/demo_chomp.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/fake_moveit_controller_manager.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/fake_moveit_controller_manager.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/fake_moveit_controller_manager.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/fake_moveit_controller_manager.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/joystick_control.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/joystick_control.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/joystick_control.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/joystick_control.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/lerp_planning_pipeline.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/lerp_planning_pipeline.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/lerp_planning_pipeline.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/lerp_planning_pipeline.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/move_group.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/move_group.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/move_group.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/move_group.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit.rviz b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit.rviz
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit.rviz
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit.rviz
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_empty.rviz b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_empty.rviz
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_empty.rviz
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_empty.rviz
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_rviz.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_rviz.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_rviz.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/moveit_rviz.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl-chomp_planning_pipeline.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl-chomp_planning_pipeline.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl-chomp_planning_pipeline.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl-chomp_planning_pipeline.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl_planning_pipeline.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl_planning_pipeline.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl_planning_pipeline.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/ompl_planning_pipeline.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_control_moveit_rviz.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_control_moveit_rviz.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_control_moveit_rviz.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_control_moveit_rviz.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_gripper_moveit_controller_manager.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_gripper_moveit_controller_manager.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_gripper_moveit_controller_manager.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_gripper_moveit_controller_manager.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_controller_manager.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_controller_manager.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_controller_manager.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_controller_manager.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_sensor_manager.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_sensor_manager.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_sensor_manager.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/panda_moveit_sensor_manager.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_context.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_context.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_context.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_context.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_pipeline.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_pipeline.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_pipeline.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/planning_pipeline.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_ompl.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_ompl.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_ompl.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_ompl.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_trajopt.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_trajopt.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_trajopt.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/run_benchmark_trajopt.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/sensor_manager.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/sensor_manager.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/sensor_manager.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/sensor_manager.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/setup_assistant.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/setup_assistant.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/setup_assistant.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/setup_assistant.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/stomp_planning_pipeline.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/stomp_planning_pipeline.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/stomp_planning_pipeline.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/stomp_planning_pipeline.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajectory_execution.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajectory_execution.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajectory_execution.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajectory_execution.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajopt_planning_pipeline.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajopt_planning_pipeline.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajopt_planning_pipeline.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/trajopt_planning_pipeline.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse_settings.launch.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse_settings.launch.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse_settings.launch.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/launch/warehouse_settings.launch.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/package.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/package.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/package.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/panda_moveit_config/package.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/CMakeLists.txt b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/CMakeLists.txt
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/CMakeLists.txt
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/CMakeLists.txt
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/README.md b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/README.md
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/README.md
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/README.md
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/inference_utils.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/inference_utils.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/inference_utils.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/inference_utils.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/samples/0.jpg b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/samples/0.jpg
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/samples/0.jpg
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/samples/0.jpg
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_grasp_camera_stream.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_grasp_camera_stream.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_grasp_camera_stream.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_grasp_camera_stream.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_inference.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_inference.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_inference.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/inference/single_demo_inference.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/camera_stream_inference.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/camera_stream_inference.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/camera_stream_inference.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/camera_stream_inference.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_controller.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_controller.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_controller.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_controller.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim_control.launch b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim_control.launch
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim_control.launch
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/launch/panda_sim_control.launch
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/cran_feld_pendulum.stl b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/cran_feld_pendulum.stl
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/cran_feld_pendulum.stl
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/cran_feld_pendulum.stl
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/d435.dae b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/d435.dae
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/d435.dae
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/objects/d435.dae
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/package.xml b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/package.xml
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/package.xml
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/package.xml
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/BallBearing.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/BallBearing.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/BallBearing.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/BallBearing.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CommonLine.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CommonLine.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CommonLine.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CommonLine.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CranfieldFace.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CranfieldFace.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CranfieldFace.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CranfieldFace.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CylinderPneumatic.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CylinderPneumatic.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CylinderPneumatic.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/CylinderPneumatic.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/FuelLine.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/FuelLine.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/FuelLine.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/FuelLine.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Housing.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Housing.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Housing.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Housing.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Pendulum.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Pendulum.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Pendulum.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/Pendulum.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/RodEnd.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/RodEnd.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/RodEnd.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/RodEnd.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/panda_arm_hand.proto b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/panda_arm_hand.proto
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/panda_arm_hand.proto
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/protos/panda_arm_hand.proto
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/camera_publisher.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/camera_publisher.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/camera_publisher.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/camera_publisher.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/constants.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/constants.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/constants.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/constants.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/gripper_command.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/gripper_command.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/gripper_command.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/gripper_command.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/joint_state_publisher.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/joint_state_publisher.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/joint_state_publisher.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/joint_state_publisher.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/panda_ros.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/panda_ros.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/panda_ros.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/panda_ros.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/single_demo_grasp_action.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/single_demo_grasp_action.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/single_demo_grasp_action.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/single_demo_grasp_action.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/trajectory_follower.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/trajectory_follower.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/trajectory_follower.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/trajectory_follower.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/utilities.py b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/utilities.py
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/utilities.py
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/scripts/utilities.py
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/.franka_simulation.wbproj b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/.franka_simulation.wbproj
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/.franka_simulation.wbproj
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/.franka_simulation.wbproj
diff --git a/projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/franka_simulation.wbt b/projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/franka_simulation.wbt
similarity index 100%
rename from projects/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/franka_simulation.wbt
rename to projects/python/control/single_demo_grasp/simulation_ws/src/single_demo_grasping_demo/worlds/franka_simulation.wbt
diff --git a/projects/perception/.gitignore b/projects/python/perception/.gitignore
similarity index 100%
rename from projects/perception/.gitignore
rename to projects/python/perception/.gitignore
diff --git a/projects/perception/activity_recognition/benchmark/README.md b/projects/python/perception/activity_recognition/benchmark/README.md
similarity index 100%
rename from projects/perception/activity_recognition/benchmark/README.md
rename to projects/python/perception/activity_recognition/benchmark/README.md
diff --git a/projects/perception/activity_recognition/benchmark/benchmark_cox3d.py b/projects/python/perception/activity_recognition/benchmark/benchmark_cox3d.py
similarity index 97%
rename from projects/perception/activity_recognition/benchmark/benchmark_cox3d.py
rename to projects/python/perception/activity_recognition/benchmark/benchmark_cox3d.py
index fb63294bacdd11bc7d02ebfca01783b6574cd8db..7955222248cb686a28267a041f3cfc4ec4dca766 100644
--- a/projects/perception/activity_recognition/benchmark/benchmark_cox3d.py
+++ b/projects/python/perception/activity_recognition/benchmark/benchmark_cox3d.py
@@ -29,7 +29,7 @@ logger.setLevel("DEBUG")
 
 
 def benchmark_cox3d():
-    temp_dir = "./projects/perception/activity_recognition/benchmark/tmp"
+    temp_dir = "./projects/python/perception/activity_recognition/benchmark/tmp"
 
     num_runs = 100
 
diff --git a/projects/perception/activity_recognition/benchmark/benchmark_x3d.py b/projects/python/perception/activity_recognition/benchmark/benchmark_x3d.py
similarity index 97%
rename from projects/perception/activity_recognition/benchmark/benchmark_x3d.py
rename to projects/python/perception/activity_recognition/benchmark/benchmark_x3d.py
index 5256cf308d26946fde93f3c1ee2e4a47baca97a9..487aabbc6ee7f5186399e5abfc9d9e568ef5b5b4 100644
--- a/projects/perception/activity_recognition/benchmark/benchmark_x3d.py
+++ b/projects/python/perception/activity_recognition/benchmark/benchmark_x3d.py
@@ -29,7 +29,7 @@ logger.setLevel("DEBUG")
 
 
 def benchmark_x3d():
-    temp_dir = "./projects/perception/activity_recognition/benchmark/tmp"
+    temp_dir = "./projects/python/perception/activity_recognition/benchmark/tmp"
 
     num_runs = 100
 
diff --git a/projects/perception/activity_recognition/benchmark/install_on_server.sh b/projects/python/perception/activity_recognition/benchmark/install_on_server.sh
similarity index 100%
rename from projects/perception/activity_recognition/benchmark/install_on_server.sh
rename to projects/python/perception/activity_recognition/benchmark/install_on_server.sh
diff --git a/projects/perception/activity_recognition/benchmark/requirements.txt b/projects/python/perception/activity_recognition/benchmark/requirements.txt
similarity index 100%
rename from projects/perception/activity_recognition/benchmark/requirements.txt
rename to projects/python/perception/activity_recognition/benchmark/requirements.txt
diff --git a/projects/perception/activity_recognition/demos/online_recognition/README.md b/projects/python/perception/activity_recognition/demos/online_recognition/README.md
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/README.md
rename to projects/python/perception/activity_recognition/demos/online_recognition/README.md
diff --git a/projects/control/eagerx/demos/__init__.py b/projects/python/perception/activity_recognition/demos/online_recognition/activity_recognition/__init__.py
similarity index 100%
rename from projects/control/eagerx/demos/__init__.py
rename to projects/python/perception/activity_recognition/demos/online_recognition/activity_recognition/__init__.py
diff --git a/projects/perception/activity_recognition/demos/online_recognition/activity_recognition/screenshot.png b/projects/python/perception/activity_recognition/demos/online_recognition/activity_recognition/screenshot.png
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/activity_recognition/screenshot.png
rename to projects/python/perception/activity_recognition/demos/online_recognition/activity_recognition/screenshot.png
diff --git a/projects/perception/activity_recognition/demos/online_recognition/activity_recognition/video.gif b/projects/python/perception/activity_recognition/demos/online_recognition/activity_recognition/video.gif
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/activity_recognition/video.gif
rename to projects/python/perception/activity_recognition/demos/online_recognition/activity_recognition/video.gif
diff --git a/projects/perception/activity_recognition/demos/online_recognition/demo.py b/projects/python/perception/activity_recognition/demos/online_recognition/demo.py
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/demo.py
rename to projects/python/perception/activity_recognition/demos/online_recognition/demo.py
diff --git a/projects/perception/activity_recognition/demos/online_recognition/requirements.txt b/projects/python/perception/activity_recognition/demos/online_recognition/requirements.txt
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/requirements.txt
rename to projects/python/perception/activity_recognition/demos/online_recognition/requirements.txt
diff --git a/projects/perception/activity_recognition/demos/online_recognition/setup.py b/projects/python/perception/activity_recognition/demos/online_recognition/setup.py
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/setup.py
rename to projects/python/perception/activity_recognition/demos/online_recognition/setup.py
diff --git a/projects/perception/activity_recognition/demos/online_recognition/templates/index.html b/projects/python/perception/activity_recognition/demos/online_recognition/templates/index.html
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/templates/index.html
rename to projects/python/perception/activity_recognition/demos/online_recognition/templates/index.html
diff --git a/projects/perception/face_recognition/README.md b/projects/python/perception/face_recognition/README.md
similarity index 100%
rename from projects/perception/face_recognition/README.md
rename to projects/python/perception/face_recognition/README.md
diff --git a/projects/perception/face_recognition/demos/benchmarking_demo.py b/projects/python/perception/face_recognition/demos/benchmarking_demo.py
similarity index 100%
rename from projects/perception/face_recognition/demos/benchmarking_demo.py
rename to projects/python/perception/face_recognition/demos/benchmarking_demo.py
diff --git a/projects/perception/face_recognition/demos/eval_demo.py b/projects/python/perception/face_recognition/demos/eval_demo.py
similarity index 100%
rename from projects/perception/face_recognition/demos/eval_demo.py
rename to projects/python/perception/face_recognition/demos/eval_demo.py
diff --git a/projects/perception/face_recognition/demos/inference_demo.py b/projects/python/perception/face_recognition/demos/inference_demo.py
similarity index 100%
rename from projects/perception/face_recognition/demos/inference_demo.py
rename to projects/python/perception/face_recognition/demos/inference_demo.py
diff --git a/projects/perception/face_recognition/demos/inference_tutorial.ipynb b/projects/python/perception/face_recognition/demos/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/face_recognition/demos/inference_tutorial.ipynb
rename to projects/python/perception/face_recognition/demos/inference_tutorial.ipynb
diff --git a/projects/perception/face_recognition/demos/webcam_demo.py b/projects/python/perception/face_recognition/demos/webcam_demo.py
similarity index 100%
rename from projects/perception/face_recognition/demos/webcam_demo.py
rename to projects/python/perception/face_recognition/demos/webcam_demo.py
diff --git a/projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/README.md b/projects/python/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/README.md
similarity index 100%
rename from projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/README.md
rename to projects/python/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/README.md
diff --git a/projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/benchmark/benchmark_pstbln.py b/projects/python/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/benchmark/benchmark_pstbln.py
similarity index 100%
rename from projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/benchmark/benchmark_pstbln.py
rename to projects/python/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/benchmark/benchmark_pstbln.py
diff --git a/projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/demo.py b/projects/python/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/demo.py
similarity index 100%
rename from projects/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/demo.py
rename to projects/python/perception/facial_expression_recognition/landmark_based_facial_expression_recognition/demo.py
diff --git a/projects/perception/fall_detection/README.md b/projects/python/perception/fall_detection/README.md
similarity index 100%
rename from projects/perception/fall_detection/README.md
rename to projects/python/perception/fall_detection/README.md
diff --git a/projects/perception/fall_detection/demos/eval_demo.py b/projects/python/perception/fall_detection/demos/eval_demo.py
similarity index 100%
rename from projects/perception/fall_detection/demos/eval_demo.py
rename to projects/python/perception/fall_detection/demos/eval_demo.py
diff --git a/projects/perception/fall_detection/demos/inference_demo.py b/projects/python/perception/fall_detection/demos/inference_demo.py
similarity index 100%
rename from projects/perception/fall_detection/demos/inference_demo.py
rename to projects/python/perception/fall_detection/demos/inference_demo.py
diff --git a/projects/perception/fall_detection/demos/inference_tutorial.ipynb b/projects/python/perception/fall_detection/demos/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/fall_detection/demos/inference_tutorial.ipynb
rename to projects/python/perception/fall_detection/demos/inference_tutorial.ipynb
diff --git a/projects/perception/fall_detection/demos/webcam_demo.py b/projects/python/perception/fall_detection/demos/webcam_demo.py
similarity index 100%
rename from projects/perception/fall_detection/demos/webcam_demo.py
rename to projects/python/perception/fall_detection/demos/webcam_demo.py
diff --git a/projects/perception/heart_anomaly_detection/README.MD b/projects/python/perception/heart_anomaly_detection/README.MD
similarity index 100%
rename from projects/perception/heart_anomaly_detection/README.MD
rename to projects/python/perception/heart_anomaly_detection/README.MD
diff --git a/projects/perception/heart_anomaly_detection/demo.py b/projects/python/perception/heart_anomaly_detection/demo.py
similarity index 100%
rename from projects/perception/heart_anomaly_detection/demo.py
rename to projects/python/perception/heart_anomaly_detection/demo.py
diff --git a/projects/perception/lightweight_open_pose/README.md b/projects/python/perception/lightweight_open_pose/README.md
similarity index 100%
rename from projects/perception/lightweight_open_pose/README.md
rename to projects/python/perception/lightweight_open_pose/README.md
diff --git a/projects/perception/lightweight_open_pose/demos/benchmarking_demo.py b/projects/python/perception/lightweight_open_pose/demos/benchmarking_demo.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/demos/benchmarking_demo.py
rename to projects/python/perception/lightweight_open_pose/demos/benchmarking_demo.py
diff --git a/projects/perception/lightweight_open_pose/demos/eval_demo.py b/projects/python/perception/lightweight_open_pose/demos/eval_demo.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/demos/eval_demo.py
rename to projects/python/perception/lightweight_open_pose/demos/eval_demo.py
diff --git a/projects/perception/lightweight_open_pose/demos/inference_demo.py b/projects/python/perception/lightweight_open_pose/demos/inference_demo.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/demos/inference_demo.py
rename to projects/python/perception/lightweight_open_pose/demos/inference_demo.py
diff --git a/projects/perception/lightweight_open_pose/demos/inference_tutorial.ipynb b/projects/python/perception/lightweight_open_pose/demos/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/lightweight_open_pose/demos/inference_tutorial.ipynb
rename to projects/python/perception/lightweight_open_pose/demos/inference_tutorial.ipynb
diff --git a/projects/perception/lightweight_open_pose/demos/webcam_demo.py b/projects/python/perception/lightweight_open_pose/demos/webcam_demo.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/demos/webcam_demo.py
rename to projects/python/perception/lightweight_open_pose/demos/webcam_demo.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/README.md b/projects/python/perception/lightweight_open_pose/jetbot/README.md
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/README.md
rename to projects/python/perception/lightweight_open_pose/jetbot/README.md
diff --git a/projects/perception/lightweight_open_pose/jetbot/evaluate.sh b/projects/python/perception/lightweight_open_pose/jetbot/evaluate.sh
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/evaluate.sh
rename to projects/python/perception/lightweight_open_pose/jetbot/evaluate.sh
diff --git a/projects/perception/lightweight_open_pose/jetbot/fall_controller.py b/projects/python/perception/lightweight_open_pose/jetbot/fall_controller.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/fall_controller.py
rename to projects/python/perception/lightweight_open_pose/jetbot/fall_controller.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/flask.png b/projects/python/perception/lightweight_open_pose/jetbot/flask.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/flask.png
rename to projects/python/perception/lightweight_open_pose/jetbot/flask.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/jetbot.sh b/projects/python/perception/lightweight_open_pose/jetbot/jetbot.sh
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/jetbot.sh
rename to projects/python/perception/lightweight_open_pose/jetbot/jetbot.sh
diff --git a/projects/perception/lightweight_open_pose/jetbot/jetbot_kill.sh b/projects/python/perception/lightweight_open_pose/jetbot/jetbot_kill.sh
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/jetbot_kill.sh
rename to projects/python/perception/lightweight_open_pose/jetbot/jetbot_kill.sh
diff --git a/projects/perception/lightweight_open_pose/jetbot/requirements.txt b/projects/python/perception/lightweight_open_pose/jetbot/requirements.txt
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/requirements.txt
rename to projects/python/perception/lightweight_open_pose/jetbot/requirements.txt
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/example/Images/.keep b/projects/python/perception/lightweight_open_pose/jetbot/results/.keep
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/example/Images/.keep
rename to projects/python/perception/lightweight_open_pose/jetbot/results/.keep
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_sit.wbo b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_sit.wbo
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_sit.wbo
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_sit.wbo
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_standing.wbo b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_standing.wbo
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_standing.wbo
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/protos/human_010_standing.wbo
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/pose_demo.wbt b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/pose_demo.wbt
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/pose_demo.wbt
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/pose_demo.wbt
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/brown_eye.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/brown_eye.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/brown_eye.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/brown_eye.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow005.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow005.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow005.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow005.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow009.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow009.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow009.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyebrow009.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes01.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes01.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes01.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes01.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes04.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes04.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes04.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/eyelashes04.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_diffuse.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_diffuse.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_diffuse.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_diffuse.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_normal.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_normal.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_normal.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/female_elegantsuit01_normal.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthnormals.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthnormals.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthnormals.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthnormals.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthtex1.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthtex1.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthtex1.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/keylthtex1.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_diffuse.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_diffuse.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_diffuse.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_diffuse.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_normal.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_normal.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_normal.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/male_casualsuit02_normal.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/middleage_lightskinned_male_diffuse2.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/middleage_lightskinned_male_diffuse2.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/middleage_lightskinned_male_diffuse2.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/middleage_lightskinned_male_diffuse2.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/short01_diffuse.png b/projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/short01_diffuse.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/short01_diffuse.png
rename to projects/python/perception/lightweight_open_pose/jetbot/simulation_pose/worlds/textures/short01_diffuse.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/static/eu.png b/projects/python/perception/lightweight_open_pose/jetbot/static/eu.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/static/eu.png
rename to projects/python/perception/lightweight_open_pose/jetbot/static/eu.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/static/opendr.png b/projects/python/perception/lightweight_open_pose/jetbot/static/opendr.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/static/opendr.png
rename to projects/python/perception/lightweight_open_pose/jetbot/static/opendr.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/static/opendr_logo.png b/projects/python/perception/lightweight_open_pose/jetbot/static/opendr_logo.png
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/static/opendr_logo.png
rename to projects/python/perception/lightweight_open_pose/jetbot/static/opendr_logo.png
diff --git a/projects/perception/lightweight_open_pose/jetbot/templates/index.html b/projects/python/perception/lightweight_open_pose/jetbot/templates/index.html
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/templates/index.html
rename to projects/python/perception/lightweight_open_pose/jetbot/templates/index.html
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/__init__.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/__init__.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/__init__.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/active.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/active.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/active.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/active.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/pid.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/pid.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/pid.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/pid.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/pose_controller.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/pose_controller.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/pose_controller.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/pose_controller.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/pose_utils.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/pose_utils.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/pose_utils.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/pose_utils.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/robot_interface.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/robot_interface.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/robot_interface.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/robot_interface.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/visualization.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/visualization.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/visualization.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/visualization.py
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/webots.py b/projects/python/perception/lightweight_open_pose/jetbot/utils/webots.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/webots.py
rename to projects/python/perception/lightweight_open_pose/jetbot/utils/webots.py
diff --git a/projects/perception/multimodal_human_centric/audiovisual_emotion_recognition/README.MD b/projects/python/perception/multimodal_human_centric/audiovisual_emotion_recognition/README.MD
similarity index 100%
rename from projects/perception/multimodal_human_centric/audiovisual_emotion_recognition/README.MD
rename to projects/python/perception/multimodal_human_centric/audiovisual_emotion_recognition/README.MD
diff --git a/projects/perception/multimodal_human_centric/audiovisual_emotion_recognition/audiovisual_emotion_recognition_demo.py b/projects/python/perception/multimodal_human_centric/audiovisual_emotion_recognition/audiovisual_emotion_recognition_demo.py
similarity index 100%
rename from projects/perception/multimodal_human_centric/audiovisual_emotion_recognition/audiovisual_emotion_recognition_demo.py
rename to projects/python/perception/multimodal_human_centric/audiovisual_emotion_recognition/audiovisual_emotion_recognition_demo.py
diff --git a/projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/README.MD b/projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/README.MD
similarity index 100%
rename from projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/README.MD
rename to projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/README.MD
diff --git a/projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/gesture_recognition_demo.py b/projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/gesture_recognition_demo.py
similarity index 100%
rename from projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/gesture_recognition_demo.py
rename to projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/gesture_recognition_demo.py
diff --git a/projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_depth.png b/projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_depth.png
similarity index 100%
rename from projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_depth.png
rename to projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_depth.png
diff --git a/projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_rgb.png b/projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_rgb.png
similarity index 100%
rename from projects/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_rgb.png
rename to projects/python/perception/multimodal_human_centric/rgbd_hand_gesture_recognition/input_rgb.png
diff --git a/projects/perception/object_detection_2d/centernet/README.md b/projects/python/perception/object_detection_2d/centernet/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/centernet/README.md
rename to projects/python/perception/object_detection_2d/centernet/README.md
diff --git a/projects/perception/object_detection_2d/centernet/eval_demo.py b/projects/python/perception/object_detection_2d/centernet/eval_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/centernet/eval_demo.py
rename to projects/python/perception/object_detection_2d/centernet/eval_demo.py
diff --git a/projects/perception/object_detection_2d/centernet/inference_demo.py b/projects/python/perception/object_detection_2d/centernet/inference_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/centernet/inference_demo.py
rename to projects/python/perception/object_detection_2d/centernet/inference_demo.py
diff --git a/projects/perception/object_detection_2d/centernet/inference_tutorial.ipynb b/projects/python/perception/object_detection_2d/centernet/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/object_detection_2d/centernet/inference_tutorial.ipynb
rename to projects/python/perception/object_detection_2d/centernet/inference_tutorial.ipynb
diff --git a/projects/perception/object_detection_2d/centernet/train_demo.py b/projects/python/perception/object_detection_2d/centernet/train_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/centernet/train_demo.py
rename to projects/python/perception/object_detection_2d/centernet/train_demo.py
diff --git a/projects/perception/object_detection_2d/detr/README.md b/projects/python/perception/object_detection_2d/detr/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/detr/README.md
rename to projects/python/perception/object_detection_2d/detr/README.md
diff --git a/projects/perception/object_detection_2d/detr/eval_demo.py b/projects/python/perception/object_detection_2d/detr/eval_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/detr/eval_demo.py
rename to projects/python/perception/object_detection_2d/detr/eval_demo.py
diff --git a/projects/perception/object_detection_2d/detr/inference_demo.py b/projects/python/perception/object_detection_2d/detr/inference_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/detr/inference_demo.py
rename to projects/python/perception/object_detection_2d/detr/inference_demo.py
diff --git a/projects/perception/object_detection_2d/detr/inference_tutorial.ipynb b/projects/python/perception/object_detection_2d/detr/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/object_detection_2d/detr/inference_tutorial.ipynb
rename to projects/python/perception/object_detection_2d/detr/inference_tutorial.ipynb
diff --git a/projects/perception/object_detection_2d/detr/train_demo.py b/projects/python/perception/object_detection_2d/detr/train_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/detr/train_demo.py
rename to projects/python/perception/object_detection_2d/detr/train_demo.py
diff --git a/projects/perception/object_detection_2d/gem/README.md b/projects/python/perception/object_detection_2d/gem/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/gem/README.md
rename to projects/python/perception/object_detection_2d/gem/README.md
diff --git a/projects/perception/object_detection_2d/gem/inference_demo.py b/projects/python/perception/object_detection_2d/gem/inference_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/gem/inference_demo.py
rename to projects/python/perception/object_detection_2d/gem/inference_demo.py
diff --git a/projects/perception/object_detection_2d/gem/inference_tutorial.ipynb b/projects/python/perception/object_detection_2d/gem/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/object_detection_2d/gem/inference_tutorial.ipynb
rename to projects/python/perception/object_detection_2d/gem/inference_tutorial.ipynb
diff --git a/projects/python/perception/object_detection_2d/nanodet/README.md b/projects/python/perception/object_detection_2d/nanodet/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..92c456c235ea211433d616ee5657006a529ad344
--- /dev/null
+++ b/projects/python/perception/object_detection_2d/nanodet/README.md
@@ -0,0 +1,18 @@
+# NanoDet Demos
+
+This folder contains minimal code usage examples that showcase the basic functionality of the NanodetLearner
+provided by OpenDR. Specifically, the following examples are provided:
+1. inference_demo.py: Perform inference on a single image located in a local directory. Setting `--device cpu` performs inference on the CPU.
+2. eval_demo.py: Perform evaluation on the `COCO dataset`, implemented in OpenDR format. The user must first download
+   the dataset and provide the path to its root via `--data-root /path/to/coco_dataset`.
+   Setting `--device cpu` performs evaluation on the CPU.
+
+3. train_demo.py: Fit the learner to a dataset. PASCAL VOC and COCO datasets are supported via the `ExternalDataset` class.
+   An example of training on the `COCO dataset` is provided. The user must set the dataset type with the `--dataset`
+   argument and provide the dataset root path with the `--data-root` argument. The config file for the specific
+   model is selected with `--model <model name>`. Setting `--device cpu` performs training on the CPU. Additional command
+   line arguments can be set to override various training hyperparameters from the provided config file, and running
+   `python3 train_demo.py -h` prints information about them on stdout.
+
+   Example usage:
+   `python3 train_demo.py --model plus-m_416 --dataset coco --data-root /path/to/coco_dataset`
\ No newline at end of file
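The README above describes a `train_demo.py` script that is not reproduced in this excerpt. The following is a minimal sketch of what such a script could look like, assuming the generic OpenDR Learner interface (`fit()`/`save()`) for `NanodetLearner`; the argument defaults and the exact `fit()` signature are assumptions, not taken from this patch.

```python
# Hypothetical sketch of a NanoDet training script along the lines described
# in the README above (train_demo.py itself is not part of this excerpt).
import argparse

from opendr.engine.datasets import ExternalDataset
from opendr.perception.object_detection_2d import NanodetLearner

if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument("--dataset", help="Dataset type (coco or voc)", type=str, default="coco")
    parser.add_argument("--data-root", help="Dataset root folder", type=str)
    parser.add_argument("--model", help="Model whose config file will be used", type=str, default="plus_m_416")
    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
    args = parser.parse_args()

    # Wrap the already-downloaded dataset in the OpenDR ExternalDataset format.
    train_dataset = ExternalDataset(args.data_root, args.dataset)
    val_dataset = ExternalDataset(args.data_root, args.dataset)

    nanodet = NanodetLearner(model_to_use=args.model, device=args.device)

    # Train and save the resulting model; the fit()/save() calls follow the
    # generic OpenDR Learner API and their exact signatures are assumed here.
    nanodet.fit(train_dataset, val_dataset)
    nanodet.save("./trained_nanodet_{}".format(args.model))
```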
diff --git a/projects/python/perception/object_detection_2d/nanodet/eval_demo.py b/projects/python/perception/object_detection_2d/nanodet/eval_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..759c6aa4bd75c32d714abce4bccbc5aa35c52c2a
--- /dev/null
+++ b/projects/python/perception/object_detection_2d/nanodet/eval_demo.py
@@ -0,0 +1,34 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from opendr.perception.object_detection_2d import NanodetLearner
+from opendr.engine.datasets import ExternalDataset
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--data-root", help="Dataset root folder", type=str)
+    parser.add_argument("--model", help="Model that config file will be used", type=str)
+    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+
+    args = parser.parse_args()
+
+    val_dataset = ExternalDataset(args.data_root, 'coco')
+    nanodet = NanodetLearner(model_to_use=args.model, device=args.device)
+
+    nanodet.download("./predefined_examples", mode="pretrained")
+    nanodet.load("./predefined_examples/nanodet-{}/nanodet-{}.ckpt".format(args.model, args.model), verbose=True)
+    nanodet.eval(val_dataset)
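Because `eval_demo.py` takes the model name as an argument, the same recipe can be looped over several pretrained variants. The sketch below reuses only the calls shown in `eval_demo.py`; the model names come from the list documented later in this patch, and the data root path is illustrative.

```python
# Hypothetical helper that reuses the eval_demo.py recipe above to compare
# several pretrained NanoDet variants on the same COCO validation split.
from opendr.engine.datasets import ExternalDataset
from opendr.perception.object_detection_2d import NanodetLearner

DATA_ROOT = "/path/to/coco_dataset"  # illustrative path
MODELS = ["m", "m_416", "plus_m_416"]  # variants listed in the notebook below

val_dataset = ExternalDataset(DATA_ROOT, 'coco')

for model in MODELS:
    nanodet = NanodetLearner(model_to_use=model, device="cuda")
    nanodet.download("./predefined_examples", mode="pretrained")
    # Same checkpoint layout as used in eval_demo.py above.
    nanodet.load("./predefined_examples/nanodet-{}/nanodet-{}.ckpt".format(model, model), verbose=True)
    nanodet.eval(val_dataset)
```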
diff --git a/projects/python/perception/object_detection_2d/nanodet/inference_demo.py b/projects/python/perception/object_detection_2d/nanodet/inference_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..71e95b15fb2e648675e536166181b0ad0a333b7c
--- /dev/null
+++ b/projects/python/perception/object_detection_2d/nanodet/inference_demo.py
@@ -0,0 +1,34 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+from opendr.perception.object_detection_2d import NanodetLearner
+from opendr.engine.data import Image
+from opendr.perception.object_detection_2d import draw_bounding_boxes
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--model", help="Model that config file will be used", type=str, default='m')
+    args = parser.parse_args()
+
+    nanodet = NanodetLearner(model_to_use=args.model, device=args.device)
+    nanodet.download("./predefined_examples", mode="pretrained")
+    nanodet.load("./predefined_examples/nanodet_{}".format(args.model), verbose=True)
+    nanodet.download("./predefined_examples", mode="images")
+    img = Image.open("./predefined_examples/000000000036.jpg")
+    boxes = nanodet.infer(input=img)
+
+    draw_bounding_boxes(img.opencv(), boxes, class_names=nanodet.classes, show=True)
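The inference demo above processes a single downloaded sample image. As a hedged extension, the sketch below runs the same detector over every `.jpg` in a user-supplied folder; it relies only on the calls already used in `inference_demo.py` plus the standard library, and the input folder name is hypothetical.

```python
# Hypothetical extension of inference_demo.py above: run the detector over
# every .jpg in a folder instead of the single downloaded sample image.
import glob

from opendr.engine.data import Image
from opendr.perception.object_detection_2d import NanodetLearner, draw_bounding_boxes

nanodet = NanodetLearner(model_to_use="m", device="cuda")
nanodet.download("./predefined_examples", mode="pretrained")
nanodet.load("./predefined_examples/nanodet_m", verbose=True)

for path in sorted(glob.glob("./my_images/*.jpg")):  # illustrative input folder
    img = Image.open(path)
    boxes = nanodet.infer(input=img)
    # Visualize detections; close each window to advance to the next image.
    draw_bounding_boxes(img.opencv(), boxes, class_names=nanodet.classes, show=True)
```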
diff --git a/projects/python/perception/object_detection_2d/nanodet/inference_tutorial.ipynb b/projects/python/perception/object_detection_2d/nanodet/inference_tutorial.ipynb
new file mode 100644
index 0000000000000000000000000000000000000000..96af81257ce73a2f35180751d8ab3d9fd7cb1abf
--- /dev/null
+++ b/projects/python/perception/object_detection_2d/nanodet/inference_tutorial.ipynb
@@ -0,0 +1,790 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "id": "f8b84e11-4e6b-40f6-807b-ec27281659e9",
+   "metadata": {
+    "tags": []
+   },
+   "source": [
+    "# Nanodet Tutorial\n",
+    "\n",
+    "This notebook provides a tutorial for running inference on a static image in order to detect objects.\n",
+    "The implementation of the [NanodetLearner](../../../../docs/reference/nanodet.md) is largely copied from the [Nanodet github](https://github.com/RangiLyu/nanodet).\n",
+    "More information on modifications and license can be found\n",
+    "[here](https://github.com/opendr-eu/opendr/blob/master/src/opendr/perception/object_detection_2d/nanodet/README.md)."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "b671ddd9-583b-418a-870e-69dd3c3db718",
+   "metadata": {},
+   "source": [
+    "First, we need to import the learner and initialize it:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 1,
+   "id": "b6f3d99a-b702-472b-b8d0-95a551e7b9ba",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/manos/new_opendr/opendr/venv/lib/python3.8/site-packages/tqdm/auto.py:22: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n",
+      "  from .autonotebook import tqdm as notebook_tqdm\n",
+      "/home/manos/new_opendr/opendr/venv/lib/python3.8/site-packages/gluoncv/__init__.py:40: UserWarning: Both `mxnet==1.8.0` and `torch==1.9.0+cu111` are installed. You might encounter increased GPU memory footprint if both framework are used at the same time.\n",
+      "  warnings.warn(f'Both `mxnet=={mx.__version__}` and `torch=={torch.__version__}` are installed. '\n"
+     ]
+    },
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "model size is  1.5x\n",
+      "init weights...\n",
+      "Finish initialize NanoDet-Plus Head.\n"
+     ]
+    }
+   ],
+   "source": [
+    "from opendr.perception.object_detection_2d import NanodetLearner\n",
+    "\n",
+    "model=\"plus_m_1.5x_416\"\n",
+    "\n",
+    "nanodet = NanodetLearner(model_to_use=model, device=\"cuda\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4ef5ce70-8294-446a-8cc2-b3eba5e1037b",
+   "metadata": {},
+   "source": [
+    "Note that we can alter the device (e.g., 'cpu', 'cuda', etc.), on which the model runs, as well as the model from a variety of options included a custom you can make (\"EfficientNet_Lite0_320\", \"EfficientNet_Lite1_416\", \"EfficientNet_Lite2_512\",\n",
+    "                \"RepVGG_A0_416\", \"t\", \"g\", \"m\", \"m_416\", \"m_0.5x\", \"m_1.5x\", \"m_1.5x_416\",\n",
+    "                \"plus_m_320\", \"plus_m_1.5x_320\", \"plus_m_416\", \"plus_m_1.5x_416\", \"custom\")."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "10c74615-61ec-43ed-a1ae-57dceedfe938",
+   "metadata": {},
+   "source": [
+    "After creating our model, we need to download pre-trained weights."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 2,
+   "id": "8a680c28-8f42-4b4a-8c6e-2580b7be2da5",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "save_path = \"./predefined_examples\"\n",
+    "nanodet.download(path=save_path, mode=\"pretrained\")\n",
+    "\n",
+    "load_model_weights=\"./predefined_examples/nanodet_{}\".format(model)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "0e63e7a9-4310-4633-a2ac-052e94ad3ea0",
+   "metadata": {},
+   "source": [
+    "and load our weights:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 3,
+   "id": "e12f582b-c001-4b9d-b396-4260e23139f6",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "Model name: plus_m_1.5x_416 --> ./predefined_examples/nanodet_plus_m_1.5x_416/plus_m_1.5x_416.json\n"
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "INFO:root:No param aux_fpn.reduce_layers.0.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.0.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.0.bn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.0.bn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.0.bn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.0.bn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.0.bn.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.0.bn.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.0.bn.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.0.bn.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.0.bn.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.0.bn.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.1.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.1.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.1.bn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.1.bn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.1.bn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.1.bn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.1.bn.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.1.bn.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.1.bn.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.1.bn.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.1.bn.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.1.bn.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.2.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.2.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.2.bn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.2.bn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.2.bn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.2.bn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.2.bn.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.2.bn.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.2.bn.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.2.bn.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.reduce_layers.2.bn.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.reduce_layers.2.bn.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.2.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.2.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.0.blocks.0.shortcut.3.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.2.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.2.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.top_down_blocks.1.blocks.0.shortcut.3.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.depthwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.depthwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.pointwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.pointwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.dwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.dwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.dwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.dwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.dwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.dwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.dwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.dwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.dwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.dwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.pwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.pwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.pwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.pwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.pwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.pwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.pwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.pwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.0.pwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.0.pwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.depthwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.depthwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.pointwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.pointwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.dwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.dwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.dwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.dwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.dwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.dwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.dwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.dwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.dwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.dwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.pwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.pwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.pwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.pwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.pwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.pwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.pwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.pwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.downsamples.1.pwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.downsamples.1.pwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.2.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.2.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.0.blocks.0.shortcut.3.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost1.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.primary_conv.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.ghost2.cheap_operation.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.0.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.0.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.1.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.2.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.2.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.bottom_up_blocks.1.blocks.0.shortcut.3.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.depthwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.depthwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.pointwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.pointwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.dwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.dwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.dwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.dwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.dwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.dwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.dwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.dwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.dwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.dwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.pwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.pwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.pwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.pwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.pwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.pwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.pwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.pwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_in_conv.0.pwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_in_conv.0.pwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.depthwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.depthwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.pointwise.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.pointwise.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.dwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.dwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.dwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.dwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.dwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.dwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.dwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.dwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.dwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.dwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.pwnorm.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.pwnorm.weight.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.pwnorm.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.pwnorm.bias.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.pwnorm.running_mean.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.pwnorm.running_mean.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.pwnorm.running_var.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.pwnorm.running_var.\u001b[0m\n",
+      "INFO:root:No param aux_fpn.extra_lvl_out_conv.0.pwnorm.num_batches_tracked.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_fpn.extra_lvl_out_conv.0.pwnorm.num_batches_tracked.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.0.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.0.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.0.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.0.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.0.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.0.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.1.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.1.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.1.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.1.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.1.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.1.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.2.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.2.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.2.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.2.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.2.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.2.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.3.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.3.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.3.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.3.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.cls_convs.3.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.cls_convs.3.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.0.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.0.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.0.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.0.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.0.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.0.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.1.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.1.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.1.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.1.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.1.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.1.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.2.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.2.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.2.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.2.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.2.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.2.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.3.conv.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.3.conv.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.3.gn.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.3.gn.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.reg_convs.3.gn.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.reg_convs.3.gn.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.gfl_cls.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.gfl_cls.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.gfl_cls.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.gfl_cls.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.gfl_reg.weight.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.gfl_reg.weight.\u001b[0m\n",
+      "INFO:root:No param aux_head.gfl_reg.bias.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.gfl_reg.bias.\u001b[0m\n",
+      "INFO:root:No param aux_head.scales.0.scale.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.scales.0.scale.\u001b[0m\n",
+      "INFO:root:No param aux_head.scales.1.scale.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.scales.1.scale.\u001b[0m\n",
+      "INFO:root:No param aux_head.scales.2.scale.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.scales.2.scale.\u001b[0m\n",
+      "INFO:root:No param aux_head.scales.3.scale.\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mNo param aux_head.scales.3.scale.\u001b[0m\n",
+      "INFO:root:Loaded model weight from ./predefined_examples/nanodet_plus_m_1.5x_416\n",
+      "\u001b[1m\u001b[35m[root]\u001b[0m\u001b[34m[09-01 18:10:13]\u001b[0m\u001b[32mINFO:\u001b[0m\u001b[37mLoaded model weight from ./predefined_examples/nanodet_plus_m_1.5x_416\u001b[0m\n"
+     ]
+    }
+   ],
+   "source": [
+    "nanodet.load(path=load_model_weights, verbose=True)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "4e3ce347-391f-45a1-baf8-91d8a9ce04a7",
+   "metadata": {},
+   "source": [
+    "We will also download one sample image and load it, so we can use it in OpenDR for testing:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 4,
+   "id": "9efba6eb-5235-4e31-a002-1bcb6e311704",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "nanodet.download(path=save_path, mode=\"images\")\n",
+    "\n",
+    "from opendr.engine.data import Image\n",
+    "image_path = \"./predefined_examples/000000000036.jpg\"\n",
+    "img = Image.open(image_path)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 5,
+   "id": "9f083566-3d57-4db6-baa5-0fefdf8fa8ea",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<matplotlib.image.AxesImage at 0x7f41f0521730>"
+      ]
+     },
+     "execution_count": 5,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAMsAAAD8CAYAAADZhFAmAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOz9Z6xlWXbfCf7W3vucc+993oSPyEgT6W1l+SKrKJJFSqIIseUIanpkBjOjLy1gHDAjzJf5KqCBAWYwGCOgGyN2qyVREtmkqCqaIllFsrxlmsrMykgXGd68ePaac/beaz6sfe6LLFZmFWjUMUAeMirjvXjm3nP22nut//r//0tUlfev96/3rx9+uf+pX8D71/vX/79c7wfL+9f71494vR8s71/vXz/i9X6wvH+9f/2I1/vB8v71/vUjXu8Hy/vX+9ePeP2lBIuI/DUReUVEzovIP/vL+B3vX+9f/7kv+Yvus4iIB74H/AxwEfg68PdV9bt/ob/o/ev96z/z9ZdxsnwEOK+qr6tqC/wb4Bf+En7P+9f713/WK/wl/MxTwNt3fHwR+Oh7fcNgeVmXjhyZfywi9t/ysaIIgpR/c3L4d9DylYqqfaTl75R/VVXyHR/PvwVQBRHK1ysiggI52w/LqvPvt98J4gTK73KCvZ7ymu88qeevQUDu+J39986/vvwMJ3e+a+b3QOlfx+HPVVXekRPoO9/3ne/t+++r3vH1/RdWRJy397XYVDgRbu9PyDHjvGNhWLGxuIgTd8czsStrZne8R8r58LV93+vR8nDe8b6/7z7NH0z5xDs+zeHrFYTvz4j0+75Hvu93AGj+vo9RuOO+CjAdj4lt+86HUK6/jGD5kS4R+SfAPwFY2Nzk7/zX/zVOIKktwKEPVN6TcqLLGYctSAc03rFU19TBzRdNcI4uKwddR8rQpURVBTIw6yLTlOmSPa0qeFLOAHQxA0LOGeTwIeQEk5iYxsQsJjTZom6Cp6k8qkrMMKg8o9ojAuM20nYJ7xziIKdMlxXnPEEcKgBKzkpMGRXwInjnCN4RRPDeHpoTR3AOsN+z13ZMJh1dhpSVnJQYlZwzKkLO9tD7oJovlDve0zxWy3rUbO8dzRwPOywMFMnCg0eW+ejZE/w/f+cbjG9P8aI8+ugx/umnP8HK6Ag5JyCXRZZREl999evcPNin6+x+zbpE22ViTHQx0cVM0vJ6s5KyotnuRcq2Ockdrz1rRlFyVMSV95ft+wVIOdlO0G9qOYHappqylkDAniuC5oSIkNU2xJwSqhlRSCmX3wff/I3Pvuua/csIlkvAmTs+Pl0+945LVf8F8C8Ajpx7QBXw3kPWcnrYDUg5I+IQgZgyIuAVYs6Q7MY23lM7h2pm5D3ZCyl4u7HOoy4zjQlHpgqBKgiqni4DGYIPzLpImxMxZergqYLgg6NJnmmXiCmhKLUPLNQVPjgOZi1kxffbt4otThFb7MGRY8I5QcRyXucCHRGnQi6Lm5xxTsjOkZISnODLaZMVZrEjxhLMGVJSNJcbKRaUdhrO1847TmcXPDllcllsqWzgwXlElJSgTcIoK2jm9mTG2miEDw7nhdRFdsYtu5Mxy8OymeRsiy4r3nuevf9DpBQtdHIipsg0dqSszLqWNiYmaUbXRtqUmcaOWdsxix1tTEzbjpgS01lHlxMpKzEl21hiIqVk7zPbexXV+UnWH/sxJgDc/KRQUky2SWV7zd5ePSD281SR8l8FxL97ZfKXESxfBx4UkfuwIPkl4H/2Xt/gRFisAk0IFvkI3lkguOxAbOd3IYDaYqacQpQ3mQHvBHGh7FaZDoeIMPAeapjmDGpflxOgGXGOREIEGh9wksquLvOUrw5+HqhV6ANTCU1FzJaGxKwkmOc9OWeCc/jg5qeXE0Fyst2/3/GVcvqlkioKbUzMRAg+E1NmPI1M20hSIXYJ57w9WCe4LKizHdMyGFvEKKgo4hyC3BFIJd0rp1xKtuC6BMSMVIH9WYd3wqgJTEKgG0+ZtImdyZhTZaFpSYudt3tc+xpfj2zxlTMsZdsoNGt5XSDiSoqWcXKYAivl1EGJ5R7lnIk50sZEl+zPLEXaLtKVz086C7bJbGb/3kWmbccsRrqYaduOlEsQxkgCUpeIXSTnbL9T7YQj2/16t+svPFhUNYrIPwV+G/DAf6uqL77X9whiN1XtiK3KQk2qVM5TeU/MEUWoJNhuKkLt7OPKOXuEJcF3KMF7glemyW5ApaCuHNsIs2zHsPeB2EbbwXLCe0cdLFjsDQnee9TbjqbZdrfgHOo9KopXR9RI0gwozjmc2ILKyRaqYgVEyhHnHd45HH3NIsSc7QSLma6zlKGvy1QhRktdcrZFmFRxTgjOkRSi7RwA5V7aDelPGO/tvStSvkzAKaL2cUtNzi1OIWaljYmVuuKWjC2Y2sjN/Ql2pHmc2EkevCcle70xTXHiyOWERSCVe5XVNgdxYvWbOtvkkh2R4hziSioqubwPZ5tk2SxVFBFLTb335JTKBqHz1xJTn27l8ppSOU2VLkdsM2oZzyJJk21MMTJLieks8uKv//q7rtO/lJpFVT8DfOZH/XoB3Lzihi7bg3diN0/Ebrgg1N7RpUQQR+M9Hj3cncoOZbWOoiq0KbHXRUpNbvWEKt4J5HIzUTuZvLfCsKTywTsklBqBsnAFomr5upIKZMULDEJFTInK2e6ZxRZuU1co0HYRRCxtKydcTNDGhCq0baKdRVv4YnWY6zeOlHHe246bLCBVLSVLKZNSLsEBsSspGxl1jlAKIe9cSf0s8L048EqSTNSAZsWJBd72rOPY8iJvXt3BIcRZ5Or2Lil1VFVFjKksZrvvbTzguTe/jQO8D7ZpOU8VKpwTahdAPFmhCjWVDzhxeBFC+XsIVdloPK48H4eQEERcWSN5Xl+CpaMignOBjD1zVUt3wWpC55VKhAG1BWzdsDa0n2VBb+vNO8/yoHnXdfo/WYF/5yUCA+9tt8iZaYw456i8wzl7kXYTwSP4UJW6VHHBoznPA8ayCzulskLlHANvu9g0RlLKBG8/KzuhwpEy2NlkKURGDWjIGUthS36LWA3UJZJYEe5E6USZtpk2Wu0BWDGrlGJSmXWRruTGOTLf+VJSYrIdfzqzE07KiaACmbITi6Vzin0eVZy3955zxntXUthDBJBSUyRsV9asiHi886W8st/rnUNdYJJqarF7d313j6Ori4Ag3tHNOq7vT5ilDu+z/QxLPAvAUjNuE/uzsW1+zqOa52iTqBBztGJe9bD2MHiRGDPeCZW31wdW44lzOOepgyd4A1K8eOqqxjtH7QODuib4QBV8+beKKlTkZJmDc4alOnF4H+aYmqXwqV+FfwpN/P7r7ggWhBAcPb7rnaMOgarshnam6Dy9cOUUEedAsVQgZzq1OkYKTisoToRRCHRZSc52ZF/SHsFg4UqssGtLXUK2oHElpbKdyxuogO3enWZmbTK0J1tgWqblmHaZmJLVVEBWoW2zBUZMpBRJZYGrKqmkiikZiiNlIYsKQixQ6TyDtEVYAh
jstTtxSI+COZ3D3s4JMdviDCHYz51D7n3NIOQQ2JcVlnQPEcfVvX0e3lhDRAghEGeJW3sTZt2MUbVkwVeQJe+FnD23u2W+fXFGE5TKKbUXanEEDyIZLwGnGS92IqiU+jEbaqZkvHMl3Swbl/bghZJSmteo/f2ISckpU1UeL0Jd26lUe0cm2/FePl85hzhPJcKwrkg509SVrbfK0NdJO3nXdXp3BEvpQ+RSeFbi5whTShm8I0i5iRQIEcHnSHSCRmWaM7NsJ0uTHaEAAIlyFIuwUAWcc8ySFZ4OxeHIFeSYqdR29FnMdJpw4iHa75x2kTbp4emgzF9P5QOaEgllNjPkSrCUx1KkkioVuDapolnoCgyqCLlLlmSopVtkcNmCNWXIKSEF8QNL75A8v4e5gBWqUhZynzYKYgUMKSVC5axuKL2KnGzRO4FcXnMlcGV3nw+fPoZTW4zdbMb+tGNvesDG0lFyTogoObfl9dQ0zSrfvn7LTsRyGpbfhA8BUUunbRnbSRIceLE0FlECSu2wjzVTeUVzZFh50IQn07WdIYgKXewgCcMBLDSe6TjhvIEjqvZzU874qSfnRC4f9/fHTnhnrQRVdsfTd12nd0WwAPNcOZd8fpKSwYMKqe1Yqiu8E/s4KyrQxUhXCtJZTJZSCDTB4UtalrDejBdhEDwuZ7qYSECXM6ijS1aTJLXaoC2omWKplgrMorI37Zi2cd5EVBWcg5Q6K7Jjpou2MLUUn7n0EFK20yyX0yeLAo6shvH3KVPwVveollMtp/mDBex1ZSWTcV5Kv0StHik7MjCHmRxC0mwBUtI+QRGxmlCd/bdPVbI6lI7d/RmVF05sLPLWpEMnymySuLW/xz2bttn0J6H1jhL3rq/gHXRZcN5BFrKUzSBr2Qz6QPJIcuRorw3sXjkpOK9qCX77Db4gcKVFiy+njhcDh5gmfCcFfFEqsRPMoVSubEmS8WSCQOUy3jA4NGcqB2gmf19T+M7rrgmWrMI4RsYxkVWYtB3Be1uUGQ6iMgyl8DMAlEnMzLrEtIs48QSsV9HGhCv1R1JDxpwTaxCWIjBmJZZiN5edJnMI/4bgygOBLmXGbcdk1jGd2cJ2UtJH75hptr5AabzhBC8OLUV3ViWW06hfzP1OVjZ9vDeUJxUok7KjS0F7XFk4Tg6RuZzsNTPvRJfUSvteuMxrIICYkxXrJTidy/NGHdlStqQOdY42tmQJHDu6zNu395FxoG0T13b3rcEXGkRqnHbzlPjYYsOoqdidxr4Ut/RQFMXNU0OrD2yDlFKQi+YSKFo2Iz9POz2HGLMFtKfViBdHwtGp1WIkQXFoLKks9t48zFsSIoc9Iiv9CoKIrYVxeveQuEuCxR7gTGHc6Rwrz7mlrqzYY+poKsegsgBqoxL7RZpsF2kVFEfGehFSKv4qKeKg7eHacnrF0vgylMZOk6haivOMrwJ15YmzyKxTZq3SdXleEjoRQmB+Cqnaw9FYimqFlCNgu6aWNDFLaYZRHpwTC57SXBNk/hoLsms7bAa87YQu+L63Nk+pDukspfcUDUZVT/lCKcFrv9eVU1ecfX+XleRt4YJjazzl6NLiHHDIMXJ194CYIt7VBUQoAYyw2DQ8cnyRN25PSj2RSVhHPWZ7DymXQJjXZpYGIW7enZcSam5+H9QWfYGNDSlzh51456zJW9K5nO3UAbVA4xAttyUh83vUI2Gqti7eo76/O4Kly5nb045pNhh1POvKzRW6tt9dM9NO2KElOLHjUsuDBirX56AJQZi2EVWDf1ux5e3dYb0hqoi3tEvQebDk8mA0BAY1kJWDWcfBpLUivdxoESFT+hsFWtaC0OX5wsjz3avfUYHy5Kzuct42A4NtbTH0iz5rxoklIF4cSS21cyGgapwt0dIiFHeYOjohluLelr07TOsoiworeVRAe+qNZpx0djJ6z429A06tLuGDp2oq4iyxtT9j0o6pwsAQuOwBB6Ue+T/+xDN2X3IiqaW8MVtqO4uJWcxMYmK/bVER9mcdUZWDaWtNV4VJl0gI0zYx6SKzLjFLiVlS2sJkaKMzmko2aDnnXHo54J1aOSfQo1xSnpPzDs2Wvln5128gpUn6Huv0rgiWnJXbk66gIEKXhVnbkZLivcMHX47yktM6R1OCRMTNu71zMl3pyKoKrSbbkUSYdFYLhAIP55nxtmKyIjVHO1WqYA3NLiqT3HJrd8p4GhGsCYna7hhzJql1/HOyRmFfVOdshbohOFIa+2WXy2Vxlycjd8DBttXb53zfTRZb4OLL9xfY2QppxamlcbGkdk4cTvvTJ/dr5nAXxWo4VbWUrZQJjkQlBn87cVzb3eXx40epgqMNjnbacXu/ZdJNWC3QsGpfD9hrHVah/Dzrxmud5ycfpS4SzfONxZXTrieUSqkFRbSwJsQKc5RU2gGqBrS0sSublHDQtcy6XJ4JTGOmTRiNRmGWIvuzyDQaWDNLmVkXmbQWhLMu0aXM6+/Bw78rgiWpsjeJpXCXeec5aUaSImRC5ea4vCvQn6jtFClbTRCzzgl7/WLUkg6UUEMQZjGRk/18K+ns4E+5z6cV1yZC8IwnifE0lqTAfr93gko2ACIpKlqImSX/vyMNopwYiswL8pwN+fIFPIhlp5PS40D6PpFS2RkwJwAaK3qOSeMLb86IiPZpW5f2/u00YY74OO9xWjhV/cIou7AtxL55qdyetiwNrQ45CMDUOvs70ynHczK0S+7ci7Xs0HZv7TRj3gBzUnovzuo5A8xcobn06VePghYmtvNWG5Zg7xu0i7Ur96sHNLLx5nIk+HqOGh5+jaWM2t+gHg1US8i6aA3jv/ffLLzrOr0rggUsdZikTDuzws1yTMtlVaxTbbCnI5cFPwiehTowrAJtUm4eTImizJ9fyfd76nnsj+0stJ3xj1K52d55Yk+BSd5Qqax0uWD4mXla5ZzDBY9zdmqkwimyoLBCX0oH3U6Dw76G966kbbawREvdIIbkiHPz/zrNFoQ94lN21UL5Kz9W56eG9jtzATekP8GwfzOEquzWBRpwau8n5gQSMHDWnsf+LKLA6uKQ29tjWjpms8jN/QMePJpKemdfm3MujUil66aA4FxdTv50x6K1brw40GxESftA59Qe1TwHNA5rMZ1/7Nz3U/TteFQSTsL8+w1lvANG1x75sAUizuFSuddByuZy16NhhioNVAx+ba04yxm0ApydJsE5BsF2lC4lYwd7RyWA9wTv0Lbvc1int7+xRpo7PD1S34cpjb2UKRwnK34PphHvfQEPHFVtcgGQkvMWNq9hzkg+3CUpi9V5b6dE4TeJ2Ody6RHkUtC7fov3BjQ4J/O+SqnzgUPkRkSgvK95vBR0By3UndIZL+URKcY5DO6yIn29UxanL2lUxqPZ6paUlZ3plGPLQy4EBw7aNnJtdw8oCzqn+WtLyRay9wOk0EjyHazxw0VrR5n34Y4AOPx6792cb3b4vg9P1pwPuWN9ym20IAc4kFy+381h/qyHbANLS63/pNKz2zt7ge9R4d8VwdLvFsPG08ZMTBFUCF6om0AdoPaeEDyFD
0hdBRzCrFDasyrDEEgDyNoa69fbA+8K92q+cFLpFMeIc37OvO1vvoEGyqRN88ZjSqkkCWrNU+whZQVfCT67eRpTVR5NliIWvvHhg895TutBe+C3MHV7ohXKcpqy54b02E1/uuTSY3Jg2GsJOMWatf3OmAvC05M1je0guKSoE5w1fOapKqWE6tSDxPkuf3P3gOMri1h8OuKsY2tvTJdmJSis2WmbUiHDviM9kxI4+Z1pE4cwuv09FTaABe0h/Z/5++6vd6Z+9twQayf0sLAPbl4voj2MnOcBZ6/Zzb9f1VPQkne97opgMc2GErxSBYeLgkdYHNUsDyoa78gos5QOxVJIKdAiHmtWxmx1RFMFgy1TKrtrqSXU9vySuRC88a16oVZfE/W7eRfTXCQmUoiVCqH0Jpwwf5gueERNdNY/15QyXoVIBnFFCWnpCv2D7RnJhY0r3lnnfuca7sj9aMwlGOYdFQ6bFFYIzxeP7xeLFc9auF99+6IgExZYWtI+kcOUBaEtYKuKNVyvHYw5tWzBgnekaeTG3oxpZ7SWvtnZp3u2wN08fdKebVxOhD5g+t+rpb48hHAL2sDhur2z/rCaqKg1lTmQgh72T6xHZahoKQHLqWYnXErpcMMqm4r31TtAmB903RXBoiiTmPClYF0aVAyqwLD2NMEz8H6ebyomtupiZm8WcV2mTXkOJ9viUKNVFNw8eutQx9boDqFypKgQAlmiUTr6Ard0w1UtWBSd70CqlvfnlKhLo7PFCJQpG9sYLbRzMd5Tykooik7EBFf9CdAvUoeBFCKKpszCeMKZ62/TbJzhTWBPArEwW6TQ1KXHifv8uyB8Asa7cgXxsht8mKbBXJat5XDJqvNF1RbNTBM8oFzfPeCJY0cMmRMhtpGDacf+bMzKcI7PkgvTYN48fceu3QcH83/rUylgnnId1gsONBVticwl3fba+2dRckgois/DE8eCz4LSeZn/u9qeZZILNZp/jKkEd1FbvkcedlcES68/TykhzqS6lbOUp0ux5AnYYhPHpIuMW5OvOqfzWrcvbh1CFYz51cVM5RyJjKsddWl+RZeZdZYy6HyHMXg2Fqo6WB2iQIoJUQekObrVJRNn5VTK5dAXsHJ4ErlCcdF5hjV/LySdQ8YB2JjOePL6Hhtv3GKnm/C/+oBnZzbl5e2bvJQy55sR15sR+6JE70pDT+ZgSH9S9EpTAw3Krq6HxMS56qpk8NaoBVA69XQxUyfTD+1OZywNaoaDimnd0k5aJrPE7fEBp9cEEY9z+o5dui/4gTtOmDuL9cOFfWdgzU8Kd1jbWV1WWAAlGA8rD0t1+5/TAwjzrrzrG51u/poMKRQQI8E68eXkyX8qvfv+664IFu8cy01taZQ4muBwmNjJcuo0h2YVYdpZRzgUfUdMhoD5sku6yjGqKrpcmMF9Z7gIpbTkxh7Bt7EUAJbn5oKkdF1nhbceImpJI5rEAhT7XTHnUqQbFb9fqFUdLA00IgWh8vNUKCWTu7oghKSc2rnNR6/s8NQOLNKwP1rlc/vX6V6fsDEI/Fja4EOuY3v/gG7rFvvH1nm7ajjvHRdUuJFgP2c6Eut1xThibIZ8CEP3geOLXGG+iO+gflha5625V+79dDojoywPG3arCeKEru24vntAOmHy6P7n9Tu2oVH+HfVJn0qp5nfIEOCO9Anm+pJUOGOHwVZ+jv2Qd6R7fT3q3lET9Tw546kdBqUFmYjJPwyw0TmSedenYSIwqD0pmTCqqkzxltV6GlKO4jYlVIVh7Rmq7SjTLpVOSa9FEKqSy3cx4pwwGtSklBjHVDr2Quk7W09ApBSIMKxrg4bLC9Oy+xtxsE8XHJX3zLr4jlPbSVFOeof3jq4rLIKC5okzxSNZWcuRs1du8MyVHR6c1iz4RfxCgMoTYiRMYHdnTL01oGsjbuBZZ4CrHflCx33NlJ8gk2phNqzYrSuuxcTNvTGvpAlXxDEJFV1VMw2O7CtaFDQUuNwQtR6Onuf6QErWB/GxI2pmZzxjY3HAJbOyMdXk3piYZtShoqfpOyd0XcT7wGS6XWTF3qj5/QkIpWazms4VJrUraZ4g5DjmYHyVqlpiMNgsdUbEeYOFe5HaIe/NPlYtiGTqg/FPF+xWm9nv6yXPcud9eI91encEC+bYosHktgNv6ISqzAVg5hACVZH9xqKAbHMmdj0d2xntu2hTrH7J1AGqusJ3jtksMZsluoKS9Z1y50renpXgIQxrpm2HlhzZO6EpNBMtxboPgsRyYkXD9L0D7gjUrFjTMmZql7lnZ4+HLt7gsa0ZJ9wig/oIBHChIhcOl8sVq9WIg9iy3jZopyTtkOAtVZllGEGOgiwIo1ZpmHLM20L8uFYcMGWax/j9TDcyjwCNgekgM2sqoxalwHgQmFaBaVXROmFQVay5inCwz7fPf4/81GNc39vj1PoSL7x1w5gCs8i1nTExdzQFUcuZwmh2qCa2D66xtXetbDwFHSuLed4rUpNce+8NGSyAQHAOzS1Jb4G7Vhqv9v11VRk5E1egdYd33tSZCMF5C7xSz7k5+GCbGfRlni/B5nBZ5ifdeyDHd0ewQDGC8LBQVQy8n58UitI4Tz2ygn2WMtOUGHfCuMvFC0rMvKEUiV1K5GxsYc1qELRz1E6I3pFyoo2ZLKZ7QQvBz1kRWgc7PUJBR7TURDkr3gV7XaW2TXUpNrWHS3MxaLAACwLLkwlnr9zgoWt7nJtVrI2WqRbXoKBiRgMxtWZO4JyyQM12N+FkGlkRHg2ByJLJLhOyp51GfHakoOAFXbLTbyEGRguLOHXE0OKqgE6t+WaNyoALoJXlL3m/iN40IZrQBC+/fp6NB9dYbJa5sLvPsdVVO42DJ7WRrf0p427GQnOY7tjWnmjjGESsd5Uz5dCwfkcXCyXfIkxFScl6Hrnc40mxQbKGZmc/f96Mtd+VkjVs+7NAysY0azsGtcmUTX7s5kiGat8Bu6Nno0oVKlADXyaz8buu0bsmWLwThsHTeF9MC+zmh0J3KBkAXvpueaT2wuKgNhZyF0vdYuKmaUyQhar2DKtAJULlHLW3NGhnYgtlUFvn3ajxgvdKVdKo0RyNsa78LGWK7AEJUDvbnbqYkWBNVAQqEVamU87c3ObMpS3OHignF9cYDk/CYs8xkfL/zux3SuItyeg6I6nZjhODjDOIL/l7y5wi4xpP9rbgnAhxlgnBW13WCjkqOYFuRVN/BiWPM7JnAqkedNCk5FmCGpwhK1zI8JGj93OkWeLopW1uLqwUXp6QxpmDScfu+IDNRePX5dQjh55AjfhV/rvn3ySqbVLeYW0BgdrbJuIKDy14Ry3gJZtZCZa6BmcsYu968w7MP6FsYoI3LwXUMgJxVKFC1aFOiiWVMdp9yQb6nlDSWNjSmVnbkTHmd8zpB65PuIuCxcwlIGFalIH3LPhC1sOOeFdIk5qVYRUYhMAmihfPzfHMRFYl72y8p6szTTGvi7m36MmEIHZ6YPLTUHnqENAsiMdUeHNBlBJcoMuZg2ImkcmmdxFHVye8eLTNLO1uc3LrNqdv7HF2omxWCywM
jlBtVGjfOBNfGAB9gNruZ+4lFMcVWHINb812IBQGQrTdP9c9gRTSIMFUELXAIAodEfGCBMEFOewhzRRXFZi4Nf+0LtvXpi4TxKOtErvMdJC4eqThiFvE3YgcGS1x+1vnOZUy570QkylCt/bH3HfUoG9xHjSaDFgdg1Cx09Zc3TPKjHH+eupODyf7eTvALJu8MQLmcDjYv5pvgvW2fEGMde6B4AR8UVwGqezfvOlggoOU7L/BWWZQl39zRUDn8KXOFSbx3dfoXREsWWF31tqp4u2N9QvClX/3KLWz9CaIMvCBhRBQlHFMrA0q2pQZF/Zo5RxDZ7lw9460SFhowhz3aIJjVAUGVUDUdr3Z7pjR0rA8RKPhH7SRXHvaqEhwNKljsL/HmStbLF/fZmOn5WRbsTocMRqcILoOsFopOXAFlqagN9YvdSTJhNIw05QtmFxkwVfszCZIfWeDEVwCvEOnoFPBV26estjR5qgXAtklJLhy8ggMhdxlSCbK6g3mNFuwxRTRDKFx/MnkCk/de9qK7dag14dPH+dvvXWF/7R3nefIpDZxbXePlDp8VZmIq+gZnVMWmiH3rC9yee+2Fe5zlrj0gFRB0XrFZ1FMllNWc0ktVTD+nSGh1lS2gGsLgKA5I/EQklYxgqShmYfOPJQ6CQA1eyZXKEUp2r3f6+5yNAyKnt2b/BZMB49aUFin3PBwpxDEaC+CMomR/S6iYrBvyqZ2NG6VmdFF1eIEIjRNzTDn0kcR1pqaQSguMk6onafd3UF3rrOyeZQ0neGnUyY39ohbu9S7Y+pbB6xOMiupZrlaYKHexC8EdJjNKc2ZS0mctdZ/cUW91z8or+CsPhGE7Aopxh1aM9VVzX5qzWtYHFp2Ze16aowabOvUsD1R49B5iF3ExZ7uge04ydJESUYaTVi/RWKhkhSqyixFvsc2/3jjKfLY6henAjcS546c4hcXhmy+9gbPxcz17QMjQxZxnSkzbdGBcs/KgK+IARxg/DhzmSl/R3GlYevlkDOG9kYhnrnUTmDOSu6/V/oOvcyDUDGRjtFYU5Fmy9whJ89FZUZqTdiawh32ft7tuiuCxUh4QnRKIpl9KZ6q7EaKCcSyCE6h9tYvmaTMJGX2u1QsWQ+loikncjaipSuQZFVsWZekYqkK7HeR2omhX95kyUPvGa0s8/Z//+/4+Pp9DPaEuN+RO/BVoAoNTlaQ4PFNDc6jzpIFK/RtByvqbogRp55cegDOCaIefOlGp75cUVQT3hktfcE1ZK9kb/cCtd05y7wwggKDIxmpLIXRrEgqasyodppkQXKPFJfiD0P/co52cpfFdqG9xalTm4ySQ4mGXgnkidU5J5oVfuHpxzn+2hu8fXOHaTelaZYNzaKnrljf5MfuPUZdVeZ3kBJtpjhLmrfCLBv7QdVEf1GMSJvFCviYMzG7YjyikCCSiwzC2eunEG7pUXx71qYR86XR7OamHYVpVwLrsDenoiWo7/aTRSzS22QmAl56HF6ZEqmDx6vQFap3wgp/nxNtsg552xVGsOYSIHYj+nK6y5ntaaISU1VWvjILpOL165x5TAUy+7/3BnJ1gS9dPc9fXXiEWj0yCEgTcJUz50lxdmI4K75zBvEOJZO7jASHqwK565AqEEIwik0ujbNcUrK+IajGkk6F/VhVFYLQaTRLpL4bHzxSOSvqy4PNgp1aGVQM2qYEjaizVEoOfbwkFYGa+UaZHZEmXCW8MN3i0+vPQGeL0CXFNYIMHDrL6DiztFLxqUcf5vztG1z4+ks8/LFlqkF15wNFxHF2ZYX71jfu6EVpMfk+ZA3H2OF8MIaEYLt/Qb60pFAxRfMu61Jxd+vZ4+bOGbMFX5fN7jVp0SypZR6z2KGYz4OIM9aFmkFJmxJtEe5lhf/P4M+hwReR/xb4eeC6qj5RPrcO/FvgXuBN4BdV9bbYOfZ/A34OGAP/WFW/9UN/B7bgyNBUwXb7YqqXVJnGTBBrFJq2XcwFppw2wzoYcxhBJNAWSkNpeRiMnIuXlwjJe5BMVQVC4alLtt129rW3Wbywx8bJB/nK1ef4dnuND66ept9xenmwiEOlHPHiEG/pQm57HlIxwSud62yap9J5DqW7bvVAf/UEz1QWyyA0TF2kiaE0Rp3l8mqMBTUhTEHT8vx1IfahFri5p/SIiNUxJbg8fYcfYgV7MmG64Djih+SpoVsaBG0cREVmxSjjoKOi4vHV4+zsT3j9V36H4598lqWzx+dN276JaK+pD5LCNJ73WyD4CsUauc6ZXZG/g/2NAsHcMmVYl95IqU2K+ZhK6c5rT/f3hSxpqajmhDgtaGdN76nsSu8GdH4q/epo8K7r9Ec5Wf6/wP8D+OU7PvfPgN9T1X8uNgbvnwH/J+CvAw+WPx8F/l/8kNks9lILTQXTUwPUwRlC4WwX6XIilEy0jZa3Dp1j4M215aDwiOrgcckktSnn4vaopYNuYylyspy5dhm8IWJOofvmGyz8yU1GgxG+qfnI6Wf4gze+yJnpKkeXVgs/yRna5Ap9ReznCwZEuBCsPig1Ad4Tu0hVO5DePiEjlUMIlib1TFspj60Y7A2p2Msta2GhpHU2FcDImQHxkFM8/D5fAs4DXQFIgqdNrbk8iom+wL4OZ2lt6jpCFl7gBs+eOYt02Wj8leAGHgkCM/M/QwSZWj1AhOWFAQubFZf/4BvcWFvknp/6CPXKggU2iel027zFtFevujnrV0r90Vut2huR0l13hvCVFWL9muJDVjrvznmydmju6JWwTux9ikA3U0ub/aGrSE6WWko/8UAtlS2x9p7XDw0WVf1DEbn3+z79C8BfKX//l8DnsWD5BeCX1Qg6XxGRVRE5oapXfvjvObwp6sz/K5Siq648EiOi4IvJUe93O3AOD0zFiI2+FOpNENDARCLqIEUrbnvO2SzqnM/UOKF68S2Off0miwvrxRy8YTQY8bH7nuU/fu8b/KPmI8bvCmo1R0l4xQs4ZwHoxDD+FHHY7uaA2HVEhFCBrypbFLmgQ67YPeHQaLxpK0QTy2HEdhpzxq8bQFB7C1SMJS0oUuxlNBcmsslWbK6JApLwjYeC8kg/Wcl5JJdGn7N68U3d5WdXnkInilTWA8qacFMz48vFPNCABBN+qYKrHadWVmkHyoVf+W2Gj5zj2EcexoVA2+4wnt4sJ7/O2cl9S2DO+dKMUiTSxUtNtFgYSfk6kWLgUb4m22bqncc5j3PBTAOlkDk1410F5eeiVrU4HwpEbX0018sdELr4F2+yd+yOALgKHCt//0FTv04B7xksOSvTWcfioCnryDErg4eaIAyAEGyeihkdZAJC7c25MgRPBAKRQRXm7h0iUDc1VRdpnHJAJGVnrGInaOngu1feZvWr11hd3qRqqoK2GTBw7+pJ3jx2gi9tnecnjj40D+qcE+o8Tn1pmLmC1vQLuC/cM66pD6FasUVoLftC6VcxTbpVrKbgc8JyM+LW/m009OxaK3ypXC8IRApKmHJCXaHKTzIkey05GuLkgrN6KMMcu5XyukV4y9/m3oV16uzIbUJGvvyeUmMlbPxH7aA
qabMzZgAxISNHM3Lcc+9xdm5e4bV/9SabH3uSwT1lDEWRFRjCbU4sdp6Wk78nOfbs5DlzmHl/rafQpztqnqzQtjODx7GmY0xKVVXklIhF/2K2sLn0agpZs9wJFLoiPmvj7F3X6Z+7wFdVFZEfcoD96UvumPw13DhCUrMvqmqPiLkGuiZQFYBi0Gu5ncdjvZbKCW02XL8Wx3BQ41RxPswfBAgDCXSaGQXHtJhrezHBmL52keUvX+DE6kkG3gpx70MpIG1xffzkk/za85/j3M5tTi1tQmO7rKo5SJIFTaDO6gfNGRVr1mUVwtATx1NzaFHFqxXqlAWTUkKSzomAWjyvlusB5/M+gnHLfF1Zn6RLVE4M9iyd86QJIaM+obE077ANATWIVIL1cpzz5K7ICkRRB98aX+LnzjyNzjKyZMCFb9V4ab50zyuBILjaoHAXTI6tKUPtkDbDTFhlgeWFEVvffolb347UTy4QjizYYi8wbz8FQOh9w3TeXEvRlKx9T6oXhgUXCoXfOGEZLWqDYO9VwUugKosmA+Tyu0pdJMkspGLKZZqCBU7jw58/DXuX61qfXonICeB6+fyPNPULQO+Y/LV63znVrMww/b2jIgdXJmXZoggCA2+zWHw5YnunR58yTTCo2Ze0zNAwh4rQRnOOaXwmVZamkZXx65dpP/8aZ1dOMKxqfFVboed8GVenkJVaaz5x34f4te99kf/16OMMdWjeXSVNlODB9YzWUsP0Ml6x3otrAr1TuPbS39jhQoWrPbmd73PltFEWw4i9bt96Ea44VBYOXKIACBwSy23LMjMIhyPnhKur0uw0jUgyhVtxsbdUbYsp6iLHZRFqQRY82haGdACJiht6Ox0GntSfELFgtgNBK8HtgB5EGDh0nNhYWmQ1CFvf2mLc3CQ8tkE4MiJpJsaI4HChAg6RTDBwR8ROw7JW3kHj77l65hhaHF96R8uihhQVO3XnmhYLUl9+pvN2KvX/3ntFH0rk/vTl3vVf3vv6DeAflb//I+DX7/j8PxS7Pgbs/Ej1ChBzsSwqXKsuZmLZvb2zMQRBBDT1zHJQG3w0qgKNd5aWYdysphAna4FBMbaoC7Fx4Dxy8Sb5My/x4MYphqHCO5sp4n1ljicFZvQuIN5zcukoK5tn+P3r30WjVdHBVVZ2a5lqVfotztuD8k2Dq4ItMufpYkev8NNoHXaDSA/TISmsQ+8cQ1+ZZ1Yq3C4AVwibSckx2s8zApUtnA6kK+5GwaPRHGvwvWqyONRXxUxDhOdml/jg5klzwRlZUOAFKiCARpAkZA/ZK1IbJq9kCBkZONhX0n5CWsi7CekUxuDGiSPNKmfqkyx+a0z3B2+Trx8c0ol6OLsU96ihhb2V650yZC0gSD/ZwG6HUfJR+7vrSzInBTUzEV7vwWaES9vMgi8aFle+njtQ7h9w/SjQ8b/GivlNEbkI/F+Afw78ioj8L4G3gF8sX/4ZDDY+j0HH/4sf9vPL87M3Xm5EFmXOISr/LmrpVsDRhIAv8wa9F+pCvAwihuKgc7181ozX8kZDRaPK5PxFJp95kXuXj1K52na4qprvSta4oxhwW/0QqpofP/sB/tOLv8dbty9y/5F70GD08xwzGqxwzCTMGsuVrrZDouJdhXctOZtrjJdAkt7Q2xgHjl4K60maqSQUf7OIxNA3GFBvxnMEO2W0Kybq9MVwcYgpIIbNsFEg2yCk6tAPeCaZt2fX+atHHkcbZ0TMUhuIQB4bRSZ1GVmq0MbhKyFuzUAUt1aTdzrc2KDqGI014GpruJqEAdwssuJWWAur7P7JLlfSBV4ME7r1mtCYKrKy2RTmy2arr4BYvaa6GJWIEMucnR767u2lYko0laXR3tu68EXaAGUERzRPOJE7JrSpOe709dCfKVhU9e+/yz/99A/4WgX+qx/2M3/Ab8EmP9m0q8aLdfFLpzkXJ8JKXBlEJDRVZbQJV8ReWtipQoGMS5ML2z1DMPRn5+W3OPjtl3lg5SQi3kznxCZjSc9JK2le8WCgCgHtIk3T8LH7P8TvPPf7/KOFNRq/iHMGTSbNUOx2ehZx7qKlDKY3IlQNsQwphYh2kHu1nuYivLLURlSoQoUPA7p2RuOCpVXel13FKCVknb9OspCIeAkQFWL5+vI1mWzcK+cQsQbfa9ObPDAYUXc1sujt1OzKIu/XjQcaIzCKeNJOawvQGifogQEnfqUhdA6dJrRVsjM+nCSskZoU3VcW8gIPLixz0rfcmk2QjSFyLDBT5fwb17j3ng3q4MveUNKsnOlSNBa6E9rWGI8x57lJR8o6h7dTNtfKFDtiaRWkbDVoVsfuztgYygKzNjLrIjnl0tz+wdfd0cGnIFfeSI3DyjGsfJna2zcDmcuDUcrYawg4mwzWj8R2PZuXQ6+uUr9c/tYrTD7/PR5YPUPwA9K8uKQU3FoWelE9AmZrCp0o4jxHlzY5cvZx/vjid/jpez+GUKGNcddyxJpfGNmyR3esCZhx6pCMGXY7X2BNSie/Tx+sL5+KaZ0PNTHOaGrrXWRM4kzMpd9jC8mJGKwu9vPRni9ljUxF5ukJPX9OlRf33uJvPPIIUgnqMqkT8rgzaBgMHl/wUJWFlpJRQxLkEXArIrmwh6cJ2vJ+K4GZ2vt3guusUSoqSAeaEotVYNEvkV9Vbnz3Fr9/43t84G88g6BMZjMWhoN50FjxVFsvLmeGVTDun6Y7mpzWhVfKSe09OUd6B//54lGbroAwV432hiijdzAR3nn9WWuWv9DL3mRm0ASWBhXLdWCp8oyqQFUkqOYeadryabJpWwWvMutWVdpk5tNgJ1DtvQ1wFc+VLz7P5Hdf4fTiJtkJKubf5ZwjBNPEqC+KOe/mNrKuCJW02DGJFx49/givDisuXX0NYrKFm4ygJ+rKgFH7o0VU1vcJxIdiyOdRX97XXJPu5lws5xxROwbViBljxPm5e4v1CsruUHnCoEHqCuqSbnhB6rLYHYWFW8AH1w+EStxmgueAI8MjphFqgXHCBUd2ivrS4OwLgZGxilFFa4U2m75mpmjEuGiquCgmCaj7RWrfI1HRrjRfZ5k0SeRZ4trNWzz36mv8/CNPciwMGDUNywsLpn6UQ5tWS7APnXF64Z9lYWVOpvdF+9RrYHypBe1n2IQxmE9N1jmsQt/Hf7frrgiWfqG0XRndnE0GS1YcUDtHE/whPq42HqGfMDzNmUnMtAptQXmyQpuVWdvxxme+Cl+9yrmj91DVI5sWHPqMxB5AihFyf+xjC7sUg10/KwKHSGDQBD74wMf5vd3Xmdzegi6hMRn9pvQy0izawywsXNdUmPMj5NzR+4Dh5dChxBdWrneGkCksLSyw2+6D9kOHXJnmBZoUkpK6aOiWWkNXAqAZV4e5qV/feAOrAdQ5/mT/LT509F5cU6PjjM6yMZkDUBqWGmw5Ogn2PrWkfSOBKaRZNiZ0FohYZthmXAdpbOBF7rJNL2ttt9M9kwO4Snh57ya/deMNnn3iCY40KwzfHNumMGcA31nomxdc7/qiarC5fa3dw65roYAsWqKhDwotAE4udZArKs
reWP49gDDgbgmW8j9JYZZga9axNe3Y7RItRlVpnGcheIbOMfSeobehnE6ExpvCcuidIV2Y2Gs2nvD6v/1jFl+dcGL5GM7V1FVDkGC5rotkUbP2VHOjD/RabC2+yoWPlq3wN7DBsb6wwuq5D/LVK8+TZzODUftmaBYbrzc1WKqfAe9DjbiKLiZy6lBJRr13RpOxeLSASWWnXAiLjLv2kOZBWSRibADrdKsV/jnhVHDZ4dTj6xpndp5IOYmksAVaidzcu8p9x86ie5lcGaqWq2zwaihSbWwH78YteRJhlskDhbGi0f6gmBlg7HOgTI4JpwlfGbKm2VSmeKB2aIAvXbvA89vX+MUHn2JDG/I0MzwAvzOjd2kx0qXvV8n8lMg9d4xDjX2PKiomaaagXv01t2gSC45UVJFdioWp3pue/ODrrgiWPpfMGIXbiWOWle1py7iN5bQws4qFumaxblioAyPvGQQDBKoeOcOREPav3ODCf/sFTm4HNpc3qeoheI8jEFyFpypWRd0cWhQHImo1hZruQr23hewFZxiqoWPB88ixc5xfW+TqpddwWdEukcvcEe89UtwqiYV/VBYOwRvsKwYmaBCSJPr5hkGCjZFwnoV6yG7XlgdcMCIR0EwuM2gURSUbiqiFyRxKE7egPoigwZcJyJnX4xb3jBYZVSvGEAhaiInFI612tjsHOymdt3orxQgTRXczeZJxpabLRTsjUZE6IANfTmhDMDUl8tBeX0T5rSuvsKUT/vYDTzBsHdIJ7GYaWaJ+5eYdHXtjJfTdexOs2ZPukVKLz8OuvmY7LRL9/Eg7JVPuvQashsscchK1oIjvdd0VwSICTeUZlvHNddFqW8B07HWJadZCxc5lnIF9n+fQwE2z0bm3vvkSe//665zzm6yO1nCuMtTLV7aIS9/Gy4CYlJgiSRO5S7ZDlxsqJSWzG2syYjNRsrSmCp6n7v8QfzC+yOz2DZx6HIIvJ4lzZo1khXgBJHygKh5pmg3KzQW5kwKFWrfd5rovVkMOQgKnZVGUxdtUuMbm1rg7eiioNS/VlR3XH1LzicVYrgq8fPsCz9z/sKVcQSCUCQWmrjNzicxhk25mqJNDYE/J08KmpizeBNqZWEw6RWMmB1eeVyJUxi7el45/c+k7rCwO+LkT5/C70QRoSY0JEGFhZ4A/mBV2cD8Ps/SwnCv1R/9289wnGSgnRp77UGsBj8D+3p/Yh0FUysvCNXyv6+4IFoxmX3sxQwpMyJWKuOvWpOX6tOXKZMpeZ8V9qzCJia5UscF7ZDLh6n/4IvUXLnHfwikGVYN3NrLZTC/KcY2hJ94Hgq9JuTPzhGDuKWWiSSmode7W0kGhj9jDcBJYbZZYfuApvnbpBbSbmvlDgYdF3Bx+7a18zB7Wl4lbcV47zwt732PN9pAHoWZfMrhs3lxCqYV0jv7ZgWP8snlvQkvKlszk2xqXlsJstfuEvM/mygmktRokxcIFqMoJ67EivsDOUgsaTM9CAsnmYaBdKYuzeayJQp5FSIJ0xp4WZ/Dx7f0p//LVr/Lk0eN8cv0s0hoQMFdIiqCTzHBpFXnxKj1Kdcists3QzBf7FMwCJ/VBcsepUaAZuhiN3oIpK7t46Kjfjz8HmWtk3u26K6Bjgz1tR1WFSWeFXA8BT2NmlpShdyQtmpWUUHHUJIbOMX3xTbY/913O+A1Gy6uIr/F1MIEWvTTXFr0L3jhSPQKFEnNXGnpmGCdZ0LIrO5TsAjGBl8JvUjU9SBAePHo/f3TzbR66+ConH3iK5Iw+XkLN+h9dZw1T5whaWZDGDhdqJDuscCmDmLwvehBPTWSSs6U/3norGjLZBYKLBn8nN8/XQU0T7wQXjEuFs3RPsonSXty5yNNH78Fluyc5ZtxCMElMTEhlHgVVZSCDpox4E9zlmE1MFpyRPgsop84Yydoq6sW0uhUWAEl5c/82n3nzeX7u3OPc36zPuXVSOags0B0ebcC1ntGu0I07GFXzWrG06efEzMNhRLZw5kmU2gaUioFzb1USywSxAq3MU7hY6C7Ke/uG3RUni2ICry6aXqRLmajQJmXSJSazxPak5eZ4xtXxjKs3t7ndJrbbyOzabbb/hz+k+U/neaQ+wcJgwayUiqu9U1Mgqsih72/uC7/iKuICwTWkWUI12liH0iNx2qMtGa9GU3EYqpXUiIqVr/jAAx/h98ZvM9m6jM9ikHJRVIqabDfkgkg5oapq61u0rWky2pY4npC7rmg1THMRXEVqKnJujQhYefC+uFt6cgzmHKNGf0HMBihrsjrCWRMzdRH1ZhF1afcKD6zdR9qL0BfLnaLe7JUK8YpcYfLlYm+WDmzGjWQ7bVMbkdL9ttnY1tiVSixF9A5tM8/fvspnXv0Ov/T4s5xtVoldNHAgZXJj/tbiBPE6t41dWj+JPHfV5MTSnyB2mqqWQbkllUJ69x7mtUoWo/ykO1oMh3+34LM6q6+BLKV7r2C5K04WVUpxG4oepbjKQ0nJbM+YJWUSlfFkQn7zKvcdTNj48hbNYBG/uoGrG3CVudcD2rsfZkD6wHDzxhz92q1MB9E4zyxOoYskrziCpTa+UE2w3bPXrvTjuFUdy8NFlh58lq+/9Dw/NlpDFwaIKH5QW+c6WwFvC7H3KgOdzgijEaCEZmCcdHHkbKmHI+CqhqgRV0ZouEIQJGBoG5ns7QSzuC5jNIphg3KYtlzotjgzGjDwS2Zo3iWzQRLmwilNJW2cKko0DY8Eq+m64pbSlZ8di44nGRzdz45RVdIs8sWt13nj0lv842d/nAXfGGfSi93Dylswgo3lLiImAapQs7w9ZOvmmLQ5Mil0qVHmELgWxUGxa81lFksuQIDN6LHZn1qGQ/Uo27xe6VE37Qmp7x4ud8nJYg+qmHLazcceWFM5Foc1y8OKlYWG4Bzt6hq7Wy0vff5PeH7/CtEzJ1uSk/n0KrgMXq2VRUyWhmjp36j1KDRpSSccUgWqekDE4VOGnKyfEROaCiFRsIApCEqhHlM5x4Mb9/HyxgrXLp83gqAAMRb3k1ILaNHGi+DrYEU9ufRYpHSjpRhhWTDW9QKT3NoMmKqwcZ1DQmVHpxMoOynB2+kTAq4OVrALlsKJ8NLtN/ngyQfBK34kUHvyyDYlyT1+bxoVatDGAk33knHQRJBQ6qHaDs88y0g0loSrbdFHgd+88By3r73FP/jwTzAKzbwvpLOERGuWukIh1gF9lNlCbxNr68fx37iKxlwM8OxPFyMpRjMlKSeMzRRNpBzfwVKORcIwr39U5yBADwwl7bX+7051gbskWFLO7E5apl0yIwU1t49ZG80L1wmDYK6SIThmGU7daLln+SH2Ftb4dzde5OXty+QuW8qFJaMuR3KKRgd3DvGWi3vvwbt5KkbO5K6zI987qrpGxRsMWnYlcp5btqo3nUouzANK17/2jqce+BC/e3CRdHvH+h1ZCqnTlz+KJhvJ1tQ22at3Q+nXtQ1unfdBGQwaZjoz+yJfdBpSgoySe2cpmnKdQ6XWKyrdbcnsphnSHrA5Om59EV8cAGZlIcVsu/SiR2qHFDhWW
yWnEkxtOY2DQ2cJ1zHv2OdpRtvIQZzy7577IusH2/wXH/5JgjfzDSJI7WDBoY0FaL+RuCilZir2t9G6/JuLp9BXbpFjsr5ISYtjtlQ9qfVLckrvOC16a9bco4MYt2w+ur3cPauNi3rTBfQ9OpN3RbBohnEbSSLUhSfoHQwaY4/uTSJb446DWWTaJnwXOT1uWWgWePzYOX78vk/wcjzg37/+Za5t38QTrF+SDAmpXCBgdjimUSlFel0hdQ3OGQI265Ayj7KqKuM4Ff1HttykpAKli11OQqt9BBcqNkbLLJ97mq+89U3SeFpy5MLi9VYImw6loESVpXVZs9kcGVdjjo4BLDZL7E3GPYxnp1vAHCeDMQKQ8nOlP5kEVwec9/jKKB+vji/zxNoJ6KR4FJQhsEXpTLJaTaPBxDmA7mMpUjLNidTl63po2gnUMmcrbE+n/Ltv/SFPCHzyg59EnC/9mQI1l8KeUF5nwCg5mi2AyzwhV+qmpSPLrNxwxJ0pKSZmXUfblcnWOaPpsKsfi81Sl5KdPimZYQVqzjCl/9KfTjEXn7liuzTHo9/luitqFoCclMm0QxZrlhvzsJ3FTNtlkgqxywyDEfZWdiYcczWXqgrxjsYFPnj6GW5Nt/nNC9/mzNbr/PjJJxmORjQlGKw4LPBxWSQo9KO4g/dk70lF8SviCL5hMp2ZfSRm9iBis2MKE8z6HEXoZ7CT8PDR+/nSjQs8eu111gcPAQGRjASQ7K3p6b0Z2IWKrp3ZruVMSOZK0SlqhfpiPWQvd4XpUPoJKRbSpUdcJsbOtDelJ9EPnhWB8UB4a5T43mrDsyce4joLbFybUkdjHuMEqcskMszmVQdG/ccdQsKGIzl0psjIaiCyqSRdFq7oPp/91h/xyRPHefiRDyLiSF2CTvDRZAUKuCzGi4vWj3ELzjQzTc/29mbAMM1IIxw9cZLd75zn6tPLdNJrVYzi4p3MaxSgmFgUjUyxxe1Hjb+jAVn8zfp1oJKw2TTvvkbvjmARwfvKUI2YaeqKVoRZVhabYB12EZabmga4//XrVMMFXBtIYoW2OsfRhTU+/chPcf7mG/zyq1/gI0fu5+ljDxFq66bbIg3kkisb8QOiQHRiTcdiMoG3Tn5AmGG9ACl2oBYXpc9AgUAVhETwtiU+du4j/PY3P8MvbhwnbKyjUgaT9tJjMeDBS0WrM3LqiuFCLAIw6MdSj1zNm9wmi2k+ymgaUhcLq1bLCHGM3uIcNIG9Bce3hhO+2d3kufNvIM7zzThGU+LEcJGPPXQ/H4qLrF0+IIReSyPkOpmxxYGlYCJi/ZdYGre9wyMY7KvKq2zzx9/+In/9zAOcuv+xogY1rlkSoBFcU5H2OlKr5R5bCpHbwhJXKdPVBJcgV47kMn5Qcfrcvbz83GV+58Q9xWnUcuJCEjeuWQFscjb3mNILwHgdRYN/Bwdszg3rETeUnejfdZneFcGiqsxiYtEFWlX2u0gssGwIjsXKs1oFBsFMvo9d26MLQ0jeSILe7Ik8VpM8cvQBTq+c5DuXnuPFl3+fT596mpNrx41Bq7HQzzMZD05tjF4u9j6F5gIeYvGcKs2ErptRV7UxWVVLelUQtwJtOhfwdKw1S9T3Pc3X3vg2nxj9BDpwEI3aQmm8StHr1HVthSk9vHOI1mR1dAo30r5R62O0hVvkLBICTjOpzfb3piYD367H/Nvt1wnZ8fqLbzObzqBt2X7pZUIIXF5a5vnX3+KLH3yMjz94mge2Wo4cKGs+4IOQ9xVaRwhGsZfWVqNzFZmIThLSWeP4Ob3Kd9/8Ln/r4adZ3ThVoFmF1npaQtm924xJrYFi0tHz3RBBs6WAUinaKLJeoSOTDoxCzSePr/HG1g7fWjt2SFUp7PBe0zJnGEdLjecOMRj7QctpY30n5lB1H/sxv3tlclcEC9gGsHvQWld6wc0hZAHzBUumka4mUwZbU243Q3uTPeO0NJhsJIFjdbDAJ+/7GJd3rvObF7/DPTfe5MdOPsbyaMGabr7/PlM6Omxoq4FjZmbhqgqXPTqbgXOE0Fgjsd+0ipXOfIZjr5PAU/vEI8fP8cWbF7j/xlucPPtwQabMPikhmBZejZKSCs9r7gOMfW3l2HDLLLJutj9VIGmhmTcVlObt+WHmzbMNTy2ts/+9t/h/X/wTtrb3OHVmg8m1a7jBkNxO8YMRUjn80gKxa3nx+e/y/Deew08OaHA8c/o0//CJD+NONTTXpyzOomlwghBS4YqJwIGSJPOlg7e4sv82/8XTH2CxWUfHIAPmUL0RKrGTJtj7QRJaOyRavebVlWkBGTdwsOiQpQoZmQumC5Yera6u8re3r7J1sM3rC6sAJTE8lBH3JYf39rMLmjz/vFCygAKEWDtND7U/717f3x3BYjuoMp3FIt5SKi+sDAKu8tTeM3A2JdhfuMlUK8ZqlP1MfzIkehNpV+ZzIMqp1eMcXfwZXrp5nn91/g/4xPo5njr5EKGp7WSQXKTMziSxItTB04rSKkZE1NooFwjB1+RUuv1OikzY7JU092pJh/eeAZknH/wov/udz/H3N8/gl0YQjfAoGK9L1aj3xa4SChVGgid3HU4CTXDEypSSmWxpnFMInltM+dIR5SXpkKajenKZ720dkC8ecGJjiXZnh8c/8jRN5RjfvIVbaNCVVQZLQ3b2p9bRLmKyg2vbfOmN82wPHEtH1jm4dJMHhqv80ur91ONICg7djUi2OuH3tr8H8RY//9BTjLplg5A9ED2iDu2skLcSooAiteAqm+qWRXFNqbM8MHCw7q34r+SQxRzVdDa148yZk/zS6xf53OYqrCwWNa3QKTaJrDRkWw0EEUQTs3aM9xVV1dhYdjJ70wld7ieFCaOqwqG8Ud3lJ4t1Za0gm8w6umh2NdMuszqqzTEyZ5YHFXuvXGDLrSI4fAhFb2IWQ0lNXRh8MSvAkBjvPE8fe4j7V0/xjQvf5MXnfoefOvUMx9aOUNUO9YFQmQCs13EHb/T/SVJ6spSolh3T03UdvrIGocGT4IP1eUSMnOlE2Byt8Nbph/nGG8/xscc+TPJFFSmYzqVPAETo4ozQNAhurl3pJdV7VWaskUUprJfgcZXndw+u8N1Z4tFHz5F9hbpMHjpSUsZZcYMFbu8fsHF0ndMffQoqRzUacN/mBq9+903UCdcvbXEwnrK5ssREj3Njb5fL5y/SrA15+43XuP/H1vjY4grN2OI5xsRnb7/Eet3x0bPPULUDtLPuf2ooMLtDklLGHRTEqzxtZ+hZWKqsXvBiDjFLpuNhpjAtqVHlYVrGZniD1x999B5W3rrJsY+cw48aVE1inHPi6o1XmY23uefeZxEN3N69wMFBy8b6JqPBBqqJyeQms7ZlOFyhrpbw1chEe5L53VHzruv0rgiWeb6YkhloJyuoZwJ7s0iXMoPK045nrF86gPUjlsrkhO9/gID3wZjCiJH9nENcMaiLmbXBEj/5wI9zYecy/+Nr3+Sha8t8/OzTLCwuIWmKa6rC4q3ICJIyQzFtRHbWcReBRIcnELsZtdT0qAxaBomm3mpUqZzy5JlH
+fyN3+LcjSusbxw3zlRVuuRir7OqGqbxwAADZxqaTiOT2DFOkUuzCS+0V/m4Xyd5mAZlp5nyfNpnbyexu3uLLnf4oeOt515k5623GCyMaEon/+KtdW7vHTBaHnHsyArfuHidsydOMlqoOL66wivPv8KlV1/jYO+A2HWkyYzujX1ElX+rn+Xg1GM8uXyGtWrE7958kfsXHE8efxyv5h0sjSc7kFSc/0XQxqhMkoXcJvzQI85Kc9cYD0yWgnkfdBmZWpGfCy2Fg4wsCbmMLZRsFBw/VY6fXOXa73+TYz/7EXwdyBpxrmJ58Tg3Z7s4PFU1YmXpBJtr9+FcU6xeAynDja03OX70QQbNOl5qQObtgHe77opggdLR9uZCkkStllAYzyKzKMyS0ly4zENh2YRM9k1QuEnOmV+x9NBhMbYga3GNsZtQ+8B962fYXDjCd6+8xL9+6ff51LFHeeDkA9RAGFZmGiE6PzVqJ0xjz4PyQEDp8M7TdS2uqvDF1E7Ke3GocbZiosnw0JnH+a2L3+CXVv4qrrFutgZKP8VGt+EwfpcPRmAUYVA30FU8tHEvqwRy5bh4JvBHeo2r12+xtAzpxg2+/pnXeOwTz/D8577AzsUrLDgh7d5iHBPiKkazDjfb43bbccPVyHDIef8Co8UFzj37BPc+8wgn7j/NC5//Em88/yKxPbQxPX/+PP/yxi0+cu4xTh45zhOLnifOPILbr9BGCpJUWM9q6VMeKGHR21i+oMZCLv0fXweoFZYOB6uSiwo2Wx3na28/e2ZpJ0GNChMEjRCiY3NtxK0/fJ7Nv/KEETkls7SwTNM8RkwtMe5ze+cii4snGTRrttGlGddvvs3aymkWF04iYuCKc44US5PnXa67JFgKEoLOi9ukRZvuSn6rysYr11laPG4LDEvdsrMzXgoUbM70BldGNZ2Im/sQGxvYoyw3Qz586glurR7nj954nvMvXOaT9z/Jmj+Kr5jziIQyck+tydnPUxcxvpikSNfZSATvjMBnVD0jbIbgCcD9x+7l7e2L/Mnll3j2wafnrz/lOE/LfF2ToonfpMgOUowk4NTqJrK1xcubkZfPerrrjr39LSY720z3ZjCZ8uoff4PZ/hSycR+Hi2sokfHBBE3K7Vu71HXNvedWWHnwYTbuOc1bL7zKdz73RWR5mZ/4ax/jQ3/jp2mnB7z98vcKUmQp7e2dbb708nMc273Bxgee4b5lYZQENzNrpf7YdZUFhLoyunvBbJd8ELS2HocsWe+GaHSj3s3eMoKMGxX4u42l+SkFLlckGh0qo9RLFWt14uaXv8PqRx6l1SkOoQrmooNUbK4/CMCsO0DV7Kaqeo2VlZOlDh3PTcdTStZ8fpfrLgkWi2YT9RyOLsup2OkoVNt7PHwguKHBw1nKdGIxBMoVVrHrN4as1kCTXJAP5gxZX8zd6ipwdPkYP/vkJq9tXeDfvPZlPnbzLE/e8zDVqME1TemKC8NQoRTZQI5WnHuP4qmyErspUtXgglmCpnQ43aquiLPEh+/7MF947re4b+sMy2troOArj2ZDYYITcyVxDueq0s2H16+9yeXxda6erHnTX0VfjjQp8ca3X8GLErsxIQyYTj2pmzI9GANKCI7BwgDnLEWdzSKaE5fevMb+rR2qreucPPsgjz7zONcu3eK7X36Rh598iE/8nb/Fl//9r/HWy69YZ10Nat872Cdcucq1pUvciMucHByh8YUjF0olP/JIdQiBJ1V87dCm2L52Cd2PxY6Jud4nJzPDyykj0TY9jSW9K26RGpy5ysQEtcepox55Vr3j4KUtNj9ogRFzoq6BnimL2scFRn7g7FG0t60SS+cVpfY2bezdrrskWAwC9IghTCguFZsayWQHZ9+6zvrqMq3D3qh4UlLqEEgYGc4XKNfUe5nMHQo6EXwRTWlxYDFXfqGqGp44/gCnVo7xrbe+xcvf+Rw/de8znLjnDKDUzpC3KJlJ4XGZy0lv+empa0dKHUTTfVgQJ1A7gUIQBtKwfuRefuX1L/OPnvo0wQ9LXSVFgOZQFWLXUg2CgRiu4sETp3np4nUu59v4SzfYvrnH7uUbdNMD2mRo3KTbp6ob68+4TOw69vZ2yUR87VlZrqgniclEIEeyJvauXaJKLTe2T3LsyUc498R9fPtrLzLohvzU//xv84Vf/U+8/q3nS3fcdvjtg31+6/WXadZX+dmNI1SVx2WMU6b2JFEx7hiCi8m4ZouO1NqwWukKEzz1ngZFgOWxfou39IvaQav4gUd9oUlUIMlDHcqodc+gFoQJs9evsbtZ8Wtf+yJ18IirqH3g733skwYl97w5FfrhTi9cPM+4i0xiy4mVNd6rZrkruGE9DSXdQc2JvauiKlWnfODWGBktkDG2qi88r1hMGlSFTrToGDJCplIrOCnEO+cczhvMrF4sZfKumFbAarPEjz/449z7xEf59SvP85XvfJVu94A863BJGbqKOtji7pstnhIwYqzn1HVmqFAKdyj1kmZmsePBow8Q6yXevPBaWWBFgOYDqKMZjoiFmkGxZJo08KJENh+4h4MxHOyMGW9vE9sZMbZMJ2P7c7CPdzY3M2lmMjlge2uHbjwl5My9x9YZDQNdGwkqbC4PqHXC1ve+yzd+7Xe48uLbPP3hRzl7/yncwoD1Zz7AvU89OqfZ9IX3NCvP3XiTG7JLbgrtBWMPaCwZgSrSKG4zkJuCIiagLX2PlJGpOb3kg4xO1dxhJmqjyDuQCUjtyQkbmdGKDVrtFagz4EYLe5k6Q7V9k+biDpPxlBtb25zeWOPU5gZRu7lGv79yNhDp0s5t9mctp9eO8r0bl+a6/x90/dBgEZEzIvIHIvJdEXlRRP435fPrIvK7IvJq+e9a+byIyP9dRM6LyHMi8uwP+x3GDzx0O8+lLrBRII5TV29x3/Iq0dkMRxviqJh6KiOaCC7jRQutXnGa6f8v5Whs+ULPpn9YSGE5G2UlkahD4OzaCX766Z/h7eWGf/+Nz3Ll9fPkyYQqdwzKOIoeD+2n4BrJxaTKB9OJ9S/E0pfUdSRxjJqGI0urfOyxT/DVq+eZTg7IKsUEDyN0hlC62uYr1Hn4k9FtpIl89/kLXD6A/du7qCZSiiCHDOpYRnWHuqb29fzEmk0m3Li+RRsTg8aRupbdvX0uXjlgY2WJB04uc24t89JX/pCXvvk8a8sLnDh+lJH3PP3pT7KwvnrIZk6Z4M0e6sXLb5rzTSO4oSPXGb8YkKHDbXhYC9aEbJW8ZyeOnSgG8VNoNK42oqcWu1fK5ieh5985XChMZ8X8AKYJ2Y/oQUIPIu0sc+XFt3jhf/hVxt/5Hvtb2zxx8l5+7MHHCK5i7iVd/hha6dhcWkIkc3b9BJ+473GTefxZgwWL9/+Dqj4GfAz4r0TkMQ6nfz0I/F75GN45/eufYNO/3vOy5aY207xnANt2hlPlY9d3aNbXMWdNQ14SNpjUozgycdbikgWMFHSs74ZTUrBeYKqa8U7n/CIRG4ZjtbnxigaDIR+/71nOPf0pPrt1ni986/PMdvdocqRBCWKDkCRUBlk7wHt
CCNRVRdYO45kbHyk4GwPnvGNzcY3Bmft57sVvQLRmp6OYWqhQDYdWl2XltekW37j8Frev3eL2fiRub5Mme0xnE2LqTH7sPCFUiDPfrGpQ0Sw0ZaSccaIGg4Z7jq3hkzIc1OzsTLi5c8Arb25Rec89xxZ55MwiF776Nf7g3/06Ny5c4oMffRRf1UgzxFc2isMFz2hpAWmEb1+4wPb+2Nxd2mwO/OuBcKQ2Ov5uRg+S9U1ahXHZ3b2RWXt+WZnebfXKICCN0Zi0cTDwuJGfT6PmIBqxM8E+ke/evMpvfPPb/Mpn/5Dnrlzk7GNneObUMdzSIr/6tT/ixQuv4uabGkUpm8spKQwELly+QttN0DvskX7Q9aN4HV+hDCNS1T0ReQkbUPQLwF8pX/Yv+XNM/9LyP0aIywQf5o4dx27d5ul6ga4Fl4wl64tXq2CjCRQzv86YVY+ixK41X1zNeAnW0U86Jz7Gsjh747ti12B5bWEZO+c5uXKUzWd+hucufZdf+cZn+dSpJzhx9kFSFehH4RlLTObjwr2YOm88G1M3A6rKxir4nl6fM4+cfIjfv/Ab3HfhDY4+8Mg8/8wYzWfSdkglnN+/xc3tXSY5wP4O4fZ1Yjcr5GMDBNQF8yBDaKdTmtpDjoX+oUzbyPWbu7zw6hWauibGPU4e3yC1kVu3D+jayOpyxWA04qmHj3NzZ58X/+grPPPpT7F6bJWcYbC0gteOHBMxRt547Qr3bG6yJ2NOLK3BquKXgnmLTY13ZfJ/8x4jqKVR4nBeiWTbHMWZE2jjzN62MXg45Wwj+ryZWNBmcgU344Tzr1/ltWvX6dKMM0fW+NDZE2wujew51pm1/QUq3aZLaoxz6VX4zOe79DNfjq0e59TRm9wa7/Cty6+z307+7MFy51XG5X0A+Cp/zulfdw4zqtePzMU6PSyMOJwqn3zrFgsnT3Jztzv07K1MX6/OkR34TKlLIr6q8ZqsT6G5zDSB4CrQVFi9ghTjNjOtKEYWRdIrWJqWMUBqEGo+fPZpbh25ly+8/FXOfvMSH3j0o/ilJWKxNw3OHocWH+Hga4aDftx2Mvo8NmRIxbGxuMbmmYf4/Gtf52+duQc3bOx0E6PCe/F0uePrty+xe+MWadoi031SOyPFznhUZSSG98HAkJToNLK/P0G8UFc2szKJ0HYtF6/eZmFYkbqO1aWaDz5yjr1xy7Vbe8QOxuNZ8RtXtq7e5Cuf+wpP/sQHWFpc4GCr45HHH2Y6HvP6+bdZGDaEOnB9vM+5pvRH2owmOzhEfEEPTbGqiaJSzUavKcpYdUDjcNmR1KxjUbVnnIXZQcvl69u8fPESl3a3WawrHjq6yS88+QgLqVBjAD0wB85J7Dh+zymqSzuM6obHTt9f5AW2AXrvD8VhKCujJdoc2Bkf8NF7HpxT/f9cwSIii8B/AP63qrp7aK8Jf5bpX3cOMxqdPXc4Bij3dHPl5M1dPlwNEBeYFF8n7735HIsZghscWP4iYnMGtTK3xhRx3uyAUteZXVLhX6WccRiNQrwhK64MVe3tVBFLhQQTS20urPMzz3yaFy59l1977rM8vHmGhx/4EDoIuFCRNZGyo+2mDJrGds3kSDninTezB2eAgMPx9L1P8OuXXuaVl7/Dox/8BE4cXbYmXAiBtydbXN/fY3b7FjqbIqMl4nTHJg+XvtFgNER8jZMK383outY2ic7smkZNw3BoXmWTWaSNCZzjxq19NjcXeHDhOBsnNvmDP/4Gb18fM51MWV5paNUT93Z54+svM5tOqGrP0eOrXHhtQtfNePDZR7l9aY9Lezu8ef0yr79+kXvvu5fjssRSM7SzVosL5CTjh95MABNI5W1qWJetCaygNUYrQph0Ha9fvskrb11i7+CAIyuLPHbsKH/l/nuoxCMpk6dWJ8aY8INgiz9l0iSytnyEY3urXN/d4V/90W+zPBjwqceeZX15nZRiqYMEUTv17l0/zgtvvsyj99wPd6zrP1OwiEhVAuVfqeqvlk//uad/HUYOxbi67O5ArZlPv3GNhTOnGc8y06QgpjBMKF6AAh1Gyg3AUYVCOOy5VyKmVc95rhXJKZOzfQ+hKi4o7pAUWVR9PRMVBzl1dto5x+MnH+Pk0hG+9NpXuPT13+VjT3yCZnWVflT3oO/P9MRIsnX6vb0OXzaDzYVVHrzvSf741ec5e+MhRpubZi3kHD5UvD3Zo4sd0kbUBzRHSxFLeuXF40NDqEckbYkoqDVJTQRlkOhDmxv8nZ99lstbB/zhN17n2q0dFOUPvnyejz52Dw8/eY6PPnmW+uVLvHwhMpsqPndcv3rFrI9wzA5mvPbqJa5dvEI3bTn/3Td45OwZbsV9fu/y68zijFdfeoFfeuBD5NTYQCfnUK/QqdnU9t5l3rKDKJGUFKk929sHnN++ycsX3qKdzrj/5BE+fvYU666hqjzGlLS0WQcgtRCnidRGvDO/ARVB05R6oeGRM/eQrlwwh1PvmaaOHOO8wO/19yKOx0/dz7GVVcQJC9WfgxtWZtv/N8BLqvp/veOffgOb+vXP+dPTv/6piPwbbKz3D53+1VNEUEPFUHj87Rt8YGUVqWrasRVdSU2brhnzrXKmIQ/ii2bBzce/+dxrQhTV4pbvzI0yKkiaEXNC2mzjuLPRY7IUTyqKU4raaFdVCGJzGH3wHFs9zt985ud4/tqr/NqffJYPnH2K+048QNXUhN5EzwYkULmKTjtybBGp0OBNE4Py+PGH+dL+bb7y3a/yUz/x81B6TaJA3dgm4D2aOrSdYB5oAAYWOO9xweNlSFUFWhGEgzLoyZNi4uLNPW7vz3jq4dMc3Vxla3dMmkWGiyPSZMru1i5n7ruPlbVF6vAKr1/bw6fMkw9v8o3nLzEZT9Gu5ebl6+ztHSACbTtlGDzjWUuD1R63DvaZxBkrS8vkccLXFH24QCNzvbjN1oRbt/d56cpFXrn4Nvrm6zx1z1n+2sc/yOrCAl5topdE6/Fo3TM0jGSaY8LXoTSjhTTNuEVTumpwfOjBx/jA/Q9b6uv+9DK/MzPKCmujVUSE4N89JH6Uk+XHgH8APC8i3ymf+z/zFzj9q8/fbOgmrI4n/PzVPQYP3E92nv3Y9T1+9tspXioG3nozmntjBkfGZqD4oowyBXExMSiAgfawofe4GC0VyxGJkeyM9yWSzW29IG0+daWQp7ABIuIrKlfx9KnH2Fja4LMv/wHfu36Bv/rEpwgrK/b7y+mmmgmhMsFSSrgYUWd2RhtLy5w9cpbXdv6Eh994jTMPPkTOkEQ5SC2pmxkbgGzBVgDMHtnr57l4F9DsqAdY0EzG1FHJLrI7nfHL//FrHFlfYXF5xIn1RU4dXWRxEDh1/33s3trj2L2nEe149qlz7H71Za7vzbi5PWZzrWE6mbDXKtvb22SF4WgEInztxZc4vnaEh+47w/54SiRyI8+Qg21ONasGZjTevMiikINybXrA869e4NWLlxiK8NjJo/yMDhjt7HLs+Dny8jIyMY2/quIGgTSOVstk20hzF2HkobbFnZ2SfZECJAqy6ebDsHqdSy4TBe
6cT/lOt/6e3feDrx8FDftj3r2t+Rc2/cuXwj7kxN985TInjx1Hs9B2iWlnKVNwxbDOFygwW+/Ehq56khhrNcXeYd12PMm9ztpEXeRcxk4ESw9ywAWly0Zn8Zjjex2sp5OlN2Irw5VEUC1DcpxwfGGTv/v4z/Hazbf4zW9/jk+d+yDHT54pPSHbDJzzuMqRU0dKLZIDVDXeOU5tHGPaPsznX/kaf+/kSZrFJUKhXeh0YovAC5rN7rXvqAffUFUDQtUYfCsO7x3J28jx2HXENEG6GTu7O8ymU45O11gLNcMTDQvDisFowPL6kIOtXUYrmxxVOHf/cc5/8RVu77XcvLVHbCOSzSXFB8fy8ojNzQ2q4Ni+ucP5q1e5evUaXddx8do1zq4d5Z/9zN9mWNeknLi8t8vzb7/N629fZBBnPHbfWX7pIx9geXFIGmd2Z5dpmnuJN64Q3L0kzP1GADotREpBZhQipX1OR4JOxVK+WCgZk444mZW1WGjiZTeW4qI/F3vdQZrMmgk/4AS687o76C4CqkLlhI+8dZln3QCaGhVhf9IV50dFc2TkbCKYxYIRFTWneaNQMY5RKjWJQ02n3mvUXQDReZGc1eDcLFpSapnruGMXyUIZFa6EYnqhUmakiLMF7Byry+t8eGmFpYUlPvPKl3ni5tt86PGPQDUkVAYgiEaCBlIXic4mZokLHF3Y4NroJjunTvP8i9/igx/7CZz3dLE1czjN5BhxsaXECYINKjUDjUKFL2Iyj+Cqipw7BurxwxGIMhwt8MxT9/PUoydZHi2wuOCoa1g7uU53MCN3HcPFNe47dZRzp29waXvGwd4BB7enRhVyZlO2uDigbVuapmHt2Do7e/sctDNi11F1M7QRrk72eO38Vc6/9RqDSeTc6aP84rNPsLK0YsI2zchMSZOINgF34hT58m2YtkDxKaCgaMV8L6d0OKjJCZJKNuHLqRCEXMNsMmGYZ3OpcY5mcOgKQmfeYWWep6bi0p9RUWLq3nWZ3h3BoiCi3L+9z9+4PqY+dQLnbGfcP5iSMgQxyv6wsiBSMeq9z4mIjaroj11VCxPnwBUUTXt2Mlhw2hZth6/zOBvRhVJG8blgwys0I11nNJvCdxI1aoZ4VyBcIaiNj3vk6P0sVkN+75Uv8vZXfpOffvKvsLqxYe+zvI5QN6Q4ZdZNaGSA9zWn1o9Ti+NPXvgaD2/dYnnzGAKkrkVTi+aW+UxrozwQmiE4RyKVee7MZyc6VRZrh4RATMqsSxy0kWs7U67uJ964tUUlmSfOZRY3VhhuHKfb3UaccPTYUZ55aJv40iUOjq2hs8SN6zeoRyOawYBRM0AFLl3fxyE0o4ql1VVm4ymnTq6zsVbz5T/+Ix5cP8rffeYZFheWjDRaO7RyRfot4D1ZWnIHcmQNfW4fmc2Q0ciallHRBFqZB4AUIZk22IY37ax31glUnpQyOhrwW1/4DHvfaXACbYz0jqYx2emT+55WcdrPKRfkM3Nja+tdl+ldESyVJk5Ptvi7O7usHF1mYX1AVQ3Y356ZEXhZkKgynbXUdWVQLBnnwqEfcQkMQecM3n6mydwoocDTodBcbEQEfYgUkl15Yc7ksU4UlxIp5jJhWPCqtDEjvsJJgUUVHImzK5v8zSd+km9efYlf/87v8Il7nuah+x4tfAmDv70bMW3HdGmKiLI5XOLK9jXOPvwkX37uG3z6J/8639u5hhRnfCdAqKHt7GRxgZQj+3tb4GFhMKKqa3A1znuOLnhWfOJ7b16k7SJ7kxlpPOZL39jlxfMXePjcGT746Ak2NxYtrVWol5bI430GC0s8/NBZnMscXa74T5ev40WIbaQZKDd3JxzsbuPCgMFogHZKCBVuwdOMRtx78hSfPneOSp25WmalH0DkFBuAVNtg2Pago5soceSoRovozgxZWUSymFm7NydPHZsc21UFTWu8mfp5u6dS3P4HSzWzNybcTDNraJbmo7mIqlFpbOsjxkKK9cXsT97bGPyuCJYkjvH+mFffOM+lzQ3qnR2OrC4wbqdc3N9hezJjFEbUrmGS4MzSOnW2xl1yRojzviBMmoCMOo9TrGnpbdZiP3cFOXR17E30nDf9hXfGCkDyfCxfn7J5BxSrpDZOaXPLUITswMUSqMGTRTi2uMrHzj7NK4NVXth6ize++gafeupTjJaWEFWyKIN6xKwdM5nsoc2A06tHub2/w7fDazz3+gu8fvsG5A7VZDVTcCWQHRCZ7m9TVYHBwgDVMSJmqudwbKwu8eGHV3n75g63r92kIzOoFxjVQ549d4a//hMPcnRpxLBtYRrJ9RgJAYJRd0bLqzz80H0sL4744pdeoV2u2W9tXMPk1jazyZi1zQGjxUVOnjyCCkz3D2inE2LKdopo8VXzd3gg52yEymmh3+cy47IO5JNLpFd3CfcdNXZx0a6IQqqt/MaVA3Zq6Vcqt8NVjlQLjQuMsmN/0hHKwLDcN+OEMlZCmR50dF1ksFCXzKbIQt7DOOyuCBYVIW4c58GTkNqGvFXh9h1pzzGctOztjLk9vcJePGA/TXjVAcETQkMzWGBUL7A0XGJ1sETjBoxCw8B5AwQKwuYKOKCqZo0abES08/0MdDe/YeRM5b0NK02mf9Hi/IITJCcUx7AekcvJlwGfi0kcjlx5VqoFHj/5AMNBzaSd8T9+/TN88v4Pcfbs/XOmQN0MmUw6ptMDFkaLXExjPvLUx/j3z/8hk9bETLnYLdFP1xLF+wGLy6u266aO6XRKSjBaaPjZD9zLEw+fxI/3+S9/4VP869/+Ort7ExoRji0NObc6ZDELDRB8hbaRNJ4QGn9HPZxplha555F1fvKT1/jlX/5tFpuGrS4z68zRf/fWDpNpy2w6YXllico7Yuy4deEmacNqwFw6974qnmvRUEwJ3tQLpfcSHXB0QH5jgiwGc4UhkNqEi4IvgaViLI6k5m3mxaHenquLih9VbAxHVHnHTjMUjzdPBCc4MWMUFRiOBvggheDZo5fvvk7vimABmLrA8uk1TqSaWTVgMBAuXRmSB4usnDqNy5AQ8x6eTplNJkxnB0z2dhjv7LNz7TrX4pSxtkQHoWloRgssDJdZH22wPlhisV6gCTXDUFmjzEFyVoOoKwbilGMfg3mhMASUQlcxVMWHymBLdD6r0OguGRMumcpz0TU8unkvr968wMee/DhfevU7XLx5kY8+9VH8YGikm+Ei7Wyf1LWcWj7GbpwwOnEGffMV8JUVtQnzas4eJ8ri0jK+HtC1LdPxlNhNqetMqGYcH1U8+sgJ0MjJg5YQP0SatlRdy8bygNGwpqkDlQSca3D1EJJND5Yq2WwaoJ21qHecWBlS+yFh5lhZHDJtp1ZzVI7J/pgbbWRvZ9/QtbpiZX1AisX7wIkxiZP1wCSIeahVAA4/EFxlUHkaOlxONiJ8QRAXcElNYelB22RFeiGtUovpk5LaaD4BGTiObaxRX99mmnuCbi7wsTCdduSoDAZVGZtn/bVD4753v+6SYBGSCLcbz9F9oakdvglUoZ1/xUHXcnM24fTqOs1ghcHKMivI3OfJ5
0u4GXGq80sW4gogRXq6GERusYEUUIraqvcNx+/NFlTviU5SjitzYmXM88xgdeYqkVvrBsTSaQKGpSMZXwdtaihKqokkLTyuGRFV5bSonQIf+QkUIqEXDsCLybcmLeU1tWMnCTxbEOo/s+hI9RpKppZR8oeWxwQOsccaOOlprOeB9Xa5INHfu3eriu41jL0LR9mvMdZmLFOM9561KPQR5x8+Ymv/Cps8z1GhRrx3j9ZMyXzizR6w24/I3XscOMYcuw64a4PUdtJsFne4xbGeMERjanLPt8/x98k8aSJ27H5GiiekRWLPPuqMef++xdTpTv8PZem2dOnqG1ssI/vT5g99Im/4fPn+ap9SO8Ndjl4s1NdBLTOJ/ihrsM5urImZg7madXb6MbLyB3LyPiHKE0XsDuaMKxJXAufJ5TgSIpFU9+7FmuXXifSDtsYREu5CH7/Zyt3ZTFhYxup4YUnuOzdZ6ZUSTGcESmtKShpWAm8dAvKCcFoqH48o+YpPyg1bD/BPj7VSXsCkHNS/IhKX+BRLgZHBofC+LFGkQxcaODiBK80hAneK3RcQ2VRHzu2cN8/8I2mQGvJToKH66odCW9kgck2lJ6vIWrW33WOxOu9zUyaZAIEGUY/RdCBAy4lAdCsN6CrTQ9hKjCLecIorYhFItjXQkYhZK2dWCdIdIKYX2gJ9UaqQJQTbgQqhlrg6NwD8Mpqj7AAQw6D8OlQnpkJGgWBiXH7N60qIbBTCxRt4FghFMa7YY0YsnCQsTM3GleubLEezdusnO7x923bzC/WuPfWnGsuX1EM+bS/CEm9FjPC+z+AI+gUJ6FZ9ZplQtc7xe8cnGMQTFLzF99IaJoPspX84ynlhQv7Kbs7ghqjZSuFhxdmWF3NMP3L+7w9pUmjz9ygk1ZkNy4xYnFOVYfWaO31+Ob3/4+zuU8+8vPsPyJVb52vs/O6gjZHfPZ59b4nfd3YHYG62Fzf3gAB3YugPOECDeb9eOHmF1YYnPzdmDAN4GwcVwU7PRSdnYnLM4lSJnQjhV/8Tjk+yPytCBqxJjC4kqPiV1oEtfiMIP44ziL9/4N4Pkf8KMPR/kracLRF9BxjUgrZBQhtA4VIxGk0agwDGVe0mxqljoNji02ubCbEddjRBR4bAQCZ0J/RU6p7/EIJelbz28Pugg8pTGBMFxUqlFKB0ZuD95LnDUVA36YMRPeY6vpXyElzjiiqFKX8iHxdFikdxUzf/h3yVS3RQXSQOdCY1RKgRIKU72P95VYhrGUeREIy6VCCE8xzognlqxXkC2v05iR9Lc3aNRShjdy+o0VZn/qMdZqBYfzK1zeG/NWLnliVTL3s6d4vl1nd8/x+GefpMgGZD3L1TdvcPpwi2+XLW7HR5FuB7Tks59foaMMRR6xMhOzZxu8fGGfWAneug27Ozc4MSu58SY89UKLC3sT7vTaHLuds/bYKU5++mk6KyUXx+9xsj7kmZljHJvpsL2xyStvvc/Nr7xN82JJ93CX7L0BSivW+/t8cWGLtlZ8/Jl1Xnp/i54P17I/yQ7UA0LK5w/+q9ViHvvY02x99U6g75UCaUPI3B+mbPcz1icltVhRRAX1WoKNY6wrGewMEELRaMQ0ZupEsUZE8b8BVEhxTLK6jFIKIe8vpQqopO2mCa9KQmj1/c0xO0ai6zHIcCzDPeEhZDiyvQh38sDOrzA4hBc4BzqSoAItn7UmcBVUJ4aqZoQqDr/w/k5ijUUlkihSOO+wZQUZUIoo1mhd0bhKcSDzJkQgHy9N1ez0U9I/A9ajIh3WaAxSBB3FwCEeOABELrDDAislrckGIpL0xwXz6xHpWyk7b15F1nKuzyn+nUcXeOFYRtwc0dJ7xFEXZ06w9eJJ3t8Y89W3e/zMasITp1bYynfZ8jEX67OIJw/zqUM1Xvz0AsKWeCzOeh79pOCr5wb85uUhTx3b4HxjBSU8l/cH/M/fcjx6bpezrk7er/HuxlVeLlNqRxb4hU+f4JFmg7fevs7f/f53+Vi3y9OFYvG5pzl/43W2d/bZ+9aYpVMtfvrfW6A9Z5EokkTwxHqXb+8VCKnY3NujtDmxiMNeqVQUvA+kek888xivf/dlip1tpDOU3qAQ5IVhd2/Cfj9jYbYB1TUui5IiK2m0EpJmQtEvsLnBG4+I/w2gQhJCENcDEF6pIFAaaY3Dhc3OtMQayqzCS+4O8xB2ReFPKI3FOhfCn0gdSKqFaRKFLUqECHLWztqgBizuH9EO2Aapgu68qhgnZBUnO+vw2Eo9t5K7sIFRJCT64bl8kqIjjZBB495VZOVlUWBsRfVU8TJb6w70Z5CCJEmCgJEMMbqSIJzBlSVmmNOcTchSRbHnMBs5/YYGJxCdBCsUet/z937rOutdy1x8lYW2odWcpb48z2u3Dbe/c4nN/Ql7i22eflTzs8+M+QvRKr/1xjVuXM64fhv+51uCGUGgQfWQW8elLUd2O+Vv36jzzCdbfObUOrNHZ8lue27v3uB3C8PV+ToLT59k4msc3hO8dOEV+ldK7iZzzC8d5z/82cPc+nu/z+Y7d1h4foHBm9ssPdXg1C/PUJuRAbRG0Nd89rFVXv7GNYpYsTcq2R8NmWu3UCrGOwsV2WJRZCQ1zXOfeZHf/vJXQIL1IZ/0zrHdm3D5Zp9WIjl1aqkibZTs7efgPEtakZYlkTFgBaXJceUDTt8KgY84YKsdkY7wMkhwex9KgsIBNmxWY91BIu2MC+JCzlGrBVI1rEVFurrrC5xWobxoXeAUm7Lma0WZZSAEcRSqVL50aOHAC6RQ1YnmEQp0PcEUJWVh0AK0BCU9lI6yDAOW3jqQoXnm8hInBEIopIBIAj7oTobStg4IytLinCNLc2xZENVivPAU1qKEJGolbPd3mT+cMMo6jLb65JlispfhtWfh7CKLyzMcXmoxVmu4KOb5xQRp79KUbZLGYY4fj7i8lfPS+1u0010+dXoHW6acWBzy736xxbfLs8wME3ypWZ5JOLUYI7RCKUU/hZ1v3qaxGPPso21mrSfvD/j97U1WXnwEeXyV+q0Jt169w9kZy8n5w3QWzvDV3CLLHO6e5603X+K5P7vEE+0v0j8/4kbZ45Evtal1Kq7qqlHo8SwtNFnvRFzIPMZZCldgbUlpJ9TiLkFiBLwQWFfyzMee5P1zF7h44TxTulYtPUWW8d7FDcw4Z6YVsX5siVpT0J1pMthL6W+PqiHWUKRJh0UQwvoh9sA4S5hyA1TgOhYyLE0qhVCSsgx3Wuv9QV3dVAmdlvIgJ1DT0yIPeBE/VQz2Dm9N4JuQIZ9wUqKbDZyxGEBGcWCMseaA0T/U9itJaV9VwGyJFaCkpixybOlQKsYSknUhBa7MQUqUCE7rpKp6KoK8tMRVo7IsDYU1aK0QUUSc6OBM1amjtcbKnNahGYzSpLtbLC6XuNRj5lrIrqLTbeFKw929AYdWZlnsNDBxi5nukfB3ephVmo83LY+ud7jw5R43v2JpfHoJUVxkPL5FPd1jo7/D5sDzjdtNpPDMrLeYbydMUk/DC1b9DD21zPzhQwzjGtYLbuV94nM9Tr
21weHzOR/73POcOHOG/+a1O9Q3LvMzh7b40396xFy3YGIzLF3iZcWjv9JGNyqhKanCdSVMe3thefr4HBfe2cUIwU4/Y21uGRxIoQk6n/dUoqUS/MIv/Tx/57+/TVn0kLbEBR12RsOCyzd2aCYCm1oOP7JMe75LkVaq19aStCOKUUnhHD+qh//AOIvAI1xAqgXBISriOo13BmksIpJhPL6i47TGECUxSgqsKUP9XKqKWDxcCC0VwhqsNeADZVKkVDUoGZxCao3wDmsMQkfhJPMWKUK4JL2hLMLHaKwBQkNSIjGlQenoQF5PCkE+mRBpUJEOOZFWWOuDBAa+4jpzFAdDmQIlCFBXY7B5jlKAE2Q2RwjJ3OE249wilrrsjCfMH4PRpI5PwOYFpRNYC73dPuP9AZGDmZomrivuZIb1TpvnDq2y6wzZrQHm8BqvzM7wq7MTOskiiIKsaPBPN9p8+SuOtZUmH//0AocbNQZVWTZxkiu7Q9ayMZmB5X7J/NeusDS3iLyVIMcxV795jm9tXuGlQnH048f5E19YpDN6iZv7jq+PjvAFuc16PSeq+AJspXqmZVA5EFLijefU+gzt87vsGrjbH6KEwokgdRWqhlVOWcl3d2dafPpzn+Wrv/GbSBV6ddJ6jBYM8pxXL2xy4/oun39xyNlnjjK71MUUhklvzHhS4jzsDjOy3PzQPfpAOIsQApzDlK4SEgJvDEIpTFEgHOHuXIJKNBCSdmkNvvCBSMKHUIa4ho6jqgk4nVQOPROT56A1IolCl1gKlKhyDg9Yj3UluCBN4YwDLSjKIvR6lMRrhROS0oF0BqEihNQV9juw/QspgzBomCyH0qCjKGC+rUXL0In2TkBpA02qNXghiVXQobF5YHePGzFRHMFkQHptgjUZfi5h565AuTTMypGjnMKUDjVbp9uqU2/GnJibZbZRY9jvc7cseX2rhx2PWUhiFudnODuMGVxZwy7MsvjkGnPzHZaLCeL4DodPzbBwZpZPtdr0TWBMyfB0ypwXag3y/oR3bo7oFzHqVsqeH7O/VKI/f5JLT61Sf1shlyW3+wY16eKHjp9z2yw2NagI6wBkkCf3hJtOFIjQlVS0m5pTiy36d4Zs7g3CKRDuoFjncN6hpD5Qg3Ze8MInnmWwu893v/kt8nyCBpqRpJ8ahqPAOvk7372BlTEnTy9gnSctLDu7Y4z13OqNyMwD7izee8o0CyFXlXcID9baSpK5kq9zLigzqTC671GV3HagWI2TuCodl/i8RDVqIMCWJcIKpI7RSUSRZ8S1OhqPw0HpUJGkdCWm8OhYYwpTdYwVUkdQTsVvJMqHwT3jXAjxhCFq1JHK453BQsU/FoUwTkmk1lAabFFCHE4aY2yQyqiGPb0tKU0e+jJR6NdIDTpWeFvQNntkZU7vSg2T+9Cxr2uSBPb3++T5hMiW2FGDqJOwO5pwammWw1Ly7c0Rr73jWCom/JUz6+Rfv0VT13FDg1MZ4yct3V99kc8d7fAbiynxIvSKIb+30+cZFTMeT5gMB1x67Qq3bo9I7mZsyj4bR2O6zTqrNxyrtDn7/LO8307J9q5zuF2wle9QsxkzcY1GAyqK/JAbmKn0YCiTMx2Pl2Hq+Mn1GV65PWBjf0BWZCRxHGQThUeIUHCRUocIRAb48c/96c9jV+f5+jf/gGRvD5VlNIwldY5Y1thPS377u5d4qjdiab7J3l7K9v6EUsClIeQy/qH79IFwlhAeSaxzSGORWuKKMsSzTDXlRcCSTHJyW+JadaSW4DWyVsPZwBklrMEMx+G0UCpoOWpZMchIhAxMk2WWIWzY7EIrnIoQQhLFleQzjiiOwlhNVuCswYwDMhIl0LEM2BXCbJcrCjwh31GRRk1H8LEID+VkgrdhXMMaH8RiTYkzNoy6+NA81UpgTIkUoOMIpCLLSiLjiOMQRipVYJVkc8eTuoLR7phYaPy256bpM5iUdPYiiPZJRmNmui3ceITdS2ksdbn20kXS8Sj8Gx1IL0YXbjH69Q0ymyP7fUZbkmtO8HZa8g2hWGrWWU1qZP/iOmSOspVw6FPrNM+u88kj63THkqiuuCMn9N58j3UxYXZ3k6IvSJbrgSutiqGCSK0INx8B3luQIZQNrJIKLzzHVlq0Y0V/kHLtzk1OrZ+s9kRVgJFhzzhnKnmJ0L9aXF5mb/0R5LonynPaO3eoXb6Cq4oz29mEb35vwPx8h3FhmbTalKtHyGa7WPWNH7pNxYFw6EdoQoghAUz2UdsCsPNRL4KH6/iX7Se5jqPe+8Uf9IMH42SBC977HzQh8BM1IcQrD9fxcB0/zH444PihPbSH9ofsobM8tIf2Ae1BcZb/8aNeQGUP1/GH7eE67rMHIsF/aA/t3wR7UE6Wh/bQHnj7yJ1FCPHzQogLQohLFaXSH+d7/f+EEFtCiHfue25OCPE1IcTF6v+z1fNCCPFfV+t6Swjx3Ie4jnUhxL8QQpwTQrwrhPhrH8VahBA1IcTLQog3q3X836vnjwshXqre7x9WoD+EEEn1/aXq58c+jHVUv1sJIV4XQnzlo1rDH2n3Sx//pL8IcKvLwAkgBt4EHvtjfL/PEsg33rnvuf8X8Deqx38D+H9Wj38B+OeElvMngJc+xHWsAs9Vj9vA+8BjP+m1VL+vVT2OgJeq3/+PgD9fPf8/AH+1evwfAf9D9fjPA//wQ/xM/i/APwC+Un3/E1/DH7nGn9Qb/ZAP6JPAb9/3/d8E/uYf83se+5ec5QKwWj1eJfR8AP428Bd+0Ov+GNb0m8AXP8q1EIgTXyPwJuwA+l++RsBvA5+sHuvqdeJDeO/DwO8CPwN8pXLin+gaPsjXRx2G/TCOsZ+k/evyn32oVoURzxLu6j/xtVThzxsEdp6vEU76fe/9dKLw/vc6WEf18z4w/yEs478C/q9wMCE//xGs4Y+0j9pZHijz4Xb1EysPCiFawK8D/6n3fvBRrMV7b733zxDu7i8AZ/+43/N+E0L8KWDLe//qT/J9//fYR+0s/9ocY38MdrfiPePH5T/71zEhRERwlL/vvf9fP8q1AHjv94F/QQh5ZoQQ01Go+9/rYB3Vz7vA7o/51p8GflEIcQ34NUIo9rd+wmv4QPZRO8v3gVNV5SMmJGxf/gmv4csE3jP4V/nP/o9VJeoTfAD+sw9qIsz+/3+B97z3/+VHtRYhxKIICgkIIeqEvOk9gtP8mR+yjun6/gzwjeoE/N9t3vu/6b0/7L0/Rrj+3/De/8Wf5Br+dRb7kX4RKj3vE2Ll/9sf83v9LwTO5ZIQB/8qId79XeAi8HVgrnqtAP67al1vA89/iOv4DCHEegt4o/r6hZ/0WoCngNerdbwD/OfV8yeAlwncb/8YSKrna9X3l6qfn/iQr8/nuFcN+0jW8KO+HnbwH9pD+4D2UYdhD+2h/RtjD53loT20D2gPneWhPbQPaA+d5aE9tA9oD53loT20D2gPneWhPbQPaA+d5aE9tA9oD53loT20D2j/G+y8leMctJemAAAAAElFTkSuQmCC\n",
+      "text/plain": [
+       "<Figure size 432x288 with 1 Axes>"
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "%matplotlib inline\n",
+    "import cv2\n",
+    "from matplotlib import pyplot as plt\n",
+    "plt.imshow(cv2.cvtColor(img.opencv(), cv2.COLOR_BGR2RGB))"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "id": "cec8ed0d-8e6a-4997-b67d-a5e49f87c0b5",
+   "metadata": {},
+   "source": [
+    "We are now ready to use our model!\n",
+    "The only thing that we have to do is to pass the image through the model.\n",
+    "Note that there are standard data types supported by OpenDR.\n",
+    "However, OpenDR also understands common data types (e.g,. OpenCV images) and automatically converts them into the most\n",
+    "appropriate format:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 7,
+   "id": "6cab7dae-8892-4a16-ad03-651fa3bb20ee",
+   "metadata": {},
+   "outputs": [
+    {
+     "name": "stdout",
+     "output_type": "stream",
+     "text": [
+      "forward time: 0.030s | decode time: 0.004s | "
+     ]
+    },
+    {
+     "name": "stderr",
+     "output_type": "stream",
+     "text": [
+      "/home/manos/new_opendr/opendr/venv/lib/python3.8/site-packages/torch/nn/functional.py:718: UserWarning: Named tensors and all their associated APIs are an experimental feature and subject to change. Please do not use them for anything important until they are released as stable. (Triggered internally at  /pytorch/c10/core/TensorImpl.h:1156.)\n",
+      "  return torch.max_pool2d(input, kernel_size, stride, padding, dilation, ceil_mode)\n",
+      "/home/manos/new_opendr/opendr/venv/lib/python3.8/site-packages/torch/nn/functional.py:3609: UserWarning: Default upsampling behavior when mode=bilinear is changed to align_corners=False since 0.4.0. Please specify align_corners=True if the old behavior is desired. See the documentation of nn.Upsample for details.\n",
+      "  warnings.warn(\n",
+      "/home/manos/new_opendr/opendr/venv/lib/python3.8/site-packages/numpy/core/fromnumeric.py:3474: RuntimeWarning: Mean of empty slice.\n",
+      "  return _methods._mean(a, axis=axis, dtype=dtype,\n",
+      "/home/manos/new_opendr/opendr/venv/lib/python3.8/site-packages/numpy/core/_methods.py:189: RuntimeWarning: invalid value encountered in double_scalars\n",
+      "  ret = ret.dtype.type(ret / rcount)\n"
+     ]
+    }
+   ],
+   "source": [
+    "boxes = nanodet.infer(input=img)"
+   ]
+  },
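+  {
+   "cell_type": "markdown",
+   "id": "raw-opencv-input-note",
+   "metadata": {},
+   "source": [
+    "As a side note, and as an untested sketch of the automatic conversion mentioned above, the same `infer` call is\n",
+    "expected to accept a plain OpenCV/NumPy array as well (here the array returned by `img.opencv()`):"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "raw-opencv-input-sketch",
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Hypothetical sketch: rely on OpenDR's automatic conversion of common data types\n",
+    "# by passing the raw OpenCV array instead of the OpenDR Image wrapper.\n",
+    "boxes_from_raw = nanodet.infer(input=img.opencv())"
+   ]
+  },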
+  {
+   "cell_type": "markdown",
+   "id": "f3c85496-89fa-44f8-ad03-a234f466ea4e",
+   "metadata": {
+    "pycharm": {
+     "name": "#%% md\n"
+    }
+   },
+   "source": [
+    "We can plot the results using a utility function from the Object-Detection-2D module:"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 8,
+   "id": "d7129fe6-a198-4196-b35f-93ba41e50031",
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "<matplotlib.image.AxesImage at 0x7f41dc03c610>"
+      ]
+     },
+     "execution_count": 8,
+     "metadata": {},
+     "output_type": "execute_result"
+    },
+    {
+     "data": {
+      "image/png": "iVBORw0KGgoAAAANSUhEUgAAAMsAAAD8CAYAAADZhFAmAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjUuMiwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy8qNh9FAAAACXBIWXMAAAsTAAALEwEAmpwYAAEAAElEQVR4nOz9Z7BlWXLfi/1yrbX3Pudcb8p3dVV1V3s7PR7AzBDAACRBBEELgY+iUUiiQiIVchESpS/6oC+MYMSLkIIhQ+lRIt6jA0HgAQQGbgAMzHiL7mk3Ve2qy9trj9l7rZX6kGufWzOYbsyD4StG9J7pqGvOPWeblSsz//nPf4qq8t7x3vHe8Ucf7r/vE3jveO/4z+V4z1jeO947vsfjPWN573jv+B6P94zlveO943s83jOW9473ju/xeM9Y3jveO77H48/EWETkL4jIqyJyXkT+8Z/FZ7x3vHf8pz7kT7vOIiIe+BbwI8BF4MvA31LVl/5UP+i9473jP/HxZ+FZPgScV9XXVbUF/i3wE38Gn/Pe8d7xn/QIfwbveQJ4+67vLwIffrc/GCwv69KhQ/PvRcT+Ld8riiBI+Z2Tg69ByysVVftOy9eU36oq+a7v538CqIII5fWKiKBAzvZmWXX+9/aZIE6gfJYT7HzKOd/tqefnICB3fWb/t/PXl/dwcvdVM78HSn8eB++rqnxbTKDfft13X9t33le96/X9Cysiztt1LTYVToQ7exNyzDjvWBhWbCwu4sTd9UzsyJrZGe+Scj44t+84Hy0P59uu+zvu0/zBlB982485OF9B+M6ISL/jb+Q7PgNA83d8j8Jd91WA6XhMbNtvfwjl+LMwlu/pEJF/APwDgIXNTf76P/2nOIGktgCHPlB5T8qJLmcctiAd0HjHUl1TBzdfNME5uqzsdx0pQ5cSVRXIwKyLTFOmS/a0quBJOQPQxQwIOWeQg4eQE0xiYhoTs5jQZIu6CZ6m8qgqMcOg8oxqjwiM20jbJbxziIOcMl1WnPMEcagAKDkrMWVUwIvgnSN4RxDBe3toThzBOcA+Z7ftmEw6ugwpKzkpMSo5Z1SEnO2h90Y1Xyh3XdPcVst61GzXjmaOhm0WBopk4aFDy3z41DH+77/+FcZ3pnhRHnvsCP/ok9/HyugQOScgl0WWURJfPPdlbu7v0XV2v2Zdou0yMSa6mOhiJmk536ykrGi2e5GybU5y17lnzShKjoq4cn3Z/l6AlJPtBP2mlhOobaopazEE7LkiaE6ICFltQ8wpoZoRhZRy+Tz46i/+yjuu2T8LY7kEnLzr+/vKz77tUNV/DvxzgENnH1QFvPeQtXgPuwEpZ0QcIhBTRgS8QswZkt3Yxntq51DNjLwneyEFbzfWedRlpjHhyFQhUAVB1dNlIEPwgVkXaXMipkwdPFUQfHA0yTPtEjElFKX2gYW6wgfH/qyFrPh++1axxSliiz04ckw4J4hYzOtcoCPiVMhlcZMzzgnZOVJSghN88TZZYRY7YizGnCElRXO5kWJGad5wvna+zTu74Mkpk8tiS2UDD84joqQEbRJGWUEzdyYz1kYjfHA4L6Qusj1u2ZmMWR6WzSRnW3RZ8d7z3AMfIKVoppMTMUWmsSNlZda1tDExSTO6NtKmzDR2zNqOWexoY2LadsSUmM46upxIWYkp2cYSEyklu85s1yqqc0/Wu/0YEwBu7imUFJNtUtnO2dvZA2Lvp4qUfxUQ/86ZyZ+FsXwZeEhEzmBG8lPAf/Fuf+BEWKwCTQhm+QjemSG47EBs53chgNpipnghykVmwDtBXCi7VabDISIMvIcapjmD2utyAjQjzpFIiEDjA05S2dVlHvLVwc8NtQq9YSqhqYjZwpCYlQTzuCfnTHAOH9zcezkRJCfb/fsdXyneL5VQUWhjYiZC8JmYMuNpZNpGkgqxSzjn7cE6wWVBne2YFsHYIkZBRRHnEOQuQyrhXvFyKdmC6xIQM1IF9mYd3gmjJjAJgW48ZdImtidjTpSFpiUsdt7uce1rfD2yxVd8WMq2UWjWcl4g4kqIlnFyEAIrxeugxHKPcs7EHGljokv23yxF2i7SlZ9POjO2yWxmv+8i07ZjFiNdzLRtR8rFCGMkAalLxC6Sc7bPVPNwZLtf73T8qRuLqkYR+UfArwEe+Beq+uK7/Y0gdlPVXGxVFmpSpXKeyntijihCJcF2UxFqZ99XztkjLAG+QwneE7wyTXYDKgV1xW0jzLK5Ye8DsY22g+WE9446mLHYBQnee9TbjqbZdrfgHOo9KopXR9RI0gwozjmc2ILKyRaqYglEyhHnHd45HH3OIsSczYPFTNdZyNDnZaoQo4UuOdsiTKo4JwTnSArRdg6Aci/thvQexnu7dkXKywScImrft9Tk3OIUYlbamFipK27J2Iypjdzcm2AuzePEPHnwnpTsfGOa4sSRi4dFIJV7ldU2B3Fi+Zs62+SSuUhxDnElFJVcrsPZJlk2SxVFxEJT7z05pbJB6PxcYurDrVzOKRVvqnQ5YptRy3gWSZpsY4qRWUpMZ5EXf+EX3nGd/pnkLKr6KeBT3+vrBXDzjBu6bA/eid08EbvhglB7R5cSQRyN93j0YHcqO5TlOoqq0KbEbhcpObnlE6p4J5DLzUTNM3lviWEJ5YN3SCg5AmXhCkTV8roSCmTFCwxCRUyJytnumcUWblNXKNB2EUQsbCseLiZoY0IV2jbRzqItfLE8zPUbR8o4723HTWaQqhaSpZRJKRfjgNiVkI2MOkcoiZB3roR+ZvheHHglSSZqQLPixAxva9ZxZHmRN69u4xDiLHJ1a4eUOqqqIsZUFrPd9zbu8/ybX8cB3gfbtJynChXOCbULIJ6sUIWaygecOLwIoXwdQlU2Go8rz8chJAQRV9ZInueXYOGoiOBcIGPPXNXCXbCc0HmlEmFAbQZbN6wN7b3M6G29eedZHjTvuE7/e0vw7z5EYOC97RY5M40R5xyVdzhnJ2k3ETyCD1XJSxUXPJrz3GAsujAvlRUq5xh428WmMZJSJnh7r+yECkfKYL7JQoiMGtCQMxbClvgWsRyoSySxJNyJ0okybTNttNwDsGRWKcmkMusiXYmNc2S+86WkxGQ7/nRmHk6KR1CBTNmJxcI5xX6OKs7bteec8d6VEPYAAaTkFAnblTUrIh7vfEmv7HO9c6gLTFJNLXbvru/scnh1ERDEO7pZx/W9CbPU4X2297DAswAsNeM2sTcb2+bnPKp5jjaJCjFHS+ZVD3IPgxeJMeOdUHk7P7AcT5zDOU8dPMEbkOLFU1c13jlqHxjUNcEHquDL7yqqUJGTRQ7OGZbqxOF9mGNqFsKnfhX+ITTxO497w1gQQnD0+K53jjoEqrIbmk/ReXjhihcR50CxUCBnOrU8RgpOKyhOhFEIdFlJznZkX8IewWDhSiyxa0teQjajcSWksp3LG6iA7d6dZmZtMrQnm2FapOWYdpmYkuVUQFahbbMZRkykFEllgasqqYSKKRmKI2UhiwpCLFDpPIK0RVgMGOzcnTikR8GczmFv54SYbX
GGEOx955B7nzMIOQT2ZIUl3UXEcXV3j0c21hARQgjEWeLW7oRZN2NULZnxFWTJeyFnz51uma9fnNEEpXJK7YVaHMGDSMZLwGnGi3kElZI/ZkPNlIx3roSbZePSHrxQUkrzHLW/HzEpOWWqyuNFqGvzSrV3ZLK59/LzyjnEeSoRhnVFypmmrmy9VYa+TtrJO67Te8NYSh0il8SzEj9HmFLK4B1Byk2kQIgIPkeiEzQq05yZZfMsTXaEAgAkiisWYaEKOOeYJUs8HYrDkSvIMVOp7eizmOk04cRDtM+cdpE26YF3UObnU/mApkRCmc0MuRIs5LEQqYRKBa5NqmgWugKDKkLukgUZauEWGVw2Y00ZckpIQfzAwjskz+9hLmCFqpSF3IeNglgCQ0qJUDnLG0qtIidb9E4gl3OuBK7s7PHB+47g1BZjN5uxN+3Yne6zsXSYnBMiSs5tOZ+aplnl69dvmUcs3rB8Ej4ERC2ctmVsniQ48GJhLKIElNph32um8ormyLDyoAlPpms7QxAVuthBEoYDWGg803HCeQNHVO19U874qSfnRC7f9/fHPLyzUoIqO+PpO67Te8JYgHmsnEs8P0nJ4EGF1HYs1RXeiX2fFRXoYqQrCeksJgspBJrg8CUsS1htxoswCB6XM11MJKDLGdTRJctJklpu0BbUTLFQSwVmUdmddkzbOC8iqgrOQUqdJdkx00VbmFqSz1xqCCmbN8vF+2RRwJHVMP4+ZAre8h7V4tVymj9YwM4rK5mM81LqJWr5SNmRgTnM5BCSZjOQEvYJiojlhOrs3z5UyepQOnb2ZlReOLaxyFuTDp0os0ni1t4u92/aZtN7QqsdJU6vr+AddFlw3kEWspTNIGvZDHpD8khy5GjnBnavnBScV7UYv32CLwhcKdHii9fxYuAQ04TvpIAvSiXmwRxK5cqWJBlPJghULuMNg0NzpnKAZvJ3FIXvPu4ZY8kqjGNkHBNZhUnbEby3RZlhPyrDUBI/A0CZxMysS0y7iBNPwGoVbUy4kn8kNWTMObECYUkCY1ZiSXZz2WkyB/BvCK48EOhSZtx2TGYd05ktbCclfPSOmWarC5TCG07w4tCSdGdVYvFG/WLud7Ky6eO9oTypQJmUHV0K2uPKwnFygMzlZOfMvBJdQivta+Eyz4EAYk6WrBfjdC7PC3VkC9mSOtQ52tiSJXDk8DJv39lDxoG2TVzb2bMCX2gQqXHazUPiI4sNo6ZiZxr7VNzCQ1EUNw8NLT+wDVJKQi6ai6Fo2Yz8POz0HGDMZtCeViNeHAlHp5aLkQTFobGEsti1eZiXJEQOakSW+hUEEVsL4/TOJnGPGIs9wJnCuNM5Vp5zS11ZssfU0VSOQWUG1EYl9os02S7SKiiOjNUipGT8VVLEQdvDtcV7xVL4MpTGvElULcl5xleBuvLEWWTWKbNW6bo8TwmdCCEw90Kq9nA0lqRaIeUI2K6pJUzMUophlAfnxIynFNcEmZ9jQXZth82At53QBd/X1uYh1QGdpdSeosGo6ikvlGK89rmueF1x9vddVpK3hQuO2+Mph5cW54BDjpGrO/vEFPGuLiBCMWCExabh0aOLvHFnUvKJTMIq6jHbNaRcDGGem1kYhLh5dV6Kqbn5fVBb9AU2NqTMHVTinbMibwnncjavA2qGxgFabktC5veoR8JUbV28S35/bxhLlzN3ph3TbDDqeNaVmyt0bb+7ZqadsE1LcGLuUsuDBirXx6AJQZi2EVWDf1ux5e3dQb4hqoi3sEvQubHk8mA0BAY1kJX9Wcf+pLUkvdxoESFT6hsFWtaC0OX5wsjz3avfUYHy5Czvct42A4NtbTH0iz5rxokFIF4cSS20cyGgapwt0VIiFHcQOjohluTelr07COsoiwpLeVRAe+qNZpx05hm958buPidWl/DBUzUVcZa4vTdj0o6pwsAQuOwBByUf+d9/4lm7LzmR1ELemC20ncXELGYmMbHXtqgIe7OOqMr+tLWiq8KkSySEaZuYdJFZl5ilxCwpbWEytNEZTSUbtJxzLrUc8E4tnRPoUS4pz8l5h2YL3yz96zeQUiR9l3V6TxhLzsqdSTfnCXUZZm0kJcV7RwgeRIvrFnCOphiJiJtXe+dkulKRVRVaTbYjiTDpLBcIBR7OM+NtxWRJao7mVapgBc0uKpPccmtnyngaEawIidruGHMmqVX8c7JCYZ9U52yJuiE4Ugr7ZZfLZXGXJyN3wcG21dvPfF9NFlvg4svfF9jZEmnFqYVxsYR2ThxOe++T+zVzsItiOZyqWshW0gRHohKDv504ru3s8MTRw1TB0QZHO+24s9cy6SasFmhYtc8H7FyHVSjvZ9V4rfPc81HyItE831hc8XY9oVRKLiiihTUhlpijpFIOUDWgpY1d2aSE/a5l1uXyTGAaM23CaDQKsxTZm0Wm0cCaWcrMusikNSOcdYkuZV5/Fx7+PWEsSZVPfvkXeeziN0EcyXnUBfJkbLvswjIudRBn/OsP/g+4cvg0w7pC1HaKlC0niFnnhL1+MWoJB4qpIQizmMjJEl9L6czxp9zH04prEyF4xpPEeBpLUGAP0ztBJRsAkRQVLcTMEv/fFQZRPIYi84Q8Z0O+fAEPYtnppNQ4kL5OpFTmA+YEQGNFzzFpfOHNGRHRfmzr0q7fvAlzxMd5j9PCqeoXRtmFbSH2xUvlzrRlaWh5yH4AplbZ355OOZqToV1y916sZYe2e2vejHkBzEmpvTjL5wwwc4Xm0odfPQpamNjOW25YjL0v0C7WrtyvHtDIxpvLkeDrOWp48Bqdb7h9oth7fEXpohWM/+Z/tfCO6/SeMBaAM0sjPvjQw3DqYRAPzQBuXYXRIhw9BTcvk1/9Br/c7nOpLPhB8CzUgWEVaJNyc39KFGX+/Eq831PPY++2s9B2xj9K5WZ754k9BSZ5Q6Wy0uWC4WfmYZVzDhc8zpnXSIVTZEZhib6UCrp5g4O6hveuhG22sERL3iCG5Ihz83+dZjPCHvEpu2qh/JW31bnX0H5nLuCG9B4M+50hVGW3LtCAU7uemBNIwMBZC+X2ZhEFVheH3Nka09Ixm0Vu7u3z0OFUwjt7bc65FCKVrpsCgnN18fzprkVr1XhxoNmIkvaNzqk9qnkOaBzkYjr/3rnvpOibe1QSTsL87w1lvAtG1x75sAUizuFSuddByubynwEaFgZDWD8El16HqobNE7B9E0KA88+DCFI3PHn1FdbShJQyg+BZGgQa54gIW9OW8bQlFgqEwLz3JBeKvhZ300YjLuaUy7pzpHzAyaqCZzgwGDd2PdUkzxesc71hmHfpd35FSV0qhDydo00HHC1PjPHgfcrunOlZxTrnQhmPSg7soiBpfZ7Wh6W9cQDzcKbvO+kNKWku4WgfyheaRz7oS1FV1v2YQZUsTEMZfXmXj12/xH23t2j3powuDVn56g1GNy/hfW1kSZFy74phqs6pNQdeReaIVP8aVzwYcBfh0vhjWjaJ+XX1fyNmBL2XP7jmsiGVmLIP0XtcsCdyHry2nJtCt36I8emHy8/eeY3+qbcV/3GOz
dNn9JuPTLm5epSJOmLZbRyZUFWGzswD71wapeyCnZRwBgpLVedJ+hyV78OrsrPlgjr1P7ebf7D7GwXjbhdO+Sz5ttdnPajoz2O+/l3uuq19cv9tEQvMd7oeMOgdBlkZpCnTMDx4cX963x71HCA88p0vvOtP5yjZXc7orrBmDjmLUJGoXZq/38ZoCChX7uyR24R4WF4ccP/GGt7X82s+uKa+peKucLE/z75/5q5ruNt47r5BPTBy9w8MxTpYCv2G85039u4crTewg2vk29aGm03w27d4+//y36CDIX/1r/xtXnj+xe/qXu4Jz5KzEsXzzz75j7hSr7A77fAqLC7ULA8CjfcomVnhXnnnCAizriOEwGJdkRTaZCzSaTRIuO+FmRZWsRUIpVArLKzq+q+TxbQiDu+lJLlyVzztqII7iHfLUki9G3eGTHUx9fta6ZcwsqZzvjzTQt7LEGM02gcHbFx1gsvK0s3X2Tn0IBotlGDOgraE1luLZoGb+9/1OU/ZNIV5iFf+lJLLH9QeROZ5g4iwImOODsZ4b4TT504e5cTyAv/f3/4a0ztTglMeevQo/4sf+ggrw0OIQEpWNdesZE1UoTlossqGRErvLSlGWmhE89CzzysASqipJccCY230oVWP/NmGZyEXHFBkLD8prQQq3J23iLPzNSRPqa5d4Mg//V8SXEBDzbvhYfeEsSjmCSYxQeNYGlQMqsCw9jTBM/C+7BIRxZqtupjZnSVcp7Qpz+FkIzsqAV9CJcsTnBNia3SHUDlSVAjBKsyqZcdhXg1XpRAX7SH1CXBPDa9KobPFCJQpG9sYIBfDc87ymVA6OhFruNJckKFCX3G4AkoomjIL4wn3Xb9Is3E/bwK7EojzZq8+dOsLmiX+xhAzFDNAVxAvu8GW95S3mIflJZbvuysFmJWeGR9skV7f3efJo4fsnogQ28j+tGNvNmFlWAy1LFrLX3zhpNmn9XmM8esOQthcOlURmVP85/mCCOSEOIOl5zkPfWjVQyjlMu7yjJTXaGkJMCPO5XW27/iynuxfu3vQd1W+c6R1TxiLkRUhRes9GFTe6iYoXYoHrl4giGPSRcatta86p3PP3rt+h1AFY351MVM5RyLjakddil/RZWad7VJabpKqwbOxUNXBkkDFzk3UAWmObnXJmrNyKuly6MMtC7mCd4grFBctl9E/CwGSziHjAGxMZzx1fZeNN26x3U34n7zPsz2b8srWTV5OmfPNiOvNiD1RoneloFcMolxHj5i5PozMd+VNfejYd13NfSbzludOPV3M1Mn6h3amM5YGNcNBxbRuaSctk1niznif+9YEEY9zOl9odyf88O2h3t2F07vzjd6wDCks0HVhoFvx9mBz6L1IH4c57+bv04fZ86p87zFx83MypFBAjARb07/fwbm903FPGIsvuP6ormjrQBMcDmt2spAizaFZRZh2VhEOpb8jJvMMvsSjrnKMqoouF2ZwXxkujVIqgvdGZ/FtLBCqtRHngqR0XYfjrkKfF5JGNIkZKPZZMVvY4zAqfr9QqzpYjcWIFITKz5OZlKzd1QUhJOXE9h0+fGWbp7dhkYa90Sqf3rtO9/qEjUHg+9MGH3AdW3v7dLdvsXdknberhvPecUGFGwn2cqYjsV5XjCPGZsgHMHRvOL60K8wX8V3UD8QWf1Z7LQLT6YyMsjxs2KkmiBO6tuP6zj7pmLVH9+/n5xHAATLWHz2Uq5q/rQ0BDugnwDy/SIUzdmBs5X3sTUo7c4+Y3QV2zD9T5zmp8+4uozQjE7H2jz6/EueL17rHwzChiFRUnq5yVJV1vGW12Nx2RMtJVIVh7Rmq7SjTLpVKSd+LIFRiRa0uRpwTRoOalBLjmErFXih1Z0NyRCxfAYZ1bdAw2EIru78RB/twwVF5z6yL3+a1nZTOSe/w3tF1hUVQ8h9x1vFIVtZy5NSVGzx7ZZuHpjULfhG/EKDyhBgJE9jZHlPfHtC1ETfwrDPA1Y58oeNMM+UTZFItzIYVO3XFtZi4uTvm1TThijgmoaKraqbBkX1Fi4KGApeXnEcPDActhNJkdRAfO6JmtsczNhYHXCp5UtdGbu6OiWlGHSp6mr5zQtdFvA9MplulrdgbNb/3gFhoBpbvuT4UtZVrzzqO2R9fpaqWGAw2S14Ucd5g4b5J7YD3Zt+r2obYb1oi+ofWvoEy9nk9z9A8vsxD0Xc67gljAUBgUHu6QWXNWupRlXkDmCmEQFXafmPpgGxzJnY9HdsZ7bv0pijWZVkHqOoK3zlms8Rsluiy0kWjxnjncK503WUleAjDmmnboSVG9k5oCs1EpTSiBUFi8VglEfcOuMtQs2JFy5ipXeb+7V0evniDx2/POOYWGdSHIIALFblwuFyuWK1G7MeW9bZBOyVphwSLsfMswwhyFGRBGLVKw5Qj3hbiR7VinynTPMbvZbqRoYQaA9NBZtZURi1KgfEgMK0C06qidcKgqlhzFWF/j6+f/xb56ce5vrvLifUlvvnWDWMKzCLXtsfE3NHMcxYKo9mhmtjav8bt3Wtl43FztO+ASWwIRM5FqERz0SWwVmnNLUlvgbtWCq/293VVWc6GKyCBwztv3ZkIwVmuOs/TROZ5jhMzUnNinpwVpwnomdj/GXDDwBLUhdrTDGsG3s89haI0zlOPrK99ljLTlBh3wrjLpfYgJt5QksQuJXI2trBmJXihdo7aCdFbPaWNmSzW94IWgp+zJLQO5j1CwSBLqcNcvwt2XgUFTXUPAffV7FzqBWZgQWB5MuHUlRs8fG2Xs7OKtdEy1eIalATWaCDWrZkTOKcsULPVTTieRpbvRkMYsmSyy4TsaacRnx0pKHhBl8z7LcTAaGERp44YWlwV0KmhTlaoDLgAWln8kvdK05smRBOa4JXXz7Px0BqLzTIXdvY4srpq3jh4Uhu5vTdl3M1YaA7CHduaE20cW+JeCrnFaRi62MVCyTcLU1FSsp6bXO7xpMggWUHTaFA9QtbnKylZwbavEknZmGZtx6C2NmVrP3ZzrNzWk8wNQ0RYuHWNj7cTvvraV9DhIpPZ+B3X6D1kLMowBFrvi2iB3fxQ6A49Uuqlr5ZHai8sDmpjIXex5C3W3DSNCbJQ1Z5hFahEqJyj9hYGbU9soQxqq7wbNV7wXqlKGDUqMW5Wq8rPUqa0PSABame7UxczEgyaRqASYWU65eTNLU5eus2pfeX44hrD4XFYdPMrtv87k98pgbcko+uMpGYrTrB+CxBf4veWOUXGNZ7sbcE5EeIsE4K3vKwVclRyAr0drfgYlDzOyK5BvT3ooEnJswQ1OENWuJDhQ4cf4FCzxOFLW9xcWCm8PCGNM/uTjp3xPpuLxq/LRSrKOU+gRvwq//ULbxLVNinvoArmkWtvm4grPLTgHbWAl2xiJVjoGpyxiL3rxTsw/YSyiQneIHTUIgJxVKFC1aFOiiSVMdp9iQZSafxLGksbgPEFJ92UKdmYDO9w3DPGAkKblL3SizLwngVfyHqYi3eFNKlZGVaBQQhsonjx3BzPjIhZCk6N93R1pinidXGOwWdCEPMeWPtp
qDx1CGgWxGNdePOGKCW4QJcz+0VMIpOt30UcXZ3w4tE2s7SzxfHbd7jvxi6nJspmtcDC4BDVRoU6V+o4vrBeewO13c/USyiKK7DkGt6abUMoHKZou3+uewgV0iDB1Kr8OSpEoSMiXpAguCBzsT+dKa4q/e+t6ad12V6bukwQj7ZK7DLTQeLqoYZDbhF3I3JotMSdr53nRMqc90JM1hF6e2/MmcMGfYvzoNHagNUxCBXbbc3VXaPMqIjVPkrOZ3mSPyjKOofg0V48jz5/sAatUDQGnPjCgdO5BoIT8KXjMkhlv/PWBxMcpGT/BmeRQV1+58gc2XN8MApfveaZNZ5JfOcVek8Yi2Jkyr02sjvrCAchZ2nKAY9SOwtvgigDH1gIAUUZx8TaoKJNmXFhj1bOMXQWC3ffFhYJC02Yg49NcIyqwKAKiNquN9sZM1oalodoNPz9NpJrTxsVCY4mdQz2djl55TbL17fY2G453lasDkeMBseIrgMsV0oOXIGlKehNSiXXkUzQUhtI2YzJRRZ8xfZsgtRW+9GedZ0A79Ap6FTwlZuHLObaHPVCILuEBFc8j8BQyF2GZE1ZvcCcZjO2mKxAGhrHH0yu8PTp+yzZbg16feS+o/zVt67wy7vXeZ5MahPXdnZJqcNXlTVxlX5G55SFZsj964tc3r1jifucJS4H1XcMHTOCaemYLF5WcwktVTD+nfSZBdah6mgLgKA5I/EAklaxup2hmQfKPKWiWxadyTOdmgQmSfj8Vce4qtjt7nE0zFCIvmUYwPrgUTMKJ0WmSIz4F8TjxUK3SYzsdREtnKGUrdvRetZNjC6qFiUQoWlqhjmXOoqw1tQMQlGRcULtPO3ONrp9nZXNw6TpDD+dMrmxS7y9Q70zpr61z+oks5JqlqsFFupN/EJAh9lK40WlJM5aq7+40r3XPyiv4Cw/EYTsLH5z7kCaqa5q9lJrWsPi0LIra1c8ZFaDbZ0atleq/M5D7CIuutIegO04ycJESUYaTVi9RWKh/5Tq/yxFvsUWf3/jafLY8henAjcSZw+d4CcXhmy+9gbPx8z1rX0jQ5bmOuvMNIMH5f6VAV8QAziMuVD0AYoWmKK4UrD1BeLucx+rvXnmrXYCc1Zy/7cicymk3ggVa9IxGmsqrdkHCjl53lRmxercs6Id8/bmdzruGWNRjObexmTypXiquwiDXc5kEZxC7a1eMkmZScrsdalIsh60iqacyNkbmlYgyarIsi5JxVIV2OsitRNDv7y1JQ+9Z7SyzNv/zb/no+tnGOwKca8jd+CrQBUanKwgweObGpxHnZ2/9hwT+84MJUacenKpATgniHrwheyYDq5fNVnNyXkWXEP2SvZ2L1DbnbPMEyMoMDiSkcpCGM2KpNKNGdW8SRYk90hxSf4w9C/naJ67LLYL7S1OnNhklBxKNPRKIE8szznWrPATzzzB0dfe4O2b20y7KU2zbGhW8XB9Xef7Tx+hrirTO0iJNlOUJU1bYZaN/aBqTX9RhBjtOadkvSkxuyI8opAgkgvXzihDmdJsR4/i27M2Mm1hDxTqTv+7UmGY7yP0T+zb1Nv/8HFPGIuUXb5NJiHkpcfhlSmROni8Cl2hPSQs8fc50SarkLddwqq3uRiIFSn7m9HlzNY0UYl1VVa+MgmkovXrnGlMBTJ7v/kGcnWBz109z59feJRaPTIISBNwlTPlSXHmMZwl3zmDeIeSyV1GgsNVgdx1SBUIIRjFJpfCWS4hWV8QVNPDSmWXq6oKQeg0miRSX40PHqmcJfXlwWbBvFYGFYO2KUYj6iyU6ntJVJFUGtRMN8rkiDThKuGb09t8cv1Z6GwRuqS4RpCBQ2cZHWeWVio+/tgjnL9zgwtffplHPrJMNajufqKIOE6trHBmfeOuWpQWkW8rUDrniLHD+WAMCcF2/54bVkKomKJpl3WJnpCS5loGyaRz1Z5xzCZzO4upUKgis9ihmM6DiDPWhZpAycINx/JXK/7y48eZ1kP+X4M/QQ++iPwL4MeB66r6ZPnZOvDvgNPAm8BPquodscDw/wr8GDAG/r6qfu2P/Az6vVhoqsp2+yKql1SZxkwQKxRab7uYCkzxNsM6mGYvgkig1dI4hEUnOZcbmRJJhOQ9SKaqAkHUEu9sZzH70tssXthl4/hDfOHq83y9vcb7V++j33EOGMQOleLixSHewoXclkp1LiJ4pXKdreepVJ5Dqa5bPtAffVEslcUyCA1TF2liKIVRZ7F84bKpsSQLmpbn54XYt1rg5p7SIyKWxxTj8n1bgEKsYFcmTBcch/yQPDV0S4OgjYOoyKwIZex3VFQ8sXqU7b0Jr//Mr3P0Y8+xdOrovGjbFxHtnHojKUzveb0Fgq9QtPD3TK7I9wBAKZoSTC1ThnUpRJbcpIiPqZTqvPbUfz8nS6aUTGDDaUE767mmshMhLCdWm4r/4vFT5OESPzca/PGNBfj/Af8M+Om7fvaPgd9U1X8iNgbvHwP/B+AvAg+V/z4M/D/4I2azwN1FcOunBqiDs0q8s12ky4lQItE2Wtw6dI6BN9WW/cIjqoPHJWupTTkXtUctFXQbS5GTxcy1y+ANEXMK3VffYOEPbjIajPBNzYfue5bffuOznJyucnhptfCTnKFNrtBXpDdzAyJcCJYflJwA74ldpKodSC+fkJHKIQQLk/q+XrFNgyKwN6RiN7eshYUS1tlUACNnBsRDTvHg73wxOA90BSAJnja1pvIopW8Gex3OwtrUdYQsfJMbPHfyFNJlYz9Xght4JAjMcmFmCzK1fIAIywsDFjYrLv/2V7ixtsj9P/Qh6pWFQmJNTKdbpi2mffeqKwvZlxDSpFf7509J3p1zhvCVFZIKu1hKTc1YyJ6sHZo7+k5YJ3adItDN1MJmf6AqkpOFltJPPCg1GOfcPPd/p+OPNBZV/V0ROf0dP/4J4M+Vr/8l8BnMWH4C+Gk1gs4XRGRVRI6p6pU/6nPQg5uizvS/QhGhqCuPxGiM0SJy1OvdDpzDA1MxYqMviXoTBDQwkYg6SNGS255zNos65zM1TqhefIsjX77J4sJ6EQdvGA1GfOTMc/zHb32Fv9d8yPhdQS3nUMCLIVzOmQE6MYw/RRy2uzkgdh0RIVTgqworyBV0yBW5JxwajTdtiWhiOYzYSmNO+nUDCGpvhoqad0CRIi+j2dnfOrO9HEubsSR846GgPNJPVnIeyaXQ54TolDd1hx9deRqdKFJZDShrwk1NjC8X8UADEpzlSAqudpxYWaUdKBd+5tcYPnqWIx96BBcCbbvNeHqzeH7TF1NlXhKYc740o5QW6aKlJtq3EZTXiRQBj17CyjZT7zzOeZwLJhoohcypGe8qKO9LAZKcDwWidgxuXWU9zrh081ukwQJd/NMX2TtylwFcBY6Ur7/b1K8TwLsaS85lX3C+rCPHrAweaoIwAEKweSomdJAJCLU35coQPBEIRAZVmKt3iEDd1FRdpHHKPpGUrbnMOUFLBd+9+jarX7zG6vImVVMVtM2AgdOrx3nzyDE+d/s8nzj8cHm45aE7j1NfCmaOvpfCFnCfuGd
cUx9AtaUQaSX7XktLrCfdMlYb6uOE5WbErb07aOjZtZb4UjlDbzJIQQlTTqiz/n6dZEh2Ljka4uSCtQHQK5rYypkTCd/ydzi9sE6dnTV5jXz5nLL7Jmz8R+2gomwSxgwgJmTkaEaO+08fZfvmFV77V2+y+ZGnGNxfxlCIGbMh3KbEYv60eP6e5Nizk+fMYeb1NeuN6fUOCqtYoW1nBo9jRceYlKqqyCkR1dBEk4XNpVZTyJrA0s5Nutjy9vW3iIMhbZy94zr9Eyf4qqoivVTC937I3ZO/1jeAAdM2MkuKiKkGuiZQFYBi0PdyO4/Hai2VE9psuH4tjuGgxqnifJg/CBAGEug0MwqOaRHX9mJQtb52keXPX+DY6nEG3hJx70NJIG1xffT4U/z8C5/m7PYdTixtQmO7rKopSJIFTaDO8gfNGRUr1mUVwtATx1NTaFHFqyXqlAWTUkKSzomAWjSvlusB5/MegnHLfF1ZnaRLVE5IgCuV86QJIaM+obEU77ANAXUkQILVcpzz5K60FYiiDr42vsSPnXwGnWVkyYAL36rx0nypnlcCQXC1QeEuGNSvKUPtkDbDTFhlgeWFEbe//jK3vh6pn1ogHFqwxV5g3n4KgNDrhum8uJaiaR73Nam+cSu4MG/2EnFktHQbBLtWBS+BqiyaDJBl3gLuvUOSSUjFZIIjRnEyrWMpslTvdPxxjeVaH16JyDHgevn59zT1C0Dvmvy1ceqMqnbMivKjoyIHVyZl2aIIAgNvs1h8cbG90qNPmSYY1OxLWDZvNxahjZGUlcZnUmVhGlkZv36Z9jOvcWrlGMOqxle1RYPOl3F1Clmpteb7znyAn//WZ/mfjj7KUIem3VXCRAkeXM9oLTmMs3jIidVeXBPolcK14Pk5drhQ4WpPbg3NsxdY+LEYRux2e1aLcEWhsnDgEgVAKNG6QKlsmxiEw5FzwtVVKXZaj0iyDreiYm+h2m2mqIsclUWoBVnwaFsY0gEkKm5oFHYZeFLvIWLBbAeCVoLbBt2PMHDoOLGxtMhqEG5/7Tbj5ibh8Q3CoRFJMzFGBIcLFaZIfVBnCcUYXDjoVbmbxt9z9UwxtCi+9IqWRe1GVMzrzntazEh9eU/nzSv1RmmOvo8Ovvvh3vE37378IvD3ytd/D/iFu37+d8WOjwDb30u+0mPksRAfu5jpYiaW3ds7G0MQREBTzywHtcFHoyrQeGdhGcbNagpxshYYBKu31IXYOHAeuXiT/KmXeWjjBMNQ4Z3NFPG+MsWTAjN6ZzvO8aXDrGye5Leuv4RGy6KDqyztLmIIUuotztuD8k2Dq4ItMufpYkev4qjRKuwGkR6EQ1JYh945hr4yzawUD4QlSjsuSckx2vsZgcoWTgfSFXWj4NFoijX4vmuytN9W5i5UhOdnl3j/5nFTwRmVvg4vUAEBNIIkIXvIXpHaMHklQ8jIwMGekvYS0kLeSUinMAY3ThxqVjlZH2fxa2O6336bfH3/gE7Uw9kluUcNLeylXPvGsV4tB+mbBW1Ru0LJR+1r16dkTgpqZk14vQabES5tMwv+rn4luVuF4bsf3wt0/G+wZH5TRC4C/2fgnwA/IyL/Y+At4CfLyz+FwcbnMej4f/RHvX95fkCpi7gimt276vJ7c5lKwNGEgC/zBr0X6kK8DCKG4qDWpai223stFxoqGlUm5y8y+dSLnF4+TOVq2+Gqar4rWeGOIsBt+UOoan7g1Pv45Rd/k7fuXOSBQ/ejwejnOWY0WOKYSZg0litVbYdExbsK71pyjnhvPR5JekFvYxw4+lZYT9JMJaHom0Ukhr7AgPrS2x7My2hXRNTpk+Eyo7KAGDbDRoFsg5CqAz3gmWTenl3nzx96Am1M0DCV3EAE8tgoMqnLyFKFNg5fCfH2DERxazV5u8ONDaqO0VgDrraCq7UwgJtFVtwKa2GVnT/Y4Uq6wIthQrdeExrriqxsNoXpstnqK4u5b0GGujJDimXOTg999/JSMSWaysJo721d+NC3D5cRHNE04UQcK/sTUs7c2Z2QZgf50B/LWFT1b73Dr374u7xWgX/4R73nH/q7/mSCJzhH48Wq+KXSnIsSYSWuDCKyeoyWFtRKjM7gy26SSk2lL3ZqAQEkK9uvvMX+r73CgyvHEfEmOicGLMx1EEqY1yvpVCGgXaRpGj7ywAf49ed/i7+3sEbjF3HOoMmkGbSIMxR+U+6ihQzWb0SoGmIZUgoR7TC6RQlBrPHKQhtRoQoVPgzo2hmNCxZWeW8xpjNKCVnn50kWEhEvAaJCLK8vr8lk4145h4gV+F6b3uTBwYi6q5FFb16zK4u8XzceaIzAKOJJ260tQCucoPsGnPiVhtA5dJrQVsnO+HCSsEJqUnRPWcgLPLSwzHHfcms2QTaGyJHATJXzb1zj9P0b1MGXvaGEWTnTpWgsdCe0rTEeY84lRLPX9vB2yqZamWJHLKWClC0HzerY2R5ThQoZz4gxc/nmLhM3KcXt737cExV8sBi08o6FKjCsHMPKl6m9fTGQeXswShl7DQFnk8H6kdiuZ/MW1yoyz18uf+1VJp/5Fg+uniT4AWmeXFISbi0LvXQ9AiZrCp0o4jyHlzY5dOoJfv/iN/jh0x9BqNCmCFlErPiFkS17dMeKgBmnDsmYYLfzBdakVPL78MHq8qmI1vlQE+OMprbaRcZanIm51HtsITkRg9XF3h/t+VJWyFRkHp7Q8+dUeXH3Lf7So48ilaAukzohjzuDhsHg8QUPVVloKRm/K0EeAbcikgt7eJqgLddbCczUrt8JrrNCqaggHWhKLFaBRb9EPqfceOkWv3XjW7zvLz2LoExmMxaGg7nRWPJU2wiNnBlWwbh/mu4qcpqOtVI8tffkHOkV/OeLR226AgKDG5mmDjx59jixHjD6NibCtx9/3JzlT/mw2zFoAkuDiuU6sFR5RlWgKi2oph5pveXTZNO25rlOSfTbZOLTYB6o9t4GuIrnymdfYPIbr3Lf4ibZCSoZ762rLgTriVFfxCa8K5TyXnFEUG/oi3jhsaOPcm5YcenqaxCTLdxkBD1RVwaM2n9amsr6OoH4YJCv86gv1zXvSXdzLpZzjqgdg2rEjDHiivI8fa3A9zsMYdAgdQV1CTe8IHVZ7I7Cwi3gg+sHQiXuMMGzz6HhIesRaoFxwgVHdor6UuDsE4GRsYpRRWuFNlt/zUzRiHHRVHFRrCWg7hep/Y1ERbtSfJ1l0iSRZ4lrN2/x/LnX+PFHn+JIGDBqGpYXFqz7sdcg09KFyQGC1Tf+WRRW5mR6X3qf+h4YX3JBe49eBms+NVk5AA7eNWO5R4ylRyC6roxuLmJ5ZMUBtXM0wR/g42rjEfoJw9OcmcRMq9AWlCcrtFmZtR1vfOqL8MWrnD18P1U9smnBoY9I7AGkGCH3bh96WnlWpZuL/DlEAoMm8P4HP8pv7rzO5M5t6BIak9FvSi0jzaI9zMLCdU2Fai4jEToKzRK8HCiU+MLK9c4QMoWlhQV22j3QfuiQK9O8QJNCUlIXDd1SK+hKADTj6lJzKrJNzBeFDWb9g723+MDh07
imRscZnWVjMgegFCw12HJ0Euw6tYR9I4EppFk2JnQWiFhk2GZcB2ls4EXusk0va223011rB3CV8MruTX71xhs89+STHGpWGL45LuMoZL46DhL9TCrTh02owmBze60ra6iFArJosYbeKLQAOLnkQXPVTphPPn63494wlnKSKcMswe1Zx+1px06XaDGqSuM8C8EzdI6h9wy9DeV0IjTe03jP0DtDurBmr9l4wuv/7vdZPDfh2PIRnKupq4YgwWJdF8liwnCoqdGHAjH28G+m8NGyJf4GNjjWF1ZYPft+vnjlBfJsZjBqXwzNYuP1pgZL9dKhPtSIq0yIL3WoJKPeO6PJmD2awaSyUy6ERcZde0DzoCwSMTaAVbrVEv+ccCq47HDq8XWNqwPU3moIQrk2RyuRm7tXOXPkFLqbyZWharnKBq+G0qqN7eDduCVPIswyeaAwVjTafygmBhj7GCiTY8JpwleGrGm2LlM8UDs0wOeuXeCFrWv85ENPs6ENeZoZ7oPfntGrtBjp0sTRba3IXYhW8bO9llhBFRVraaagXv0xl2gSg6JT6Yo0TYAyUe5dnMs9YSwACHMqtxPHLKtpF7exeIuS09Q1i3XDQh0Yec8gGCBQ9cgZjoSwd+UGF/7F73B8K7C5vElVD8F7HIHgKjxVkSrq5tCiOBBRyynU+i7Ue1vIXnCGoRo6FjyPHjnL+bVFrl56DZcV7RK5zB3x3iO+5AjRcH5XFg7BG+wrBiZoEJKkudpikGBjJJxnoR6y07XlAReMSAQ0k8sMGqOX5yKyV5jMoRRxC+qDCBp8mYCceT3e5v7RIqNqxRgCQQsxsWik1c5252Ce0nnLt1KMMFF0J5MnGVdyulx6ZyQqUgdkUBQ4xRBMTYk8tPOLKL965VVu64S/9uCTDFuHdAI7mUaWqF+9eVfF3lgJffXeGtbsSfdIqdnnQVXflDEtL+snqfUtIIZIWw6X6WtjlPTynZEwuEeMpW/eGQSb2V6XXm0zmI7dLjHNWqjYuYwzMI/kORBw02x07ttffZndf/NlzvpNVkdrOFcZ6uUrW8SlbuNlQExKTJGkidwl26HLDZUSktmNtTZiE1GysKYKnqcf+AC/Pb7I7M4NnHocgi+exDmTRrJEvAASPlA1NTFbYTAntUY1KSiaSKm221z3xWrIfkjgtCyKsnibCtd4RErIAfNYI6cSjokDf0DNJ5qwnKsCr9y5wLMPPGIhVxAIli9l664r0rUHITIzQ50cArtKnhY2NWXxJtDOmsWkUzRmcnDleSVCZeziPen4t5e+wcrigB87dha/E60BLakxASIsbA/w+7PCDu7nYZYalnMl/+gvN9sIjx4IyiZkkXOf7Ms8clFk7rEPjEgOjE3ePQ67N4wFSj+8mCAF1siVSnPXrUnL9WnLlcmU3c6S+1ZhEhNdyWKD98hkwtX/8Fnq37nEmYUTDKoG72xks4leFHeNoSfeB4KvSbkz8YRg6illoklJqHWu1tJBoY/Yw3ASWG2WWH7wab506ZtoNzXxhwIPi7g5/NpL+Zg8rC8Tt+I8d54n9r7Hmu0hD0LNnmRw2bS5hJILHVSdzeEYv2xem9ASsqVon+3tP3HC7XaPkPfYXDmGtJaDpFi4AFXxsB5L4gvsLLWgwfpZSCDZNAy0K2lxNo01UcizCEmQztjT4gw+vrM35V+e+yJPHT7Kx9ZPIa0BAfMOSRF0khkurSIvXrVrUr2LWW2boYkv9iGYGU7qjeQur1GgGboYjd6CdVZ2Mc3XwV36MPMemXc67gnouG/nLPkqk84SuR4CnsbMLClD70haelZSQsVRkxg6x/TFN9n69Euc9BuMllcRX+PrYA1avRh3LwEavHGkegQKJeauFPRMME6yoGVXdijZBWICL4XfpGr9IEF46PAD/N7Nt3n44jmOP/g0yRl9vJia1T+6zgqmzhG0MiONHS7USHZY4lIGMXlf+kE8NZFJzhb+9CLZIZNdILho8Hdy83gd1HrineCCcan6llnJ1pT24vZFnjl8Py7bPckx4xaCtcTEhFSmUVBVBjJoyoi3hrscszWTBWekzwLKqTNGsraKerH+7wozgKS8uXeHT735Aj929gkeaNbn3DqpHFRm6A6PNuBaz2hH6MYdjKp5rljK9HNipkHFOveq8yBKbQOyJj3r2xdsdF8+gFbuYjyXHqL+vd7huCc8Sx93xmT9Il3KRIU2KZMuMZkltiYtN8czro5nXL25xZ02sdVGZtfusPWvf5fml8/zaH2MhcGCSSkFK6I5tQ5EFTnQ/c194ldURVwguIY0S6jGwoKWkvT3aEvGq9FUHIZqJTWiYuUr3vfgh/jN8dtMbl/GZzFIuXRUilrbbsgFkXJCVdVWt2hb68loW+J4Qu66uWK8qBBcRWoqcm6NCFh58L6oW3pyDKYco0Z/QUwGKGuyPMJZETN1EfUmEXVp5woPrp0h7Ubok+VOUW/ySoV4Ra6w9uUib5b2s4Vm2bxtaiNC0U+uejqJ1Vhc4w2CbzMv3LnKp859g5964jlONavELho4kDK5SfY3ThCvc9nYpfXjyPNXrZ1Yeg9iXkC1DMotoRTSq/cwz1WyGOUn3VViOPjajC8nm8zW11/6toF3Ou4Jz1JS1EJ1sIJjygc1hT7xmiVlEpXxZEJ+8ypn9idsfP42zWARv7qBqxtwFb6QL7VXP8yA9Ibh5oU5+rVbWR9E4zyzOIUukrziCBba+EI1wXbPvnelH8et6lgeLrL00HN8+eUX+P7RGrowQETxg9oq17kk8KlFRJGYaGdTdDLGDYeICrUP5OCtPyZb6OEIuKohasSZJAyuEAQJGNpGJnvzYGbXuVTs0xwa78OWC91tTo4GDPySCZp3yWSQhHnjlKYSNk4VJVoPjwTL6bqiltKV9y5jQDQZHN2PuVBV0izy2duv88alt/j7z/0AC74xFMfbOA5XeTNGsLHcpYlJgCrULG8NuX1zTNocWSt0yVHmELiWjoMi15rLaOdcgICcM8Hb7E9NaV64hAME7MDL9ITUdzaXe8JYtJi+aTlZQ08sXqCpnCXFzvKFgNCurtG+eoOXP/8HrHSBZxceoi4NQCknVC0Z7xPU3CNcvp8JAk77Ia12i8Sb2F3lBrRtR5UyKqlocqWCvJRBPeqKvA7GVxGTXnpo4wyf3bjA2cvnOXL2cYtRYiRnY9CKJg597mcYXXkZvUvYoc9WVRwXf+gf0m6etCIQBgnX9QKT3DII3ti9wWbVi/NobCnZLxnMo3pb9M6Xwa2KhXCqvHznTX705EPgFd8I2TtyZUox0usDdGXBDC1MkyzobkK7ZAblseJiLei+1VFc5XCNQ4uuWRT45QvPw/4t/s4HP4GXYHy0BNqabK7WildjVOuAu5Aqe83a+lG2v/Iq3SdPk10/GqPw3aSImZRwzECZDEVet2cpx5RKB6QchG3aT49m/n4Wgb4z1QXuEWNJBeqbxV7v1sbYxZhNrKISBsETs/UkzDI8cKNluPwwF6d3+Pc3XuTD8X4eXTuFD8HabxVcjiQ1w/NVjXjTGfMlF7Fyl1pdIGWor
FJZ1TXaRSRa8NtX31VL4usNnhQ9GI8n3lGr8vSDH+A3vvbr/K07J6gObR4wAtQKic1f+FEWNv48HDoCt27CYIDevo0sjIjf+AOkzYWuMq+DMhg0zPZnNjbDh2KoDrQ3YtBcpqGVh9+DWOLEakAi7MYWaffZHB21rszGG+w7E7QyBEscyKKhbCZEjOUhyfQGSCBDkOBIs4TripHlA5bAvov84stf5b484+Mf/EHrLM1W4ZfGlc+yTCIna06TCOqxxe4FzQKzzObiCS6/eov80BqpiHSIiHkLcUAv4XtXJV57T3MANYtQph+7uRdRDDmkv4cuzNP973bcE8Yyd4ki1B5i4fuFJtB1md1JZOotT6iDZ4hy37hl2izwxMoGZ+VBXrj5Kt98/fN84vBjHF09DGKkui4n6qYpjVgl8zNBYaOIAJoSLil51s3h21BVtF2LL5i+NRiGeV3DFQJkCcRKQl2xMVpm+ewzfOHcV/m+hU/gl4ZYFO1xknDjbdJrr1qO8fpruAfOQk7ojRvkmzdY5zW6wWKB7OzefGz/DmvjjqXBivXuh0CBxazrsNQ4+mq2FHgVKdCvGEEzpCl/abLD8v6OxUqhGFd5nTo9oNyQLeGzWMVo+ncZcLYPxKUSuMRc2qwdN6e3+MFbN3jwwbO43/rZg3vfGZ8tF0aElPytJBsl1C1TnVG0cixUwmDrDvHUot0z7R3pgXZYXyvpx7zb13cbDvMJa5TNuP+6Ho+Rb+3w8L/8BXJVE8Z/+m3Ff/qHwnTWMRLHcmMatrOYabtMUiF2mWEwwt7K9oQjruZSVSHe0bjA++97llvTLX7pwtc5eft1fuD4UwxHI5q6PkBQevi43wUV+lHcwXuy96TS8SviCL5hMp2ZfCQm9iBis2MKE8zqHKXRz2An4ZHDD/C5Gxd47NrrrA8eBgIiGR/3kZ/5/3D99AfRekiuz6KXldhFQjiMrh41QmYpsCk2aiE4x53pdUb1AtKEuSE4Z8U+zWravZjqjROx3aYsyPHI8fqwY9Issrz8IC47Ni+NqUpdBG+QcblwC90ah7aJ3BX4OPb2WyDi2ljEOWLeJylX0i4X3zrH+x56iPrwafCGSKVU0HBfVroYXp4jRmmqiqcOB0aiTkwXbRBYObPKras3uH2kJpmSO5QkPfQzYdADljmU2N42A196fXov0oMsqkqTrR60Mxiyeuk69c7eOy7Re8RY+pjBRNaauqIVYZaVxSZYhV2E5aamAR54/TrVcAHXBpJYoq3OcXhhjU8++kOcv/kGP33ud/jQoQd45sjDhNqq6Za3BLKUGB3jnkWB6MSKjkVkAm95TkCYFaREihyo2UWpM1AgUAUhEbxBSY+f/RC/9tVP8ZMbRwkb60VR0oxs5+kfoRstm2i3wGS6hw+eUA0Pdna1duO27bg+2eHN8Fl+9OgPUC0vFgkkUyoxkTwbSeEw4T9xDmkCOwuOrw0nfLW7yfPn30CcZ21xFU2JY8/cx0ceeIAPxEXWLu8TGltY4sUQKhF0O5Pa4q3UED6jtQhuwRs3rVNwcK67w+9/47P8xY/9BPGBx+lqhzi1XphUOhIbIe12aFdgWC9zcQlV8MPiGQYl5/TAosMvV2ie8tXXLvPrx+4vSqNWSSkkce4eaJazqcfQ54OYlGsqdP5+uTlxHNu5yv9x5xz/7Pv/Bh/4+lfpvnLuHVfpPWEsxSninKdVZa+LxALLhuBYrDyrVbAKvwhHru3ShSEkbyRBb/JEHqsPPHr4Qe5bOc43Lj3Pi6/8Fp888QzH144ag1ZjoZ9nMh6c2hi9XOR9Cs0FvC2OnJBSTOi6GXVVG5NVtYRXfYJZGrdcwNOx1ixRn3mGL73xdb5v9Al0YA1XgEGhhesFSl3XpBxL/lTuSR+aqqNTuJH2jFofoxUOSzuLhIDTTGqzfd3UZODr9Zh/t/U6ITtef/FtZtMZtC1bL79CCIHLS8u88PpbfPb9j/PRh+7jwdsth/aVNR/wQch7Cq0jBKPYS2ur0bmKTEQnCemscPy8XuWlN1/irz7yDKsbJwo0q9BaTUsMskPbjLVaA0Wko+e7IZanaMxIpWijyHqFjqx1YBRqPnZ0jTdub/O1tSOFqqKllbiXjeWAYRwtNJ4rxFCEUfrk3wmSoYo1SYWbsWEnh4NazXc57gljAdsAdvZntHsdbsHNIWQB0wUrU4GryZTB7Sl3mqF52Z5xWkJeG0ngWB0s8LEzH+Hy9nV+6eI3uP/Gm3z/8cdZHi1Y0c33f2edjg41yFoNIE4Krqpw2aOzGThHCI0VEvtNS2yLn89w7Psk8NQ+8ejRs3z25gUeuPEWx089UpTiMdi3H+CT1SgpqfC85jrAmFFVjg23zCLrJvtTBVJJ7KWpoBRvzw8zb55qeHppnb1vvcX/8+IfcHtrlxMnN5hcu4YbDMntFD8YIZXDLy0Qu5YXX3iJF77yPH6yT4Pj2fvu4+8++UHciYbm+pTFWbQenCCEVLhiIrCvJMl8bv8truy9zV955n0sNuvoGGTAHKo3QiUWVgW7HiShtUOiwfBeXZkWkHEDB4sOWaqQkalgumDF0dXVVf7a1lVu72/x+sIqYLlTr9UwdyQUHeNCXepBELAgzXQVeyTUfuE4yHPe6bg3jMWCTKazyP7E6AyVF1YGAVd5au8ZOEdOCX/hJlOtGKtR9jO9Z0j0ItKuzOdAlBOrRzm8+CO8fPM8/+r8b/N962d5+vjDhKY2zyC5tDI7a4kVoQ6eVpRWMSKi1ka5QAi+JqdS7XdS2oRNXklz3y3p8N4zIPPUQx/mN77xaf7W5klqX4phqmX6Qg+DWveiyVZazUOCJ3cdTmzGZqysUzKTDV1zCsFziymfO6S8LB3SdFRPLfOt2/vki/sc21ii3d7miQ89Q1M5xjdv4RYadGWVwdKQ7b2pVbRLM9n+tS0+98Z5tgaOpUPr7F+6yYPDVX5q9QHqcSQFh+5EJBsV6De3vgXxFj/+8NOMumXyLJvHix5Rh3ZqPfy5JNoOqAVX2VS3LIorOZh4YOBg3VufVyUHLOao1mdTO06ePM5PvX6RT2+uwspi6aYVOsUmkZWCbKuBIIJoYtaO8b6iqhqCWO65O53QZeG4rxhWjg8cGfLQcm1iJu9w3BvG0h8iTGYdXTS5mmmXWR3VphiZM8uDit1XL3DbrSI4g4lV6dRg3FQGfwZfxArwSDb27jNHHuaB1RN85cJXefH5X+eHTjzLkbVDVLVDfSBU1gDW93EHb/T/SVJ6spQUNE2cp+s6fGUFQksercZBTogYOdOJsDla4a37HuErbzzP959+uFxoXxA1ycD+2rs4IzQNgpv3rvQt1btVZqyRRSmsl+Bxlec39q/w0izx2GNnyb5CXSYPHSkp46y4wQJ39vbZOLzOfR9+GipHNRpwZnODcy+9iTrh+qXb7I+nbK4sMdGj3Njd4fL5izRrQ95+4zUe+P41PrK4QjM2e44x8St3Xma97vjwqWep2gHaWfU/NVircXZIUsq4A0q6hyETBjWHpcpQNS+mELNkfTzMFKZlD608TMvYDG8Kmo89dj8r
b93kyIfO4kcNqtZinHPi6o1zzMZb3H/6OUQDd3YusL/fsrG+yWiwgWpiMrnJrG0ZDldYuO3Z+PWa//nTZ6ivnue/fBc5pHvKWPqxZylZQj0T2J1FumTjvtvxjPVL+7B+yIbw5ISHOczqfTCmMKUe4CzJdN5BzKwNlvjBB3+AC9uX+W9f+yoPX1vmo6eeYWFxCUlTXFMVFm9FRpCUGYr1RmTnSKV4nOjwBGI3o5YaMHkm+kGiqZcaVSqnPHXyMT5z41d55MYVjhSvYsBZgXedo6oapnHfAANnPTSdRiaxY5wil2YTvtle5aN+neRhGpTtZsoLaY/d7cTOzi263OGHjreef5Htt95isDCiKZX8i7fWubO7z2h5xJFDK3zl4nVOHTvOaKHi6OoKr77wKpfOvcb+7j6x60iTGd0be4gq/05/hf0Tj/PU8knWqhG/cfNFHlhwPHX0CbyadrA03uqzqSj/i6BNgbOzkNuEH3rEWWruGuOByVIw7YMuI1NT1cmFlsJ+RpaEXMYWSjYKjp8qR4+vcu23vsqRH/0Qvg5kjThXsbx4lJuzHRyeqhqxsnSMzbUzONcUqddAynDj9pscPfwQi/M5kwGRdzeHe8hYDlpFk6jlEgrjWWQWhVlSmguXeTgsH4ihFQpHP47bKdZ+q1aocWK0FlcQF4DaB86sn2Rz4RAvXXmZf/Pyb/HxI4/x4PEHqYEwrEw0QnTuNWonTGPPg/JAQOnwztN1La6q8EXUztDX0gDrHMREk+Hhk0/wq+d+i8ey6RWTSheic6jY6DYcxu/ywcoOIgzqBrqKhzdOs0ogV46LJwO/p9e4ev0WS8uQbtzgy596jce/71le+PTvsH3xCgtOSDu3GMeEuIrRrMPNdrnTdtxwNTIcct5/k9HiAmefe5LTzz7KsQfu45uf+RxvvPAisT2oN5w/f55/eeMWHzr7OMcPHeXJRc+TJx/F7VVoI3PAQpMl8EQlD5Sw6G0sX1BjIZsYghFca4Wlg8Gq5NIFmy2P87W3955Z2ElQo8IEQSOE6NhcG3Hrd19g8889aUROySwtLNM0jxNTS4x73Nm+yOLicQbNmm10acb1m2+ztnIfiwvHceMieSdp3gz2Tsc9Yiwy/7efGJu09Ka7Et+qsvHqdZYWj5aimcXB2ZmPlwIFmzK9kASiWp+Im+sQGxvYoyw3Qz544klurR7l9954gfPfvMzHHniKNX8YXzHnEQll5J6a3m8/T13E+GKSIl1nIxG8MwKflcyMsBmCJwAPHDnN9q3TTM5/Bfr6saqhYAXy9HVNitb8JqXtIMVIAk6sbiK3b/PKZuSVU57uumN37zaT7S2muzOYTDn3+19htjcFK7wzXFxDiYz3J2hS7tzaoa5rTp9dYeWhR9i4/z7e+uY5vvHpzyLLy3ziL3yED/ylH6ad7vP2K98qpEYLae9sb/G5V57nyM4NNt73LGeWhVES3MzqJL3bdZUZhLoyunvBUEAfxKgwArJktRqioqkgWloKqCnjRsYWp42lL8feDxSJpouQUeqlirU6cfPz32D1Q4/R6hSHUAVT0UEqNtcfAmDW7aNqclNVvcbKynFyiqQ8Rcm03R4hjnm3BrB7xFjK4WyZ2WLEGqPKjMVqa5dH9gU3DPNqvyv/pZ4rlK01GCg95DYxy5AP5gxZX8Td6ipwePkIP/rUJq/dvsC/fe3zfOTmKZ66/xGqUYNrGlN9FGEYKpTSNpCjJefeo3iqrMRuilQ1uGAjE1I6mG5VV8RZ4gP3P0v1lf+W6d4+vlTpfeXRbATE4MRUSZzDuapIHsHr197k8vg6V4/XvOmvoq9EmpR44+uv4kWJ3ZgQBkynntRNme6PASUEx2BhgIFvgdksojlx6c1r7N3aprp9neOnHuKxZ5/g2qVbvPT5F3nkqYf5vr/+V/n8z/48b73yqjFx1aD23f09wpWrXFu6xI24zPHBIZpSKyGUTH7kkeoAAk+q+NqhTZF97RK6F4scE/N+n5xMDC+nXGhGeU7JMVkqhwZnqjIxQe1x6qhHnlXv2H/5NpvvN8OIOVHXQM+URe37Aq48eOowWmSrqmoHQQhhRFMtvCsidk8YS7++pSR/GcUlilxNJjs49dZ11leXaR12oeJJSalDIGEiFr5Auda9l8nc1UEnYtSVbCzhMpYFj1BVDU8efZATK0f42ltf45VvfJofOv0sx+4/CaiRNMURJTMp5EdTOSnjsvHUtSOlDqL1fZgRJ1DzQKHE3ePc8fOvfZ6fWPkxgh/OqSnWgGYcsti1VINgIIareOjYfbx88TqX8x38pRts3dxl5/INuuk+bTI0btLtUdWN1WdcJnYdu7s7ZCK+9qwsV9STxGQikCNZE7vXLlGllhtbxzny1KOcffIMX//Siwy6IT/0P/xr/M7P/TKvf+2FA7KpCFv7e/zq66/QrK/yoxuHqCqPyxjzV8G4MYXsieBiMq7ZoiO1NqhKusIET72mgSm2WKt9YR/PsomQt4ofeNQXmkQFkjzUoYxa9wxqQZgwe/0aO5sVP/+lz1IHj7iK2gf+5kc+Nq/iG3FS6Ic7vXXzIisp8vuvvciDd6zh7J2Oe6KfpTfmlA/w8NirKqpSdcr7bo2R0QIZUCni2pi6i1PrrOxESx9DRshUagknhXjnnMN5g5nVi4VM3hXRClhtlviBh36A009+mF+48gJf+MYX6Xb2ybMOl5Shq6hD4V2VYounGIxYq3LqOiMvlsQdSr5U5jVWvmLHVbx54bWywEoDmg+gjmY4IhYdZ4ok06SBFyWy+eD97I9hf3vMeGuL2M6IsWU6Gdt/+3t4Z3Mzk2Ymk322bm/TjaeEnDl9ZJ3RMNC1kaDC5vKAWifc/tZLfOXnf50rL77NMx98jFMPnMAtDFh/9n2cfvoxWz5ykHhPs/L8jTe5ITvkxvJGwNRkYokIVJFGcZuB3BQUMQFtCUJTRqaK7kbyfkanauowE7VR5B3IBKT25ISNzGjFBq32Hagz4EYLu5k6Q7V1k+biNpPxlBu3t7hvY40TmxtE7eYUov4wxnnm2u4OTjz3rR3hzmT/XdfpH2ksInJSRH5bRF4SkRdF5H9Vfr4uIr8hIufKv2vl5yIi/zcROS8iz4vIc3/UZ8xPplRzc8kLbBSI48TVW5xZXiU6m+FoQxwV657KiCaCy/hCkhRVnGb6/6UcEaxhaM676ttMCxHPRhAk6hA4tXaMH37mR3h7ueFnv/IrXHn9PHkyocodgzKOosdD+ym4RnKxVuX96cTqF2LhS+o6kjiGVU3tAx9+9KN88ep5ppN9GzeRewqNM3EMJ2jRFeo8/MHoDtJEXnrhApf3Ye/ODqqJlCKIB28UmVhGdYe6pvb13GPNJhNuXL9NGxODxpG6lp3dPS5e2WdjZYkHjy9zdi3z8hd+l5e/+gJrywscO3qYkfc888mPsbC+Omcza8oEb/JQL15+05RvGsENHbnO+MWADB1uw8NasCJkq+Rd8zjmUXIZLyJQGVwsfR9NxF6nJrph/DuHC5RhrZgewDQhexHdT+h+pJ1lrrz4Ft/81z/H+BvfYu/
2Fk8eP833P/Q4wVXMtaTLf4ZWOlYXFlDgzPoxHtw49iczFsze/3eq+jjwEeAfisjjHEz/egj4zfI9fPv0r3+ATf/6ng5XyIGuMGVBcKp85Po2zfo6pqxpyEvCBpN6FEcmzlpcMoORgo711XBKCNY3mKpmvNM5v0jEemEsNzde0WAw5KNnnuPsMx/nV26f53e+9hlmO7s0OdKgBDGqv4TKIGsHeE8IgbqqyNoZVbcwg4OzMXAiwubiGoOTD/D8i1+BaMVO6+NxoEI1HFpelpXXprf5yuW3uHPtFnf2InFrizTZZTqbEFNn7cfOE0KFONPNqgYVzUJTRsoZJ2owaLj/yBo+KcNBzfb2hJvb+7z65m0q77n/yCKPnlzkwhe/xG//+1/gxoVLvP/Dj1lrQzPEVzaKwwXPaGkBaYSvX7jA1t7YaP6t9bfIeiAcqiGC7GR0P1ndpFUYl93dG5m1H1RSpndbvjIISGM0Jm0cDDxu5OfTqNmPRuRMsEfkpZtX+cWvfp2f+ZXf5fkrFzn1+EmePXEEt7TIz33p93jxwjncfFMzpDIXjpgi1PSBe2dTF+ZJwR8+vhet4yuUYUSquisiL2MDin4C+HPlZf+SP4XpX6qUnSvMFTuO3LrDM/UCXQsumT6vL1qtgo0mUEz8OmNSPYoSu9Z0cTVb45ETg2sL8TGWxdkL3/X8tKyKFpaxc57jK4fZfPZHeP7SS/zMV36Fj594kmOnHiJV1jdD8YYJmY8L92LdeePZmLoZUFU2VsGV1eE08+jxh/mtC7/ImQtvcPjBR+fxZ8ZoPpO2Qyrh/N4tbm7tMMkB9rYJd64Tu9mcYZtTsj4MSShCO53S1B5yLPQPZdpGrt/c4ZvnrtDUNTHucvzoBqmN3LqzT9dGVpcrBqMRTz9ylJvbe7z4e1/g2U9+nNUjq+QMg6UVvHbkmIgx8sZrV7h/c5NdGXNsaQ1WFb8UTFtsarwra/837TGCWhglDueVSMZJGSzkBWmcyds2Bg+nnG1EnzcRC9pMruBmnHD+9au8du06XZpx8tAaHzh1jM2lkT3HOrO2t0ClW3RJjXEufRc+8/kuPdXlyOpR2pS4ubfNlZsX37UB7L9Tgl/G5b0P+CJ/wulfdw8zatY2QIbzUQBW6HU4VT721i0Wjh/n5k53oNlbWX+9Optj7kvbYswRX9V4TVan0FxmmkBwFWgqMwUFKcJtJlpRhCycs3gYM9qMebtBqPngqWe4deg0v/PKFzn11Uu877EP45eWiEXeNDh7HFp0hIOvGQ76cdvJRlf0PCTxbCyusHnyYT7z2pf5qyfvxw2b4nlAyXjxdLnjy3cusXPjFmnaItM9Ujsjxc54VGUkhvfBwJCU6DSytzdBvFBXNrMyidB2LRev3mFhWJG6jtWlmvc/epbdccu1W7vEDsbjWdEbV25fvckXPv0FnvrE+1haXGD/dsejTzzCdDzm9fNvszBsCHXg+niPs02pj7QZTaU7QHxBD8EVFpCoWqOdFxCTjVIHNA6XHUlNOhZVe8ZZmO23XL6+xSsXL3FpZ4vFuuLhw5v8xFOPspAKNQbQfVPgnMSOo/efoLq0zahuePy+B8qcJNsAfaH0S2FrLNRDVJWt/V3ed/zMvB/mT2QsIrII/Afgf62qO3dDbH+c6V93DzNaPnlG0TJ+OVuDECjHb+7wwWqAuMCk6Dp5703nWEwQ3ODA8oWIzRnUytQaU8R5kwNKXWdySYV/lXLGYTQK8YasuDJUtZdTRSwUEgQXPJsL6/zIs5/km5de4uef/xUe2TzJIw9+AB0EXKjImkjZ0XZTBk1ju2ZypBxN/qhvTHJG9Hvm9JP8wqVXePWVb/DY+78PJ44uWxEuhMDbk9tc39tlducWOpsioyXidNsmD5e60WA0RHyNkwrfzei61jaJzuSaRk3DcGhaZZNZpI3W+Hbj1h6bmws8tHCUjWOb/Pbvf4W3r4+ZTqYsrzS06om7O7zx5VeYTSdUtefw0VUuvDah62Y89Nxj3Lm0y6Xdbd68fpnXX7/I6TOnOSpLLDXW8NYLkedJxg+9iQAmkMrb1LAuWxFYQetyYxAmXcfrl2/y6luX2N3f59DKIo8fOcyfe+B+KvFIyuSp5YkxJvwg2OJPmTSJrC0f4sjuKtd3tvlXv/drLA8GfPzx51hfXielWPIgazvoYqTJyktvfYvje7vctaz/0PE9GYuIVMVQ/pWq/lz58Z94+tfccO7+rIIg1Zr55BvXWDh5H+NZZpoUxOSFEooXoECHkXIDcFShEA577pUIEqrSOmyTanPK5Gx/Q6iKCoo7IEXGvvPOkBcc5NSZt3OOJ44/zvGlQ3zutS9w6cu/wUee/D6a1VX6Ud2Dvj7TEyPJdF2LV51frKJsLqzy0Jmn+P1zL3DqxsOMNjdNWsg5fKh4e7JLFzukjagPaI4WIpbwyovHh4ZQj0jaElFQK5LaJN/IJLY8vLnBX//R57h8e5/f/crrXLu1jaL89ufP8+HH7+eRp87y4adOUb9yiVcuRGZTxeeO61evmPQRjtn+jNfOXeLaxSt005bzL73Bo6dOcivu8ZuXX2cWZ5x7+Zv81IMfIKfGBjo5h3qFTq2fp9cu8xYdRImkpEjt2dra5/zWTV658BbtdMYDxw/x0VMnWHcNVeWtbyZa2KwDkFqI00RqI94FQ+JE0DSlXmh49OT9pCsXTOHUe6apI8c4T/D73ntxjlEz4ONPfpDmxu/Nhej/WMYi5kL+K+BlVf0v7/rVL2JTv/4Jf3j61z8SkX+LjfX+I6d/lfB73ouAwhNv3+B9K6tIVdOOLY5MmgzGz5hulZPSHetLz4Kbj3/zue8JUVSLWr4zNcqoIGlGzAlps43jzkaPyVI0qShKKWqjXVUhiM1h9MFzZPUof/nZH+OFa+f4+T/4Fd536mnOHHuQqqkJvYieDUigchWdduRoI6hNJswU6Z84+gif27vDF176Ij/0iR8HxKrWCtSNbQLeo6lD2wmmgQZgfTzOe1zweBlSVYFWBGG/DHrypJi4eHOXO3sznn7kPg5vrnJ7Z0yaRYaLI9Jkys7tHU6eOcPK2iJ1eJXXr+3iU+apRzb5yguXmIynaNdy8/J1dnf3EYG2nTIMnvGspcFyj1v7e0zijJWlZfI44WssJhsINFZD66F6Ubh1Z4+Xr1zk1Ytvo2++ztP3n+IvfPT9rC4s4NUmekm0Go/WPUPDSKY5JnwdSjFaSNOMW7ROVw2ODzz0OO974BEE6zH6Luv6275eG61SB2u+e6fje/Es3w/8HeAFEflG+dn/iT/F6V+9Z3Fii2R1POHHr+4yePABsvPsxa68Rthrp3ipGHhrEdai9m4i3jYDxZfOKOsgLuIEBTDQHjb0HhejhWI5IjGSnfG+RLKprRekzaeuJPIUNkBEfEXlKp458TgbSxv8yiu/zbeuX+DPP/lxwsqKfX7xbqqZEKoinQR03VyaZ2NpmVOHTvHa9h/wyBuvcfKhh8kZkij7qSV1M2MDkMmxRQqA2SN7/TwX7wKaHfUAM5rJmDoq2UV2pjN++j9+iUPrKywujz
i2vsiJw4ssDgInHjjDzq1djpy+D9GO554+y84XX+H67oybW2M21xqmkwm7rbK1tUVWGI5GIMKXXnyZo2uHePjMSfbGUyKRG3mG7G9xolm1DbDxpkUWhRyUa9N9Xjh3gXMXLzEU4fHjh/kRHTDa3uHI0bPk5WVkomhPTxoE0jhaLpNtI81dhJGHWgwMckr2pRUgUZBNNx+G1fe55DJR4O75lG4uHF6iiHc5vhc07Pd557Lmn8r0r/7Ns4DXxF9+9TLHjxxFs9B2iWlnIVNwRbCuSBqRrXZiQ1c9SYy1mmKvsG47nuTSqSfW1EXOZexEsPAgB1wwcYuUMx5TfK+D1XSy6HwWiC9QtGoZkuOEowub/I0nfozXbr7FL33903z87Ps5evxkqQnZZuCcx1cNYH0XGjuoarxznNg4wrR9hM+8+iX+5vHjNItLhMKG1enEFoEXNJvca19RD76hqgaEqjH4VhzeO5K3keOx64hpgnQztne2mU2nHJ6usRZqhscaFoYVg9GA5fUh+7d3GK1scljh7ANHOf/ZV7mz23Lz1i6xjUi2HnYfHMvLIzY3N6iCY+vmNuevXuXq1Wt0XcfFa9c4tXaYf/wjf41hXZNy4vLuDi+8/Tavv32RQZzx+JlT/NSH3sfy4pA0zuzMLtM0p4k3rhDcaRKdJfkAnRYipSAzCpHSfqYjQacmcNEr8TDpiJNZWYuFJl52Yykq+n2C3/Pzyrr9z6T5CzOYWjwffusaz7kBNDUqwt6kK8qPiubIyNlEMLMFIypqTvNCoWIco1RyEofa9Cff7yQBROdJclYTasiiJaSWeR937CJZKKPClVBEL1TKjBRxtoCdY3V5nQ8urbC0sMSnXv08T958mw888SGohoSqBxB6I1ei2sQscYHDCxtcG91k+8R9vPDi13j/Rz6B854utuaBNJNjxMV2rsMg2KBSE9AoVPjSTOYRXFWRc8dAPX44AlGGowWeffoBnn7sOMujBRYXHHUNa8fX6fZn5K5juLjGmROHOXvfDS5tzdjf3Wf/ztSoQs4EXxYXB7RtS9M0rB1ZZ3t3j/12Ruw6qm6GNsLVyS6vnb/K+bdeYzCJnL3vMD/53JOsLK1YY5tmZKakSUSbgDt2gnz5DkxboMhHUVC0Ir6XUzoY1OQESSWa8MYikCDkGmaTCcM8m7ca52gCh64gdKbQb5tsavdQzYwnWzDb/c+FSCk8sLXLj12fUZ84hnO2M+7tT0kZghhlf1iZEakY9d7nRMRGVfRuV9XMxDlwBUXTnp1sH2Xfl91LnMfZiC6UMorPBRteoRnpOqPZFL6TqFEzTA/MCp9BbXzco4cfYLEa8puvfpa3v/BL/PBTf47VjQ2gbHAKoR4QUWbdhEYGeF9zYv0otTj+4Jtf4pHbt1jePIIAqWvR1KK5ZT7TWgFxhGYIzpFIRt5U5rMTnSqLtckmxaTMusR+G7m2PeXqXuKNW7epJPPk2czixgrDjaN0O1uIEw4fOcyzD28RX77E/pE1dJa4cf0G9WhEMxgwagaowKXreziEZlSxtLrKbDzlxPF1NtZqPv/7v8dD64f5G88+y+LCkpFGa4dWrrR+C3hPlpbcgRxaQ5/fQ2YzZDQqijKKJtDKNACkNJJpg214085qZ51A5Ukpo6MBv/o7n2L3Gw1OoI1xbgAxmffp4eGclY39Lf5n29v8v//Dz/L4y+eZtu07rtB7wliCZurU8Vd2bzE8fJyF9QFVNWBva2ZC4P2Mc1Wms5a6rvDO267iwoEecTEMQecM3n6myVwoAQupQqG52IgIehMpJLtyYs7aY50oLiVSzGXCsOBVaWNGfIWTAosqOBKnVjb5y0/+IF+9+jK/8I1f5/vuf4aHzzxm/TZiRdW6HjFtx3RpioiyOVziytY1Tj3yFJ9//it88gf/It/avoYUZXwnQKih7Up4HUg5srd7GzwsDEZUdQ2uxnnP4QXPik98682LtF1kdzIjjcd87is7vHj+Ao+cPcn7HzvG5saihbUK9dISebzHYGGJRx4+hXOZw8sVv3z5Ol6E2EaagXJzZ8L+zhYuDBiMBminhFDhFjzNaMTp4yf45NmzVOqgttpTP4DIKSbcUdtg2Ha/o5soceSoRovo9gxZWUSymFi7N1V/Hdv8GVcVNK3x5Gk2YT7Emv4cDJZqZm9MuJlmVtAsxUex7NSoNLb1EWNmNG3JOTOZdmZM77ZO/wzW/n/nI4m1oJ678AazY0q9vc2h1QXG7ZSLe9tsTWaMwojaNUwSnFxap85WuEvOCHHe+6K6koCMOo9TU1e1QhTzuSuIFNE8h2RjDztv/RfeGSsAyfOxfH3I5h1QpJLaOKXNLUMRsgNXVB8leLIIRxZX+cipZ3h1sMo3b7/FG198g08++DT0tWSBQT1i1o6ZTHbRZsB9q4e5s7fN18NrPP/6N3n9zg3IHaqmDUZwxZAdEJnubVFVgcHCANUxIsHQMRwbq0t88JFV3r65zZ1rN+nIDOoFRvWQ586e5C9+4iEOL40Yti1MI7kem3hfMOrOaHmVRx4+w/LiiM9+7lXa5Zq91sY1TG5tMZuMWdscMFpc5PjxQ6jAdG+fdjohpmxeRIuumr9LAzlnI1ROC/0+lxmXdSAfXyKd2yGcOWzs4tK7IgqpLnfOFQc7tfArldvhKkeqhcYFRtmxN+kIZWBY7otxQhkroUz3O7ousuhn1mQ4aZnOunkO892Oe8JYFBuP8Mjhh7jOEfLtCrfnSLuO4aRld3vMnekVduM+e2nCOQcETwgNzWCBUb3A0nCJ1cESjRswCg0DZ2PCjW9FUaUvyXHGhChc0QPuJwlLWYw5U3lvw0qT9b9oUX7BCZITimNYj8jF82XA5zISHEeuPCvVAk8cf5DhoGbSzvilr/8GT3XTwlezkLBuhkwmHdPpPgujRS6mMR96+iP87Au/y6S1ZqZc5Jbop2uJ4v2AxeVV23VTx3Q6JSUYLTT86PtO8+Qjx/HjPf72T3ycf/NrX2Znd0IjwpGlIWdXhyxmoQGCr9A2ksYTQuPvyoczzdIi9z+6zg9+7Bo//dO/xmLTcLvLzDpT9N+5tc1k2jKbTlheWaLyjhg7bl24SdqwHDCXyr2viuZaNBRTgrfuhVJ7iQ44PCC/MUEWg6nCEEhtwkXBF8NSMRZHUtOh9uJQb8/VRcWPKjaGI6q8bd4MxeNNE8EJTkwYRQWGowE+dfQBupO+1/W7H/eEsQAkB2v3rbBZrzKrBgwGwqUrQ/JgkZUT9+EyJATJGZ1OmU0mTGf7THa3GW/vsX3tOtfilLG2RAehaWhGCywMl1kfbbA+WGKxXqAJNcNQWaHMQXKWg6iTIh5R3D5lJAGFIaCWVDsxVMWHymBLdD6r0OguGWtcsi7PRdfw2OZpzt28wINPfZT0+u/ztee/xGPPfQw/GBrpZrhIO9sjdS0nlo+wEyeMjp1E33wVfGVJbcK0mrPHibK4tIyvB3Rty3Q8JXZT6joTqhlHRxWPPXoMNHJ8vyXED5CmLVXXsrE8YDSsaepAJQHnGlw9hGTTg6VKpl0MtLMW9Y5jK0NqP
yTMHCuLQ6bt1HKOyjHZG3Ojjexu7xm6VlesrA9IsWgfODEmcbIamARBB85mt+DwA8FVBpWnocPlZCPCFwRxAZfUOiw9aGvC5FpIq9QGlphCZjCofuA4srFGfX2LaZFsdZILfCxMpx05KoNBZWPzyvoLIfxngoYJdBnu1I61LDS1wzeBKhwkW/tdy83ZhPtW12kGKwxWllmhdOkBPhf6fdvRzsa0k3329rbZvnObnavnuNJNGOeOtlLq4YjRaIWNxVU2RuusDddYGi7SiDfCH8Uw1EQnTLBVUGfGM+26AjIYUtZTZPqCKGJEQF9g6lqER9bu5+0rL+FDYHsAv/jZT/Hxp76fw4eP4z0MFpdpJ2PWBkMuXr3OtfEWxIm1TYfahPRK45lzFlbOJvtMx3t07RRQUsZ42EnR2ZSwssDyYJGPfv8S41s3uH3+Ovt7kb04xYcROnI29CnUZDoLYdU2CakCwVfcvHSRN8/d4Ic/8Ahv3ZnytXNvQIqmqCNAhnY2YTQcMNnbZ7C+zImNTcKwguTIbUceeCpfJglUFZKzeeBgskg5QnZKFwR/dEC6so9/ehWNttBzNoPxI2+ySK7w/jQTgsdV9veucfjFwOHj6yxcfoN9UVPFLCPbp5NITspg0ICzyQcSC/XFWevzux33hrFgnYyXnXK4Dtbf4K2wlEvCvpNaJqklJ+ulcF5KezAlawafPRo8zWjAaGOTVYRTQELpug7aGeOtO0xu32Z7+zbbVy5wLr3ABCUPBoSFZVZW19hYXOfI8gar1TINQnABqbxNNU7KZDphYTgiBKvIqADZ2ohdqBAUdSadlEiFkCk8vHrM/j16muMnh/zKNz7Dh04+xuMPP4E6Tz0YoXHGyZVDyI1XcMHhfKBaXKFrK6o4Je7u4kONd8L+zjZdbAnB4/2Q0dIa3nnaaabd2SMsDHGNJ7Yzbrx5g8sXbnFnJ9Is1KRoPf9ulEh6h2rBtMhEPT5USNWUKc0dVa3sTluu3NyxhrTOiJO+dHm60rNTB8/m0jL1cADBw8BTjQKJxHjaMRotEEIhT2bTSvNlLF8SyAHiIrhrW7iwjqQOFpyJVhTiox86dGL6br7yNu4ilOBJQdvM+uoym9lxzRk1KGummxnYMxz1/cUGSbuCsIXqnWku/XHPGAsiXI8KIeObMB+M02Wj7Ne+4uTKyKrgzuSJNMU53dqHnq1siaCWHhJKf4yvK6pBQ7O8zOap05zIySYMtzPGu3tMtrYYb++wdeUWO+113mbGrku0VcuJQ/dzdP0+NhfXWQw1sxxZdN7AN8dcxKJH5fox1uajPNkXXbPUEcRZ+0DV8OMf/fP83re+yOWv3uLjT3wf1UJFdp5lX/Ghwyf5TJywk3ch2fz2NN0FTDKKokS/uLxmPT1uROVrQrA5LOobZGETpju44PCba/zif/0ZcgrUTcPDpw/zVOo47jPLR5bIE0uAw2hoRU0coaqQ4GmGFYdOrjO4s093Zzb3nn0ME9QRU4d3yuUr1/kPsy/Svh+eO34arYz86jOM98csHlqwYUydknc76kEDlXXGzjpBKyF2HUNnIueaQTqgSLHnlMhVET8vYbOqorNkvTMjqBcqHl7b5NzOdVLOzJKNXRw0Nb0Mvysomytiya6/mD8pkfI/xSHAlQw+WV8GUjSnnScjjJqB0eyLTrF6R8FXTPit7eaDOSvv5zWXrEWgzQmx0Oh7ZCSHQFUHFhdGLB49Yjyj1KFdYro/YTreZz/ts33jKtuvnOdiHnO7icyC8tiJJzm+fIRDC2sMQ23qJUXsz2Gf57PlMyq5KFoaneL4whqXU+TG3k3+wuMf4/nL5/i5L/0SP/zEx9hcXyMhPLhyiHOjffa+/CrGSDMBpowJhjtfsbJ2yLhvMeBdhReo8Hz5lcs8fHRISlOu3tpH9qas37/B8pGjfOvtSwz2p+yfG7M3GfO+WcvJ2YSVowvURbDO+xHe1WS1AT8euHNzh7cvXyNp0TtOkdznBMFC0llrDXjHHl3nV5//Bg9tHGJtsIw4R6iFvVv7LB9dMsnZabLi6SjgilIOWci1p+4yzpuGW87J1PjbzgT3csbFjIgnjSOyFGz+jLf6jXaZtNtx3+oGC7evclMzMUaauirESSnrzZA21xeuiorMux33hLH0p3jHBboUqYvsTZuFXIaKOlcGihYll14myZERl6zBS21EQ5dMQjXmbI1imgnBKvdmQKBl8KqUCj5K6WmpcAPPqKkZri2zIUI+/TAkZXrnNpeuXuDK7Uvw2gW+Wb3N/qJnabDA+sIqR5c2OLKwytJghC/0mj6cFBEqVxltRzyHlpaopjucu/0WTx9/gPtWj/Hpl7/Is0fO8sjJBxhUS5y+7zivfP6bJIXGwfGjJ3jtjV1bQCJ4aWzUTBVs4LjLNBXc3tnia994k9tffIlzW7v8yNNnOfn0I/ztv/uD/POf/l3euPw2k26Pb11NTNKED8kpzo6EtUFlTAMtUwN8xcXLW/z8b3yN2+PETjdDnJjMUH99vQAEIN6TnePF195gZWWJ1y5f4el6gbqR+cRnmQja6zw3DplG6oXAbM+4cpVzOJdJEWRY47oZSUxrLXcJJomwVM9VcPJUcUsVOo2WWyaDijePbXLsNc+VFKm8L7px9vx9CPa3/ezQwhlM39Gn/53HPWEsPbK97x3TWUvjHBmhayM5pwL5Odudy8Te4D0eoVMTihAyTpXK29wQ70qS7QTUlR6PZExlNTdMVnBKUimDdQvruOeAYdXejIV0o801HtpY4yH/DLSJ9s4ut+7c5Opkh+3dfV6+cpFvpilutMChtcOcXD/O4cU1RlWD1NbzYnuaJ+DYGCzT1DWvXH+TB9eO85ef/Tiff/NFbr76VZ555CkaKk48fopbV7fIF9/m0tZV0FzeQRj4mq60BUAiz3bRPGVnvM+lK8IH33+WM9OWs2cPo0y47/4l/jf/27/EL/7Hr/H7X/wa18c7TK/NWF8ecv+Dm3RdQnI2754jzltL8ds7Y3amNtKirgIh2IhtEUz/qyzG1ZUVThxf5uKF26z4Id/cusn6/mHuY5mr129x/Og6ubU8wg9r0zf2QrU0pNuNpJyI0VHVDsYZWa9hmKAIHHpxxGA6AESj6aNYTqMQu4hXj4wctfc8vHKIF7cukUQL3cjyXCdSOi9MXcZAjXgXqfK7H/eEsfSepfOBSY4spoQmx7RLxJTKYnBUofC4clE/FNMGM7lQW+TS11SyUUDoaydF48KXnELUXInkXpSvTFtJFK5Rtt85jw8BNCFA7qx323vPYHONk0fXuU+ENkZuz3a4evUS+5eukl95k5c4z5eGwsLGKsc2T3KmangERbzHS40qrPqKR1Y95+5c5OTKYT7+4FO8cfs6v/PSFxisPMTjT5zl3M7zXM1WyUeYdxzWISA5kbKhXzm27A+GsLDKq0l54Xe+wWip4Sc2B4yOLrJcedaW1vg7P/Vxzp5Z51/97KcZT/d4+eJVPrB7msWNhio11tKtGaeZ5bVlgg+kPMM5x2zWFSkoq0s1wyHtbEJQUFdx89aU1Y11Dq0N
WFis+My5F3lu9Tinjx+lHjSlwxXyNOJrI0jWywP2r24TGk+amXBi3hojhxqLXF2Z7pUzrrbuWPGmsO8bE/iLZQBTBpvi1iUeOnmSzTtXuOF6siT0obtI0ZQTY633g4vf7bgnjKU/OhHGucMBky4y7qxHtfZ+PqItq3kN7Qt7874VR1U2Gi0zKr2YcalgYYujoCDexjaIm+uNZREkR0QqNBtM6ylMVs02jZdMkown2JgINUE3JxY+HBmuc+SBTbozj3B56wY7t7epd6f4rW1uXnqFr89u8dxkj98592XWjj/I/StHWQwVdRjy+MaDvLb9Nm1qOb1kQMK5kFgaDgjjxOryBtcn+4Xyn5jNJgw3anQ2Q/KMtp2imkk+UC2MmHRWI1leXmbn1Tu88OXLjPd2LewfVJyfTdm9dYdOMttScWdrzAlZR1MmdRGfEklbjh7ZoBnUsDs2Tx3CnLmbYmchblWxtLqEhMBobQVix7AesTBqEHVsHl5jddEoLBT5L8GRW9NOGKyOcFVNita9mrwnzlqqUJEHlqMokGdWTGYcrZfeCzozZE6C5YpZsYYzhNWlZR4fLvO73Y7lfKFMH+hzkzlIUTzNAWbxXY97w1ikx7od0zLKbDrpiEXVETI+WH+JqnVKqkIljqwWpvW9Kz0DtwredMF6bpDexf1yAH11t4gnap63E6vYbmUbuJpusRp7GQQXBMTEKkStsu+9t5udEs5VPLh5H2n9BHdm29zY3+Y+V3Fkd4/hL36RzetTXn/zSzzvW6r1FY4fO8n960c4s3qSi7vX2ZtcZ3V5GbcyIO7cZjLZYxA8I9+QYktHYne8R+VhKND2E8DEEFvxDpmA7sy4ePUtbscJDxxf48wTp3joI8dolpd58T9+nZ1Ll6momIowkwxBaLuOhUUTAREPo4Wa4aCysRgwV5PpKe2aMzFnUlaGjfD2GxdYHA0YHb+Po+tr3An/f+r+LNiyLL3vw35rrT2d+dw55zmz5u7qqu6q6m70CIAAMZEASZAUQxJpWrL9YkcoFFLoxdaDHQ49OGSGpAjLD2RIClMUSZkiCBpAA2g0eq7u6prnynm4N+88nHHvvQY/fOucTJDo6pYJORI7KiMr8+a995y791rr+/7ff6jokotbaBa5Wgopo5C+wXhF1mrgyhpPjQvg6lpQqyTBFylKIS4tWkEnqlBjQ6+Vjm46WswtJhbdTtFK8alzF3jl/deZ5hJ4pRTRJ0EW26yXSY0mTT4ePn40Fku8gtI4rak9bO+VlMqTmBTnRfwkrFpNUIZAwIZaepNIMdFBhlVKa/lhRDFZxKfipD0afD9EbJDYPDH5M9rPLZR8CNgAaQQVMAqNEQ1NTB8mRFGZD3OV5cxVzBhYbfRYafYY2ylDNyJkhuOffY5zzT52f8D+xgb3r2/w6tvvMcoNvWPLYDT9sMLGtZpOt8Xq8hq3r75PI2swKUc4JfT9w6Mdlps9dBnwWcDaQGYSGIyZbmygsoTmUovzF87yG3/l8zR7CW+8dYM3v/cWr7xzgxOtLs88/ThvXrvJ/qgU34JawAM5oWt0okjTLMpwvTg7+hhxjiNNUpRJsJVlcDjCKIO3ltF0yqSqGG0PyJdzGeqmSiBjQOUGVSPpzzpQLOQc3Z2AD9RTTzWsBDVzDkWASnIscV42MiWUfKMMLnhxt7SgHajCREl4YPXMKo/faPE608gL1DLAFF6uMLSVIs9NDH798dcjslhmL1EgyMHumINhifKiSkyjOYVUl9G7ODa5wVn5YWLmlPsQxCzB+1hFEch1dDaMX8MoE0+KyBODuU+yGGKLCtMYJ7n3XpxTskRJUrHUdsKWj7ul9xKz573MfdBh7t3V0Cn91iKJEq3NjckeRTtl9dknOGmewU8qBttb7G7c58bGOjfv3sOe6LDTiv2TMYzHI2x0e1dac2dzg8WzLRpZRu1SlC+hLpls7tM7ucTiyTWa/RbKTfjDb7/ON19+j7vru3TznE9kS/w7X/gMq7/xOK+8usLOvR2SIsf5aUxeFvrH5OiIo+idbEyKSQ11WdNsNWi3CxKTkjRyBvuHmNRw4sQSh7tDDJrX37vFU50VEkCnCp0nOFuJJHzqIlqoCbmhsdbi6M6e3B8P1f4YNy7RzVyMFE0ticg2OvYYRai80PtzE6XgkhHjK4GblQ0kKuGFU+d45/Z74gYUmemzpy7PskhfSuChDfRPux6RxSIvUSuoqookDWRZgDIwtSVWGwoSZnu2FrRPNgoZQaK8F8cWZebeWwI5x9IqNsczqr8HMYZQcnqAEvtVJelVISixBYqad6cgMyl6lruGGGajwDmhYCTR0CIoLy4iNpZ/guvKrg0s5A06zQUqHLujI8Z2ykLWZPHUCr3jSxybXGJ0NGB3sMfLg1vc3rpDOZ5S1iOstbLze8+0rri+c58nTp+laRzDumRwtMOgHtNpLzMcH3Dt1nW2tveo6hqF5kyzw986dp6nzp5j8ZeeQzU9Lzx/nreVJWs2cUqsZ0XupnjjjavsHYqtqfcOX7ropFKTZW2uPHaanf1DOs01Dg6GDI+GnD27SjkYc3g0JuuuMto+IncNgrKk04Dp5OKzpgEb0E42g6xZUO5PCCpgpw5/WKHzLHohazlBQE61QEw7hjCpZKGEBD+yaAuhdpAHgnGcPnuS0+s3ueGnzHbFoDWNRkZSTgCwzs+//o+7HpHFIpcCssxQtDOWWrA/cEJpCGBMLqIdLW6UqZYdvXaz7HbJUyHEbBSFzFDiQvSKqCIEKcxAKelY5s6IzJisooKULyKLLkli5LaJDv8+RI9jHX2ymMPVSaIIJFKKIehPYKa4VJgkBxISY2h3lgk+sFeOuHW4j1aBXpKh0pTVhWUeTxx75VhIjTI+etAvhMD2wR7L/R7H+n1K79jc3calhg8/uEHtLCEoemnBZ0+f4NTU8Pi+5uQ0p/XiabKTXWy1j9Hw9LPnMWmGJsR5luPocMDv//EbYpYX+WjE/q2qLPu7h9y/vU6RpQwGJfV0ylQX3N85pJnktNoF72xvcvX+Hr52JLWnXSnQQRjjzYKk16C51CZPEsLRlFBaEhT5WGFeu0f/Up8kIpchMaip9Ip+GO2gDCivMJ0MbTQ+Q1xwptKDKu8xzZzPnr3Ijetvi3FJYkhzI+hqOW9rZaP7mOfzkVgsav67p61TdJrRbiuKNGVcy1BJKBYS+ayDF6FOdAuJcP1D2F/EJ5GHWBKmo9cu8qBpI7EQXgmtxuNRQZGmwhgQNoko+jxSzungRcFnEom8iIQ+fBCj8SB/53z00zXSyzDD+OPwLvEKay115SirKZNqynAy5mgyZHd6xHq5y1p7kb1qyI4bEpRnWo/xWmrzhwfNgcDVu3fotNv0+otU3uOSIT3TZrx7CK7mXGjBumPLlySNhN7nllk8k1CN9jAmkDYbpIiuXSZOgWAr3nrtAw4OJqSpoartg/ulFM45RuMJH15dl4GrkT6hrkrKaY3rt1ld6vFrTz7LYtrGB08axKDda2EZ19YxHZeUKVS2ZryQsz0eMR1PKSvL1btDsi2NHokTpkc
[... base64-encoded PNG image data (embedded in the diffed file) omitted ...]
q6khqsfIO47JsI6KFPsvZdxauMlPLUecOqbYGQr66QAIjeY7vX3wlqMnj3H51dfR3oZelKqKKAJs6SiLe1Lk9/e+PALtMqR34YYCARMkpsUZQLiDfNhVSpnhZks1nfyjJ/jLwK9VyY8G/pH3/jeFED8A/okQ4leAG8Cfrd7/Lwll48uE0vFf/sO+wAO5C5UeLav6xDS/CPxHgSnSTxtLDm9dkDYDjLFVAicOSMSnjiREGFGRohKrcT5cbFVjsNoYqpyEqodSJYKVozgC9NgA03kJW13s0ybk9DALEcJEISr9eOErld/gLPjKobivDyYFIGnOzNCZmadX3iHcNCpGfaZ3QX/fblhVcmwYJsULnNd41UDWc4jrGK0Y9nNMaRlmKWNZ4pyjEUXMdCKajYTJHUfpYkzdMewPUL0Bd7c1taOriEYDH0XEruCtK7c4HW0xPNZjwAY6ucPcp5/g2KlHma918CPDNW7zQl7gMJxzkus3NnmqK1gRkrIPb6Utzn12hb5rskYJXlQ9Lcn2YIR1huW1JWQcI0yOdoHtU8gQTqtIVR2CaWOxOnE+9Fy0sChvMZVI1LRpPJ3cnt4I77lE+JyAhL2vavYe9kE0Ja8CT/6Q53eBL/+Q5z3wv/nDPvd+k0BTyWo7FGglKo6uKuypwhhR7SBCgNAVY2TVoxBV6KVElVv46WdPubimiXJFp1qVEQIyEqjCIQHgghioF7JKzsN7XJUPGecOkkhZOY0UoZrip2MnzlaQ5Hula1UNA06nnqfJ/XT+oJ7ErBw/Tm/nLtaUQWbC398om4aMAWbtkXgXQAMIwaPPf5rO3BLf+Gf/nH5vQmEF44ENR7hviJsJmZKMjGWCZs57Wstd6llJuttnu2l55/zb9HpjxMgHGO9Kg7OriuMLQ4pBwkOfajL/J+a40fgSTi7RTh2Xvvk2jcY+20tj1scrbFwY8WJP8QsPLfHJzQGNJEarGp/YGLLzxh2uDtvQjMN0hnNY6xhmKYUxtJptuktL9NdvIqrxFWfDgGVeBBJ26yzKOLysGIBUEJnSAurSk7tAU+Uq3uwQHcjpPZFQ7fRwcBUEZiHxIewsP3ZTAlqqAjxVNEfTAcqgiuUPLi6hOaBRDfEsB6yVcF9Ppmoy2umdxEPQ7KgYD8W0nzIdrZ8mzOHubQ7CqnuNLDxMFW4lYecylXyfhWqEJmxRWgXJPwgKZgBFNSlg/XQ8ZdrVD7tnTUccP3eaS6+8TOlyqPoB0oXTLJVCVUFEyFtEQP4JyZlPPMen/tiXufHuLSInsQhGE4v3FrzCZpDfKRFdRdSMmK/XcEqRDVKU2afZkIhC0zm2QnxUUmxGzB+e4dCzs/zxhT5z4h2G44zxcMLLd4b8weDbnOjMkt1oo98acOKvfY5stMcTh5bYGEx44qGET75+iezyHttJwehUHTsa8MTJeT69FnNns+RaklCWJd4LJqVjnKXUO3VWDh9i98464NBaMSkLJmlZAfJCJCFliUJhjCOK46q56DjTbvL2qGRc9VGmmj0hlXUBciGnFMDTsjIHSf772cfCWQBiWY2UAOCrst80pCLEK1IgqjLqPUkIqBom1ajM9ACooCQGFaS4SqarMSBJxaJyADkO2P4pFehB05F7LO3ivi7/Qdmx0oqUUhzI5yECZ4AQVaO0GmtRBICSp4I3i6q86UPj03lYPbLG0tohbl+6ENTDhMBjDob/gvqYCvr0EmSU8NTnvsBzX/kiSkmG4wGNlSWyq1eY7SqkbtDbL8kzi3OCw3OzzHYSaEQMjcNGHi9mGeVQeMPg9h1EIyEfa8zlHnvbl7neUbSbnidXLMeXTnD8qOeReINO9wzm9Elulle4/duXmAz3OPpYn6V3NpGbm3zLFhz57BpcyGjcsqwNGnTXTrG9s4Gaa9IathiNRnjvKA3sjUcsdOdZPbLKqy96IqVR0lJax2hisKXDFKaa9QuOI5XG+UB8aJ2jJhznOl3eHvbJ7b0JZEkYqNRKVJdLOMdKyTAiM6UJfh9/+Vg4i/NQVH2Ogy3CgZfuYDhSCYG8D0ttQrQT7tLVPzM+pG0WHyhTRcA0Tkfop0UDV+0oUoRd5l6y7g7kLRTigHxaVDnMtDOPt2gVBeodEU7ClDJ22ly91/OpGF3sfdOwQhzkXoKK+Lwi+0PAU5//DBvXr1MWxUEhwgPehtADFN5LVL3FF7/6J3joqUeDYJLztBt1sBDNz7O3vc/KUgxRydZWTn2hgSajPy64tT7EIfGxZnlpEbCsv3MXqRXiqCeZa2P3YPBuyUOfafOp59fYVC3qXUdLjkjUIW6lh9gtJpz86SO0Ikl2o05rrsUvHSvYv7nH/MMrtFSDfqlor3uElly5e5d0O6PbjlBKYg6ofARb/QFnVj0rq0uoOMbnGV4KlI4ZTQoGw5yF+UYlUhT+zZSY0atKeMobZmsRp32DN/Z6gVaryNBS0ak38PfVOwVUGqRU/a/ptfHD7WPhLBCqGcbZKrwBql0jjFF7LJUyVhXCW+/x0630PlJooEI3Vlg5cY9Mb9o/mfqjFyHBdJUsRThg8j5nEtW/qypWEryXFZeVq3D4tipRVmGVC3kWVZ9kWmSYypCHcM6BuzfbFmldieqEHWzt5BG+9NU/xe997evkg17gHsNUui0hC4uaLX7xP/5LLB9dYzq86b3n5voGGzfv0lkuWTnWoN5ukbQt3RlP4TWT3CGVoBZ70twiRUyRTShSqC0ukSwf4uzZeZKTTdZvOfp7ArcYc/duybHFlJfOF7z61i7z6hInVm9x7sgJdgtBltQQMczoFn5/j+ZOm9o7CoY5h547yi29zdvPn+JTJ2Zon9/hir/O+eEOne4KpQn5XW8yxnlDo5HQnp1lsLVJFGmivGCSOnqDjDQtiYuSpJaEq6O6QUkkUmmsGZOoZZbrDRbFHX7tlR+wtb5Be7bDp5/7KRZasyFcFoGQZJSNMc5gvGcuTzGlec9r9GPhLGFnCNhoV/UepmPq+MBOWU4lBqp8BVF19/10bCX0HoSc3umnW3XAs0xH6qeDjX7a5xBBvVhUr+M9qioWWF8lgOre+MxBbHswWRAqLtMQzB8UJqYSFdNByrCzHeQ9YjpGEzrL08aq8KEsfubpx1g7foQXv/ttbr/1DsPdHs5ZisJghOSZLz4Cs0HBOYzVCF584SKvvHKN2mIHwR6TzJGOC+pzdeoNjRtKsrGmvRDz0FHL+rWc3tAzsClHVtfw5ZD+rfO8cSEL5ORKIqRm+3jMHZFR767y2JEuv/j8GV7d0lzIND93dMB86zCTNOH331jn5r96CX+rT9QtMLZPcqbFypGUpV9+jm/egd+9OuLPfbJLNLPKN//hP6TRXARCz+ru/hDrDUpErK0dord1F+EdOo7IsoKt3YyVpQxdV9QSjVICITVChKawUgJXDoIwlBA8tnyMd5ovcXXzKuVkjmvr18mXDcvNWa5t3+HSpfP0d+/iTR5UC969dY+N/4fYx8JZPAH1GMKYcBFa5wPnbUg6QnIu7lWXwnUbdoBQgpxy3YbsZBr+HFQ/Kok9Vx3I+ylhD6a
U/b3BSu85aHoe4GBU1aCsGqKqIuCblpnDB4bPnR7yad5iq7wl9H6mozv3pp2tt5TWsDfcweNYaC9xc3yXXbnJ0qcP091bZvPdG8gsY+noCqNmymsXf8Dnn/ppItXAWcv61jbleId2rQfU0VGDYc/jZY3NwT7NqMNkp4cQJXEtRjQNi506qRkzuLNB586YE+cO027UUfsFo9l5LhQ1Pvv5NtnaLM8szTEv3+EWR2nkKVvDdV6/fJNc73PNzdPOSo4eXeblmyNGKwvky3P8b3/pcU4eSri2NSSbWGrHWxx6eJmNdcuxtSWKokQnMR5Pf5JTGEM9jjl87BCvv/ZqIIU34ToYDAu293LqNU2rWadW93hv8dU5RWicHSFFdf5UxH/w2T/J3qDP777wFi/85r9k4cgyR08/yvlXrrB/4ypKemqtGCcs2XCf9xtE+dg4i6l2inCHCIrD02FDVSXCU1obKe6VAadKtYLQexFVY1FUYki+cgBf7Vy6UrDVShzsXFOHkFUzc9qjCS2QqWOFcqMUEiuDAxhrmRQTInXvMDpvSWSMrgoHogqblJBVLX8KVw7O7bzjVm+Li1cukI37jHu3ac/MMH/sMW5fvc5oYwuzvIarN3FzilgkTPyIYv0ms08+TSw14Ll7d4+bdzepr8wjCon2Q7zbYGFthV6uiZIZap0unZMxO+evYUaSWssjZjrMNTqgHM2HmvSu3WRgBJGV1Nu7zEU13vhGE32yzu2yYGc4YC+7zNFmh8UTmu+UKyTdOZ5v1vHDu/RnW7x8+gwuqTEz22Bjfx+V57TXTnB2PuNsXZPuOs7OJ1w5foaLd8Mgu/cwLkoG6ZhG0mRtbQWV1LDZBOUFWirGac7W/oROK2J2NqPRqqPjoDCgtDoIba0tECLwFCRxg7/0lT9Nfz/lpdcvMti6yzv7Q/ZvZZhJjosUUd0ilKfMy/e9Tj8mzlI1/6rE7Z5zh/JxYERXWG/Cxe6qqtl9/RSqPswUHRnQcfd15EXg1BJMG5vVnV39m2P9obRsD+aKrLMMiyFbvXW01KzMHcY4S5oPGaYjLly+SBTXw7qsQSjBQyceZa7Zpa5rKKEOpMantX1flZB3Rvvc2b7Dmy9/n96tm5RZjk48/WSP4ciT9QeY0lFMRqA11tQwriSODbXY0awnlYqA43tvXGa0M6Yoc2bbCb7YZZxLakqyf23A8mIHkw6gcIhah3EhGe8AOyk6hnai6WhPo4yQQqKkQw4zDktHY+I497mTzK2ts9V8DNk5wxJLDMc9zizVeXHzCnPpVfJOm69fGaFlgbl9k2NRlxNzZzl6/AxOa365YemZAj/u88bQMzj2KLr3B4EE3kHpPP1JxlLb0W63mJ2dY3tjTKQkpXQUCLb3JnQbCYvzNZqdAhXpKo+jCo0NzhUoHR1MWLTqXX7lq3+Gfvr32djdwtsxUB6E19YE/ZtiUr5vE/9j4SwAUxRbGJWuuq82XFRKVWPwngNHmTqBlNN5ruAgokq6qXo2UwGkUEEKOoNTDxNwgD/x+IMBzjQfIqSisI5rd94lG+8y2NnGlgXX55YpJ7u4fIQXCXdvbmLyAlREvdGgNAU7t2+QNFqcOvckp5ZP0IwS+vkYTIZznot3brCzs86ta1cZbe/Qu30TXwZivjiWmHzI1oXzRPUuJjV418O6PeLGPJOhQYicLMvYunOFN6M6k1JzdWeL5lKbRl6jzCfIKGG2Abghiw8dxe1M0HWNAo4/scZkdwCFIWnE1JsasTsh3e6hhKXRaVOzksh4YqmIZ2vIWsHuoIHvbTIc3+R3rxQUusNnHzmCaETsNjx58S5/+RHD+egkRfxpmtEccnmGoffsFx6rFQ0ZEYka417M23cLnpiZZXPQR0iQzrHdH3BuTYGQnDh5nPX1WySArvR1xmPDzjBjdz+j2clQkSDSgQ4poF4ttpygo3sKYd7DYneF/+yX/wL/zT/5e2zt7dCaV4y3PcU4CL0ezFF93KthgoCddlN8irN4z0HjyNkwB6QP8O3VYGSlCIYMZeXwfBWiqfDG6V3cu6DzMcjHREpR1zFZmR+EWjvDbbSMiZXm6tWXcEgGE0//9g3i2FJkGcZkbFy7TlKPSZJQQMjHGa60IAtsnqKTiHywz7i3TZFNGBzfZqY9z9Vr77BYlnzK5Fy59A63hhnewWhrj9HGAKUFtRlNLkBHgnyUkfbSoNNSJkgF6X6KjGJsZsAJ1q9eZHtjh7T+OJO0ZL6WUFpLJFsMJ6uUAprxUDrTHwAAN8FJREFUhE5NkbeXKCzUnSWaTJifCTIOYInrmqJic8xFhE40vXFOb7cPnQY1PeHC799kYe0YcbPOvp+w9swaJ2bX+IMXe7S7HX72zFFac3dYWVGcbqe8ld/mlZv7/N++WedLn17jy4stvIzhzoQbNcvSLHyh1KyYw9wd9QP5nYRbu3tBGU0qTp06yu/9bjiPWmuUMuS5oT/K2R+WzAxSGs0ERRUJqBB22zKtQuywo1cdKo4tneA/+YU/y9/+1f8f43SMqoEfh/y4loTJ5fezj4WzQOhLhDH8io0lxGX3hiGrKtm0ECVFaOyV1pBojSFwI2cmR1c7ipaS3JZc2rhGf7+HSfuMBj1UnFCvNzGmpCwKtBRkgz1E0qJIS4r+JnGsGPZKyrRE6hJrHCpWYD3ZKKXZ0UQ1gSst2cgABUoLrIkCNkY59m9fZe/WDcpc4F2ObyrKPGN0d53+bkZZeqT0RHWFzS0mDU23fFzgMk/ciXDWMdlJUYnCO0nSAp04TJEw6Vui+gLaRXTbXbLRmNG4oNNtUlOdoJ9YdkjLkNO50jPcGDIzFyNkHbwBVWeyOWCYOR5+5iTLcwvEzmDwvNNzXBvCKBK05uDM0S6zS2u8lvawMfjeOsc7KY1Oi7v9PfrJUfyNAUkr4fT8Mn5lkV5/j1mTE4lZlHOsv36TF47XObxcZ20G6n4eoTTCluChN0oP+i4LCwu0Wm3MaIB3kERhmDUd5ezvp4xnE9JxhpK10GDUYSjTmiFVY+wA8hFSU88jR57gL/3cL/B3fvV/IplxmBRcGSqVB9in97CPhbOEKaepMrCpYuaK1alqGApgUuZs7u+QxDGz9Q6Xbr/LoL9HUm/SanVoas3W7t3wGTphtjtPb7DPrctXMOMRNt0kiiRaewYubNtFnjPp5wgZ0VhcQcgYqRvk2YQiLzHGYcY53mlqLUhqgslgQlyrI6RExzE69hQTi8ksQmlKCnQsscaTjgtcVhDVI1JvcMbS39xhuDPBY2nO1Ug6Gls6dKRRkUcIS+5Ca1TrGsYUuAJcmZGLAqESTDZC1dtkZo58NGZ3exetNKrbwOkE6yTOG3RNs9sbMNtuIrUjXqix9e4mjQS6CzMkbY2crdOZabCdTti49C7emkCTKiVJ6tjeTskS6N8STEyDkyfr7PRbxCci4qjNu7csR8shP3XkCOJGk/RYm2ymReFT/spnTiBwvLq9y/FbY7g15NB8wrd7e9h6g//8qbmwK8ggxTHMDVmR0ag1SZKIw4cPcfl8H3+fIkJpHINxyXBUMupnRJ
FCK4U1hijSmGKAMQVaxwdFnoOQDHji1NMsH3qJu1sXEFKS75fUWhqt78+B/137WDgLBOI654JO5LR3gRdkpmCSTRhmY26tX+Pu1Ut0Vw9RqzUZ9nbYu3YDT05nfgHhStLRfhDvtBFO1cF50sE+UQRxzeKMw0kBaLI0C+CtOEHFbcqspN6UCOVB15CRpNkQlHlgqjKlwSNozzRJGhoZCXScoGPJfpbjFSgNSntMYdCJxDszbepg8jA0ONoa4CwkXY0zhjIz6EgGeiNAxYKkW/VtjMVT4rKQkKpIgTM4XzI7+xj7uWB4rUe+NUaWBaqr8cdmaM53gsNIjc0yCh2R1GK8EtSOzrN/p8dwfUijXaBrCucinj9zmLa3RJFCuoqIO4r53rUxu05jVpqcemSer545zNYoZ6+f8eqbA1CemVrOG+/ewB5apHl8FltLyJzlX75xmSv9gq2sxl9/dBU3usCjMx1e7CsaUYvvbvXZ3DDML4ATktxYJnlBPWkggEcfPcel8xeYYpq0DOjJ0aRknFmy3JKnJfVY43W4wY4nfV576wWef+xZEl2jyvSZ8lUP0hHzh0/S2+uzt3sXkxvSfrjZve81+uN0gA9qWVkwyTOSKMZaS1aO2d7bZXNrncH+FpP9feIkZrJ3F1OWiKJgYzhC15oUaYF3E0ZsoKQhSwvimqAYefLxVmCzJ3T6nBHkJdhSo7RHKEGRGfLU020Lenc2yZSm1tREtQitgwisEJpmJ9AyTUa2kr/QpIOcqDadIvBBKNUZPBqdgNSGzqKkHEO3qzmaVERxsUAWAbxly4BJMaUl75dEzRhnSpQWJN0QegkZUYwduu6JWx6TGUTUphTL1ISjvtQma0Z4BYMb+0x2U3Q9CQjQJMKrmL0rA6L5iKQR0ew2KKWjITV6PCEvMmSz4Oq2ZXm2iUgNXoaypCihO6/ZutontT2YHfDWZIt6LSaiwTNLgr0i4fpGjU9/7jgvTGJee2GLk5v7dM/MsH5XcrtoUnc5I/EK5/5qh2uTBp9qd+kbxXdvjul1jtMuLyMIw6Zbgz6L3Rmc9xw9ukatUScdWrTwxEqSCcmkMAzTkklmaRWGsjDESQBuN7TiOy9doLCCn3ryk8Q6OZjgEEKwOxlhPcwdOkO+H+HLjHSUU07U+20sHw9nScdDXnzje6ATbDoiH2ww3Noi7Q8psxKlNVpLkB4ZKSZDC8ZQDEbVfE+ByXOcIqhkKYVMPH5sECLkFvfvr/nE431J0lDEsaRMJ/Q3UyY7BbV2go4SbJlRFg6Jrg6kZG65gY4845FDKUU6zHHWkU8ynHMkSYxJS5w1KB0q2/VuRG0uhFemDDlYvSWJRpD1M3QSI4Sj1olRHYGOFUXqEbHA5R4VSYSW1Do1ZM0gI4c0HuO7ZH3H4aMNrJlQJnUKD0UnZzzOyIzD5AXRZord6qGSOuVGDosKYyy2VIylY75RZ2FGsDy/yZ004pWtBYq7guXTXY6c7XK222UkJCc+U2Ol2eKZhVl2rGc5SriRp3zr+i61HcnSmYKLPufocsypI4d4zkaka/N8szfgxlsb/PLTKU+v3uBmucC7kxp6Zp693oiJT5hdXqC2e4PMFngEW4MhjxCOX6PZ4OixI1x95yK2LJESIuFx1pIXliw3lMZjHAGX7wRxEjEfe772B+fptFp88szjAT3pA35lsz+itNDoNDj7mUcYpo47WxlJohGXr7zndfqxcBZvCjbf/gOMsfjSENVUYLG3jvHumLgeETerMeyyQIgcqUOZ13mPjhxSqeAopSOzLiR7HrJRgS4FUU3ijKDRkUEEZwLeWUwSCCmFAF8GSHM+KcKksFJkeYozliLTCNEiSjwqK8kmPbLRBJNp8jSj1o4h9jQaMdaUWOuIazHOCvq9EqklZT/FOU+tIUnQWOswuUEYT20xYeV4jVpdhfLoriXPocyKUFSYFPhhiYokKlaI9gLpIMcNFZNxgRcgtWTh9AL2+h6xSrCbKYPNgEb0WYoUjuzyPiJJ0DNdVk8tcW5uSK4ln3jkKxzVdfI3CjbUkNpCwqOHF5hrarA5q3FE31n27YhbRcmsrvPy5g6+Z3j9uuOzu5s8mVuWnjzO+IUNtj34Z9e43upSm2ny2LGMJNY8lHSYFwO+28t4rKF5ySt+cbHkpc0CoQMF7sbefpi0qJCQp08/xJV3LgaZdgIy1hgYjQuKooIbWxumPGQg+Ti20OT713f4rZff5uzho3Sbc4DE2YKt4TD0V7yncJ6xUZRREyOS971OPxbO4qxjtDtAJwohHGUusZknHwdK1KgeYVKLSU14T2yotSKkFhQ9i9QehA2bh7Rk+xabebw11GYSlFY4G2a0HKBrHixh664rpPZ4J5GxRSqNjBzZfoFOIuK6hLrEZJ4szap6vKfWktg8ohineFeg6oKkIYCEVlPjfEEcA04wvFtirKWWWZz17O8UTHKByQzOOPRsRLurEUohpWBmTtPuCu5uejZuFBQDi81yrClJ2jWS9jLza2cxw5TSFiT1iOJ2n2JnjGtHLKy2Ge+lpFsThBPgSqwTQV7PA2mGyVImc56bogCp+M2XriNiTborQTdYXJihUa9zamaGs3HMnit5JG4ghGB7NKQpYp6fEbxlRzyyN2L22oR4FLPz7cvYmYTVrz7OhSdWWd0es2Ik11tz5LbLYTHLzdE+K3rCxckyn5uZ8KUTTe5e6XApG+K9p5emWFcgVYL3cPKhE6GHNRrinAy7hLXkuSPNS8oioUgtpm4DrCGJOXtqhfKNkksbOf/0O9/lL375y7RqbTJb0KsigemIeTu2ZBGoKbHbe9jHwlm8B1sGJo+4EYgFitxQZAWusEx2RtixR0QKPERRKBHmY4PNDQiNjsEaF6hQhWW8NwkDd50YoWCyXyKEII8EtZaGmkEllepTJlCxoD4XYVJDNjIUowLwxM0EfMDJZJlBSpj0cspagooVdgBRQ1WlSUOtBjqGOAky40kCXkr27kC2leNcBSt2EjMxVWdUkU4gtwWNmkTHUIuh2ZTUEhj1x7jcopoahMTqk5jU0tUeo2PyrMTHMbQccq2JlIJaVNJZjhnugppbZl7BeGeHkS/RC0sk8/OMJiP213eYebjO2uoCsfGcXIFBXpBvrPPduzd5SXoi77ESuvUa3XqDkRQIF1Fc3OXc2iL59R12mo69x1qs0WRxGOEv9dCrdTY3BYMiob/R5+ee6PKPr+eIoeUXHtI8Ee1ydqmGRLHbG0MTsLA/TpkUY7pRgpSCZrPG8ZMnuPLmWwhRYYcE5FlJVljy6qbjvcfYIEy1NlNjZbbBraHl99/Zptv8ff6Dz32RcZEzTPNAlVXaA2zTQkfQaav3BYB9PJzFOPJeis1tuDhlaCrGNYlwOrCYJA4VS2QcJnwnwwIsxC1Nra3wVlJmoTmVdGC0EZJuZy0mE9jUoRuKMpXk/QJnLLUZTZmVJK2IbGDIB7Yi2gZd9+hmifWWYhCD1RhjwXgm2xOSLoGQO4K4rUFCaQIZRCwVZQlF6nFNkDqomPlAv0K5n1P6e6CwrF/Qv5sxf7RONjGQC
mxTUKt7Wl3JTkVn6hzYCcQL86g0ZXL1LqzVkfUIOdfA1OPAAawkqhUjUgHbE0j2OfXwGs8ee45J3/B7r93mxt4e7cUuS8tH2M5G3Ly0ThJpjq7MMzNXpxVFZIVjpdOg1dDMNBUd5fHGcOfNDe6+sEk7blBuGrQUmM+fIH70OIN37xJllqVPneUylpVDBfWru8yLFsdrS/zi0SFXbuYwyOmurlKvKwb9HqvtOtvFCKQkLSyDSUq3UckZSnjsyUe59ObbKCkDk750GARZ4RgXllFaUs+KUPFzniiSfPHxOf7Z1ZLStvnWhW0WZ1/jyOoyeWkrLgZRKb+FgCEq848/+Ms7FzDQUmALg6xbdF0gdY16UsN7A85RTBzem4AviQJRQzYsSHtQjgMWxJocXVOgBXEnIapFCG3Jxx5beJKWZ7SbY1JHMSmQkSBpRQGIoj21GYmOBFkaUW+HERqXS7Rqkg76QUG4yMkGPgCuhCEfQdSSJC1JLh1RXMMZx2QAKlJIlYAoUUkoNJjSIJM6KpHYssSPS9JBzqAnsZmhNJbuQovFtRqmzO8hJsclrVOnWDhxCP37l5gUnu0bOUWUsrhaY/7QAkfmVnm7nzPc2Ge0OYK4SWkt37u+wYvX7rDUiDh5YpnBbBel64zGQ86omCMzbfyTRyi8oMg93z+/g1cx5vYEpSU/+/ASn3Q50XaJGHcYDHuAxUYTxKkOyYs3ePbhh9j/0hNcznJ+483b3Ly0zenHl+i2FtmeTCjMbcaR5rlHj+J7uyxoQ57ntJp1lA9/n2zFlMax2RtwaD4A4qzzrB1ZpT03y2hnl0gKEqWYGEOeWyZplejnJaY0uIoX4dlDNb65CRMXY5KIX/v+BU6s3cYUNiBkXTUB4u81xWtaved1+rFwFgTohsaXjnyY0egIVKzBlIx2c1RS9Re0hxLKiUHEEh1rVEtRjquybn8ERYmdSYjqQbph0i+ImxFxPcHlUIwgbiZEDUs2zLClxPlQhbI1G0i4VYBWFilEsaY5G6OQ9O9mFP0CVxZ4Y7ASIHBw4SOUipDKIWc8MnbEDY+MLGVqGe6OmE3CrLQvPE5ZhAJRhrk0VVPoRKJEhJ8IvNWUuSJp1Wgu1ZjsWLwRqCTB7g9xu2MmDoa2ZO3UAlFdsr2+x+b1bcaFQtTn6Z45QTkYk+/3cJ0ca1M2ipLexgaPzs/REIrd7Zz969fpTcbEFy7RfmqV9fWU3p0chELogPG/E+1zeVaw850bAYbdEbiJYU7Xmb0Da3/iLLId8b2X3uD3X75Or9dg9tQZrm1ErKY7PPTsDK8NW/S2M6jvk168jTw/5PCff5x0uUWeljQkTCqk6c5wFC4MEWYCIx1x+txpXvveLsqJip/dM0wLukVCmjvGw4JmoySpl0R1xUzkOdyUXBh6okYNl6zw7vWb1Fo14kYCThzAw3EBNPg+G8vHw1mEEnhTUI6KigyihnAOYybY3ONNTOkD/2+ZlmAljeU6zZlQTnZtaM7XGW1bRptBBluoQPDgjaAcl+T7ObVmEzOyCC2RsaexFEZAXFkQ1QIga9IztBYjkpbAlZ4itSjhiGueRrdOMShwuUE1NO3DbWxumeyU4BWudJjUk6YFkVbUGoIoCojKRlejjEEoSGYTyC1KaYxzYa1SoLUgyywyjvAoyswhZMzMsRmKbIAdCxrzszTHJelcHaskjd1qFCYrSBKNiyVKG9LRLQaXR9DqUj9yDDnfoa7HTNQAOxzw5m6P2d0BTz9+jAV1jFYn5k7dcb4ouHWtj8/DBK5XErTklVfGtJ9vsvy5BSbnCxaPzlHeGaFv5wx8wbu+5FtvXaQp5xh2H+fsoZjP7m9R/81bNJ45y9IIXtg5z7EzK2zcrHN3Y0Ln/G12/tY+O3/5M3zhmXOML77LuknJSsPtvX7Aqnh1AAQ89/g5XnvhJbyxxDpC5ZYsK8lyx2hc0k4UZWmxpkTbiCjSPDYHV7Iwwl+oGn52ATPaC2NUUXIAzvPiPtj3e9jHwlmkFoDFy8BE4r1GakOcSOKGwHmJSQV5r6Q5L9ANj04CDWctURjh8MqGqd1WiNtNEWhOvTQUkxKXezIKKBWNpEaZj0P1w3smvZyorolqMYiSfGzQsaRIw3xZljriKKLWbBIlY8oBgYs4kchYoMceFcswOas9RVqS2YJWp4mKNHmeo+sKXZPVSQpDoUIpGJRhENN7itSRjxxxQ+JdyWTkg8PFkririFfmOfroWfLRkJmzY5q+xq5sEvVyFo538EYwGI6xAtr1Fq1Wwu6dffovfx/VbPCFX/4ETx0/jpSSXSRvvLrL3VsbJBKe/Kkn+eS5QzSyEU9/uuS3Xu9hneVQC3722G0eXt2iU8uo1+vIX/oiDs/uDc/t33mX69vb2ENznOx0efc763zy1Ss8u1DnzNIaI5Gw/Tsv01h9kj/zlaOI4iL94zP0knl6371OIuu8s51z6E6fre27yPkOznvGhcV5i/Ah6VZSsri0yMrRQ9y5fA3pHVo6ihwG45xaominlvG4pNE0RDWHVJ5z84p/teUobUjmRXsmoG5HPeLGFPdUcRz4fw86+OBpLQnSnsCMBdYU5ClhZ3ACnWhkDLoTYliVOISwFCOHdIEzy9iUuGHI9g2uVHjjsRjiToTPHTKRxHWNlRWphIJs32FLgWpInINiUqLiKSOlwpUG6pKo7ijyjPHOGFdYhFJBiSyzqCSiPhNhigJvPM4K8qHHGlDSEdcl4/2M8Y6F/QJnPXk/RXRbxI0GqR+jhMQYSz4uKSYFHotKwGuHUY6oDvMnG7SSYyTFhDEjdmnR1WNqD69Qvnib3Zd2OPvkYb78udPoqMlOf8jV7R5v6YTRfoFWkjffuMztiyVnHj4M7RmG7Yy1x1foDjO+8Vu/T/5qiyuxJRI11IZHa83cnMJNrvDmlRFdURCJbS7svsvAzfL06U+jigxfV4xu7XDlB1f5uf0Jx3cE3eePcO2N6zDJqScaarvsp6eI5DFuRU0aUUraFSzWBX/+ZBf3co90A1wriEeN0pw0z2jWNELIABVWkocff5S7129gC4sWgtwZsqwkzQ39UcbMJKZbJCTGoKKIhRp0pWDbVDwLUiIXFvBY8mGfqB5m/LwIaNz3s4+HsziPMR6PoshLpAOIwgGpxUgpKFODM1AWHltIojq43DExBhkZEB4dQX0uwZSB/CFOQgNPaignENUC4VyejXC2xBWh3Oy9CRPNXlKOHVETkoYD4cjHJUlXkdQS9m+NyAYZcSsCJXClQycClVQOiKAcBVb/uCbJJ4bRYIzSCpNbXDa9hUnKscGMR2Hb1yqUr5XEFRIjIiZSo2ONGEfoqAE+wpgZZhEMM836bg6HjgE1xOMr+Nt7vPWDW1y9s8/iWsHR1aN85vHTXEuOMjt/lsX6hM+faiIGAy7tZ9zYXCdpdfj+1jXm221aMy3GXmPHmtnDLVaXNEt5g++sj+noR3n0XJuBlzwqbrJyZAOHoNFukP70o+Tnr3BoZoG7Mqfe3+Fm01P73tvUl+fYuT1i8Zcf5sJqzvde2OKXTw04uzxD6/Ae
22dnmH9khuOP72Ae/TR/9tUV/ue3f0AuHHnp6Kc5rVqz4lMIIL0Tp47zvXoDZ4YVx5wkLwyDSU6sBMNxQZoW1JolURzTqCesNQz7XuJERToiJGZmEdIcP8nCaJCa6sK9t31Q5a8Z4O8BjxFO938MXOTDUv5yUI7DhepCe4M8N6AlSktMAS6fTo4GalRXKOJ6xHg4OWgmuSiuYk6HbnqEDE5U61q8kJTWYB2YoiBuS6z22LHBZgZnFXEjwjqDFoHwz5WBIEOgUCKhGJZBM907tIzCmhoWKQELIlI448HGZPmYTqtOp9WgphMGd3ZAFoDAixqeBqpRp96tg5IkjQYFMLvSQtVrTHKHjyJEUiN1gFAcWWoy2hqTLB3hUJJiUwfXN/FnluCowlnDeJTRkoIXS8eV87do9sasiYLHHznB08dOsNRSrKQZvQtXyW7vMr+yyOxI89rbNzDjCaIs2Gsq4llJNLGMdkpeXmhw/eWIzmqbf3Z5F1fTfPYx+ORjOXXpiTqS7NoddH8Dh6MmNTaTLJ86xNawwX8/mEW/MmA52uPEoXm+fskgZMLewhLnHlXcmkTEF25jyIhiyG0Qu90ZDlmbmQs8BRXKtNmqcfj4cS69/jpB4SCw8KeZYRCV7A8LZgYZzXaDpOmIgGNtwfmJRQqF1oFMxGsBS0vYO+u4NCVq1MKI0o/qLNXF/5ve+z8thIiBBvB/4ENT/gIhS4phIGv21mGNI9YJZpIj4wSPC1y3PuQF3kCROVxW4d47CeXYgLIUwxJvQXcUKikI6H2FcwZVU9RiTTn2mEmgIZKJxACqpomkJ0qg2ZGUE4lzhjiJiYRC1SKIBDKqKJIQCK/AeBaWFwFJWi/RcYJzbdozLU4cXaadtNnY3SBJMuS1dVpPfQbb6iKkgjgKuAslqQsJtQivBcoEni1V4fU1ECce2SuZ3BjAWp16PUfYHHNpF3t4Fl9vsbNv2Hy7ZGbtBr2oxmdOLVKXS4yuD/mvfv23mOtGHD1ep9001BsNzs50+f7L27ioRbK6RFKLsULRbiseeWaGm5cNCx3N82e7LM+3uLY94eLtESe6V9nd2mGhKanXE+TGBCkjTFbQ2pfMHptn+ZlDbB5LUG/sEC82+S9/ocFvvdNBRQWvnh9zaLbDt0yNL2UdRldv8fZsHekLnA/Ed1uDEVIpBALrTEXTKzj92DkuvP5aRd0qyAqLyAxCwKCmGae1QI5elhitOdrW2JsFTnq8V0gRiNtLB8nSEm5jAzvO6dTj971GPwiLfhf4AvAfAXjvC6AQQnwV+GL1tv+BH0H5y3sP0hG3XAiXGpJsz2Fyi25pbFkSJRonPUpE2MKgIoGMHdoGho/6nGO854gaMXgwaUEuA0pMC4nSHls4bOaIGnNYNSJuBvpXGUnERFSEMBqlI5y11FoJtnTU6xGRsBx9ZCVg4ktBacA5gYrrODy5aOGFpmxISh2j4hp7UcyL6zW8jrCLZ2m2U8QL3yE5skat0wl8xYIQSwsBIujPICEidKqDKpnA7GeYNEI168iaxG7llNd2MaMhKUNOHevw0BdPcvPyLus3chbXxswvL5Gk77J99xJp0WB+ZZXtSc5yGnOqPuH7Fw1fc54yt/zMagd78Rp5muNVxNjkXH+9wV4UYx5u8U8nEU8eq3Fzr+CJteOMiyVmOwMutg/RGBf4FA6P2qhunXGzYPu0J92+xhOPneI/OXaC//cr29zaEax0DDOLHT53dInfeOUWj6UxD80cYn3nFje0wJSBI8EJy/r2LkWZEenkgClUKsHRI2t0unPs7+6gCeDAPCsQUgQ8TGaYTDJmyiYucqzWoeFK+qVEV6preB+YY+I6pjuH3blLzHtzhn0gZwFOANvA3xdCPAm8DPw1PkTlL51oJtuGqKaJGgIVhw6+jBRRQ2PSHJdafAS1ToKzBms9SU0jtKAcekZbRSCStg5jDLbwqMQz3vHEDYWQgWGlKB2MDc6EC9FWYZ+ux2jVxGYlpW6yNy5BNogSTZo3Sb0mR2JUhFUKr+JQ4o1jnBdYrUAp0AoZR6AlUoFUAhnHaKmo9/eRStCeaWAaSeCIk6GKFpjeA8+YVBIh1AEnmhYKuz/C1hKEl5CmRFFEdmqG0W5M/+oW+9+9yO7AcWTRc/rJp7jVm+W5tSV0Y47OZIMZU+OF1yJ29nc5NJtQa83SEDfxGz1OL3Xpru8jS4habVafPszo9S288agTa2yfPYxYkpBnnCVj641trvZHHHp4niLPOTqA7udP8+tvb6D3d/jyJxZ58tmHmJmfBSE4mu8R3b7N7507wV957DjvDnf41bcucfF6RjdNeLR+l9+bb/DU6UWuX6qznhqElfQmaUAvVvqGooI+N1p1HnriEV7+9ndQYqqoJjDGMB7n9MYFM5OYvCjRtZhuPWYtkexPwLhKn8VTae946vMdnHbcvPDOj+wsGngG+C+89y8IIf42IeQ6sB9V+as5W/c6EdjS4gpJPvI46ymzHDUOFEcmN8hYUlTaKq4wGBVyHGccac+TzEbYoSHbLsPh0wpfetKJJ2m1iOsx9aZEyjq5rmr49RivFTKKcbpJtNrCIpBa4XSEURFjrQM5ntaBC6AqOasowhPI+ZwPjiFkSNQDRY0gSqKKm0xRq8UVv65CSYnFhhMX+J1CcikqLrT74LDGWhotTy4s+aiSdXOO/uaY3tVtonaMqtfZzeqoVFCfXMflirfe7bGHRzdjfmZlmXPHHe9sjfidSymH5kY8cmqFI7ev0rpwi4nNGTcF0mXc/hcvIWoSMoe/fRP5cg39uSVGjZxdYkpm6Q3hdKPNeK8gzzL++eU9JtRZOv4QT335YVoy4+7uLutpwbWizbOffIilCNb7PU525pk7Nubxa3dYe+USO0dirjfh4ckVnM8QQuHxjHNLVhpifU/rRsiAmHzymSd48/svUJbjoM9jLM7B/qBgv58zmWtQ5IakMETK8NCc4kJqcXmBiiVRxdcGAmMcot2FzgxF+d50SB/EWW4Dt733L1S//7PKWT405S9nw51BKYWTgVnQeocowKQCW4TwKmpqXO4CZlt6yomqYtqIKJHEcZNiAlFnBqFjVJxAEqHiBBE3UK0GSmuiOCHPPUon6FghIoVQCmXgoWNL3Nzrk7sQHgVOMR8EWqUGQglTMuVPBqoYWGsVdGAqLLirGC19xUrj7FSXxaGVPEgmJSIItZYWY0qSJMJZkCowNWIlWaopxgN2ipJZEdNsNNGdOrNPHUYkJbM1i0ktDbPBkRk4dLJBs9vlO+c7dDPP+Svnmax2mNy5wWRsSc4c56nHD7O3MmB4O+NIq4Ooacp+if7kLIPzd5CzEp86Gs0mj//JT7G7aLCmYIYlfuOtIQ7Fk52UaGXIp59YY6yWeOO1Af/tdy/xx0/W2BjXGK00mBWG44cl51ZXeW1/F/vuNb4iY9qfOsmbv32F0daIcycW+dT8TV55bYDszCO8Y5Tm7A0HdOqNqnkYxoW8h9m5LodPnuTaO+9QVLuOE4JJadkbZOz3c8bjgkYzwZiSI02IZFBFcDYI2irnaGmYNxY
XSw43JO8H//og+iybQohbQoiz3vuLBE2W89XPf8iHoPw1zRVwQa+jmFgwAmSMKyQShYrqSBVDTeOJ0O0GXmq80sS6gYgSXK2GEhqtY0QUIbSqfsJ2+8mHlznpU5ajiN/cmHAj80FgSEqkVvjCsjWZQKKoScVUwttZixKqokoKs0MOj3ThwEopETrkHzJSSCUCjh2Bd1NOTFFRMlER/XniWGNsgBZ464giVU0re5xxYIMDWueIG3W01HTG+7hak2zo2L/dw3Udx1uGpu3TnO8wEyvGec4bl3sM8ohbtzb5+c+cY67XoFg7zqunYr56doleb8CV33kVO8wYtgy7bojbc9RmEny2x7iVMU5gZHPKss8P/tG3aSx54nZMjiaqR2TFMm+PevzZL9zlZPkWb+61eerUWVorK/yLGwN2L2/yv/rSGZ44cpQ3BrtcurWJTmIaF1LccJfBXB05E3Mn8/TqbXTjOeTuFUScI5TGC9gdTTi+BM6F4zkVKJJS8fgnnub6xXeJtMMWFuFCHrLfz9naTVlcyOh2akjhOTFb56kZRWIMR2VKSxpaCmYSD/2CclKwqLMPpYP/XwD/sKqEXSWoeUk+JOUvkAg3g0PjY0G8WIMoJm50EFGCVxriBK81Oq6hkogvPn2YH1zcJjPgtURH4eCKSlfSK3lAoi2lx1u4ttXnSGfCjb5GJg0SAaIMo/9CiIABl/JACNZbsJWmhxBVuOUczhMaWNUF76cxtVBYB9YZIq0Q1gd6Uq2R6h7Fq8djrA2Owj0Mp6j6AAcw6DwMlwrpkZGgWRiUHLN7y6IaBjOxRN0GghFOabQb0oglCwsRM3NneOnqEu/cvMXOeo+7b95kfrXGH1txrLl9RDPm8vwhJvQ4khfY/QEeQaE8C08doVUucKNf8NKlMQbFLDF/9bmIovkwX88znlhSPLebsrsjqDVSulpwbGWG3dEMP7i0w5tXmzz60Ek2ZUFy8zYnF+dYfWiN3l6Pb3/3BziX8/QvPcXyp1b55oU+O6sjZHfMF55Z47fe3YHZGayHzf3hARzYuQDOEyIQIR45cYjZhSU2N9cRAqwJx3ZcFOz0UnZ2JyzOJUiZ0I4Vf/EE5Psj8rQgasSYwuJKj4kdcbOGbukfferYe/8a8OwPeenDUf5KmnDsOXRcI9IKGUUIrZFaIkW1MVYYhjIvaTY1S50GxxebXNzNiOsxIgrUqAKBM6G/IqfU93iEkvSt5xuDLgJPaUwgDBeVapTSgZHbg/cSZ03FgB9mzIT32Gr6V0iJM44oqtSlfEg8HRbpA9Gfc+HfJVPdloo8ECplYylQQmGq7/G+EsswljIvAmG5VAjhKcYZ8cSS9Qqy5SM0ZiT97Q0atZThzZx+Y4XZzz/CWq3gcH6VK3tj3sglj61K5n7mNM+26+zuOR79wuMU2YCsZ7n2+k3OHG7x3bLFenwM6XZAS77wpRU6ylDkESszMXu2wYsX94mV4I112N25yclZyc3X4YnnWlzcm3Cn1+b4es7aI6c59dkn6ayUXBq/w6n6kKdmjnN8psP2xiYvvfEut772Js1LJd3DXbJ3BiitONLf5ysLW7S14pNPHeGFd7fo+XAu+5PsQD0gHD9/8F+tFvPIJ55k6+t3An2vFEgbQub+MGW7n3FkUlKLFUVUUK8l2DjGupLBzgAhFI1GTGOmThTrAA95H/tYdPBFHJOsLqOUQsj7S6kCKmm7acKrkhBa/WBzzI6R6HoMMmzLcE94CBm2bC9C+BPY+RUGh/AC50BHElSg5bPWEGQFq+RdVeFWxVGGIEg/GItKJFGkcN5hywoyoBRRrNG6onGV4kDmTYgp+bivAEoWay3WG7AeFemwRmOQIugoBlL+wAEgcoEdFlgpaU02EJGkPy6YPxKRvpGy8/o1ZC3nxpzizz28wHPHM+LmiJbeI466OHOSredP8e7GmK+/2eOnVxMeO73CVr7Llo+5VJ9FPH6Yzxyq8fxnFxC2xBNQnQ9/WvD18wN+/cqQJ45vcKGxghKeK/sD/sfvOB4+v8s5Vyfv13h74xovlim1owv8/GdP8lCzwRtv3uDv/+B7fKLb5clCsfjMk1y4+SrbO/vsfWfM0ukWP/UfLdCes0gUSSJ47EiX7+4VCKnY3NujtDmxCD0QUakoeB9I9R576hFe/d6LFDvbSGcovUEhyAvD7t6E/X7GwmwDqnNcFiVFVtJoJSTNhKJfYHODNx6T/+il4x+7CSGI6wEIr5TCWkukNQ4XLnamJdZQZhVecneYh7ArCn9CaSzWuRD+ROpAUi1MkyhsUSJEkLN21gY1YHH/iHbANkgVdOdVxTghqzjZWYfHVuq5ldyFDYwiIdEPz+WTFB1phAwa964iKy+L4p5oq/BEUmCtO9CfQQqSJAkCRjLE6EqCcAZXlphhTnM2IUsVxZ7DbOT0GxqcQHQSrFDofc8/+M0bHOla5uJrLLQNreYs9eV5Xlk3rP/+ZTb3J+wttnnyYc3PPDXmL0Sr/OZr17l5JePGOvyPtwUzgkCD6iG3jstbjmw95e/erPPUp1t87vQRZo/Nkq171ndv8tuF4dp8nYUnTzHxNQ7vCV64+BL9qyV3kznml07wn/7MYW7/g99j8607LDy7wOD1bZaeaHD6l2aozcgAWiPoaz79yCov/s51ilixNyrZHw2Za7dQKsY7CxXZYlFkJDXNM597nm/8xtdAgvUhn/TOsd2bcOVWn1YiOX16qSJtlOzt5+A8S1qRliWRMWAFcj8N+Jb3sI+Fs0DgIw7YakekI7wMEtzeh5KgcIANF6ux7iCRdsYFcSHnqNUCqRrWoiJd3fUFTqtQXrQucIpNWfO1oswyEII4irDO4UuHFg68QApV7WgeoUDXE0xRUhYGLUBLUNJD6SjLMGDprQMZmmcuL3FCIIRCVu8HDoSHhNYBQVlanHNkaY4tC6JajBeewlqUkESthO3+LvOHE0ZZh9FWnzxTTPYyvPYsnFtkcXmGw0stxmoNF8U8u5gg7V2ask3SOMyJExFXtnJeeHeLdrrLZ87sYMuUk4tD/sOvtPhueY6ZYYIvNcszCacXY4RWKKXop7Dz7XUaizFPP9xm1nry/oDf295k5fmHkCdWqd+ecPvlO5ybsZyaP0xn4Sxfzy2yzOHuBd54/QWe+TNLPNb+Cv0LI26WPR76aptap+KqrhqFHs/SQpMjnYiLmcc4S+EKrC0p7YRa3CVIjIAXAutKnvrE47x7/iKXLl5gSteqpafIMt65tIEZ58y0Io4cX6LWFHRnmgz2Uvrbo2qINRRpkvTfAxZ9gDDlBqjAdSxkWJpUCqEkZRnutNb7g7q6qRI6LUM4hXWBAcR7XJ6jlK6cLgxWemsC34QM+YSTEt1s4IzFADKKA2OMNQeM/qG2X0lK+6oCZkusACU1ZZFjS4dSMZaQrAspcGUOUqJEcFoXEijAkxUW11JoqShLQ2ENWitEFBEnGoHAV7uO1horc1qHZjBKk+5usbhc4lKPmWshu4pOt4UrDXf3BhxamWWx08DELWa6R8Pf6WFWaT7ZtDx8pMPF3+hx62uWxm
eXEMUlxuPb1NM9Nvo7bA48v7PeRArPzJEW8+2ESeppeMGqn6Gnlpk/fIhhXMN6we28T3y+x+k3Njh8IecTX3yWk2fP8v985Q71jSv89KEt/tSfGjHXLZjYDEuXeFnx8C+30Y1KaEqqcF4J095eWJ48McfFt3YxQrDTz1ibWwYHUmiCzuc9lWipBD//iz/H3/s765RFD2lLXNBhZzQsuHJzh2YisKnl8EPLtOe7FGmlem0tSTuiGJVhl/+4w4ohULgKF5j0g+AQFXGdxjuDNBYRBQzI9M5sjSFKYpQUWFOG+rlUFbF4OBFaKoQ1WGvAB8qkSKlqUDI4hdQa4R3WGISOwk7mLVKE+r30hrIIPRJjDRAakhKJKQ1KRwfyelII8smESIOKdMiJtMJaT+nuCSw56ygOhjIFShCgrsZg8xylACfIbI4QkrnDbca5RSx12RlPmD8Oo0kdn4DNC0onsBZ6u33G+wMiBzM1TVxX3MkMRzptnjm0yq4zZLcHmMNrvDQ7w6/MTugkiyAKsqLBv9ho8xtfc6ytNPnkZxc43KgxqMqyiZNc3R2ylo3JDCz3S+a/eZWluUXk7QQ5jrn27fN8Z/MqLxSKY588wR//8iKd0Qvc2nd8a3SUL8ttjtRzooovwFaqZ1oGlQMhJd54Th+ZoX1hl10Dd/tDlFA44SqA1jRH5UC+uzvT4rNf/AJf/7VfR6rQq5PWY7RgkOe8fHGTmzd2+dLzQ849dYzZpS6mMEx6Y8aTMuSmhcWaj3kYJoQA5zClO1DH8sYglMIUBcIR7s4lqERD1bOQ1uALH4gkfAhliGvoOKpEOqeTyqFnYvIctEYkUegSS4ESVc7hAeuxrgQXpCmccaAFRVmEXo+SeK1wQlI6kM4gVISQusJ+B7Z/IWUQBg2T5VAadBQx7bho6ZEy6MdQ2kCTag1eSGIVdGhsHtjd40ZMFEcwGZBen2BNhp9L2LkrUC4Ns3LkKKcwpUPN1um26tSbMSfnZplt1Bj2+9wtS17d6mHHYxaSmMX5Gc4NYwZX17ALsyw+vsbcfIflYoI4scPh0zMsnJ3lM602fRMYUzI8nTLnuVqDvD/hrVsj+kWMup2y58fsL5XoL53i8hOr1N9UyGXJet+gJl380PGzbpvFpgYVEfqzMsiTe8JNJwpE6Eoq2k3N6cUW/TtDNvcGYRcId1CsczjvUFIfqEE7L3juU08z2N3ne9/+Dnk+QQPNSNJPDcNRYJ38re/dxMqYU2cWsM6TFpad3THGemZHGfZ9yME/Fs7ivadMsxByVXmH8GCtrSSZK/k654K4qQqj+x5VyW0HitU4iavScYnPS1SjBgJsWSKsQOoYnUQUeUZcq6PxOByUDhVJSldiCo+ONaYwVYNKIXUE5VT8RqJ8GNwzzoUQTxiiRh2pPN4ZLFT8Y1EI45REah3Uiwk7prXByaULBQApJN6WlCYPfZko9GukBh0rvC1om12ysqB3tYbJfejY1zVJAvv7ffJ8QmRL7KhB1EnYHU04vTTLYSn57uaIV95yLBUT/srZI+Tfuk1T13FDg1MZ48ct3V95ni8e6/BriynxIuwXQ353p89TKmY8njAZDrj8ylVur49I7mZsij4bx2K6zTqrNx2rtDn37NO8207J9m5wuF2wle9QsxkzcY1GA6AqszsPJuR2nlAmP6ityzB1/MSRGV5aH7DRG5AVGUkcB9lEUSm9YZFShwhEBvjxz/6pL2FX5/nWt/+AZG8PlWU0jCV1jljW2E9LvvG9yzzRG7E832R3L2V7f0Ip4PCowIn3JqwQB1qIH6G1Vtf847/yV7HOE8URUktcUQYiZ6aa8gJfGoTzWFsStephrETqEDrZMFMlrMEMxzgPcauJdTY4lnM4F0guEJWknq0u9mr40VXalSFMKomTCCElZZaH3o2XqCRCKIGzHmemTUV/0A9y1qCiAFGWSgUtSR+UcbuDIX/r7/zfuba6honisF442AHxAX7kfKB7kjJUiUBAmkFWYJ3HOhBKkU0sqhUF6fKyICJozcSxDlPZkSaKFO1awm5RMtkuqWnFQl4icotGBQnCaniz8cgyVntujcZE0tK2MMmyAJdQglgJyp2cyAkKA65Twy53iLMCtZVSa9RoHprhwrjP5M6Y+TmJmaQsNzSqGipVSgamSQRKBxSkmOruVH0yUSl5FaXl0t0R4Dm5PB+Yfg5auBWQTtyb8ZIVS/7eJOPm/gjhLCpLkaMRxt2TTrfWEceaZi2itI68NDip6RrLn9vtczlNf2jm8rFwFiHEkAAm+6htAdj5qBfBg3X82/aTXMcx7/3iD3vhYxGGARe99z9sQuAnakKIlx6s48E63sveXxfsgT2wB3ZgD5zlgT2wD2gfF2f57z/qBVT2YB3/pj1Yx332sUjwH9gD+/fBPi47ywN7YB97+8idRQjxc0KIi0KIyxWl0o/zu/6/QogtIcRb9z03J4T4phDiUvX/2ep5IYT4f1TrekMI8cyHuI4jQoh/LYQ4L4R4Wwjx1z6KtQghakKIF4UQr1fr+D9Vz58QQrxQfd8/rkB/CCGS6vfL1evHP4x1VJ+thBCvCiG+9lGt4Q+1+6WPf9I/BLjVFeAkEAOvA4/8GL/vCwTyjbfue+7/CvyN6vHfAP4v1eOfB/4VgXftU8ALH+I6VoFnqsdt4F3gkZ/0WqrPa1WPI+CF6vP/CfDnq+f/O+CvVo//M+C/qx7/eeAff4jH5H8H/CPga9XvP/E1/KFr/El90XscoE8D37jv978J/M0f83ce/7ec5SKwWj1eJfR8AP4u8Bd+2Pt+DGv6deArH+VaCMSJrxB4E3YA/W+fI+AbwKerx7p6n/gQvvsw8NvATwNfq5z4J7qGD/LzUYdh78Ux9pO0Pyr/2YdqVRjxNOGu/hNfSxX+vEZg5/kmYaff995PYYP3f9fBOqrX+8D8h7CM/wb43xMkP6k+8ye9hj/UPmpn+ViZD7ern1h5UAjRAv458F967wcfxVq899Z7/xTh7v4ccO7H/Z33mxDiTwJb3vuXf5Lf+7/EPmpn+SNzjP0Y7G7Fe8aPyn/2RzEhRERwlH/ovf+fP8q1AHjv94F/TQh5ZoQQ01Go+7/rYB3V611g90f86s8CvyCEuA78KiEU+9s/4TV8IPuoneUHwOmq8hETErbf+Amv4TcIvGfw7/Kf/a+rStSn+AD8Zx/URJj9//8A73jv/9ZHtRYhxKIICgkIIeqEvOkdgtP86fdYx3R9fxr4nWoH/F9s3vu/6b0/7L0/Tjj/v+O9/4s/yTX8URb7kf4QKj3vEmLl/+OP+bv+JwLnckmIg3+FEO/+NnAJ+BYwV71XAP+val1vAs9+iOv4HCHEegN4rfr5+Z/0WoAngFerdbwF/FfV8yeBFwncb/8USKrna9Xvl6vXT37I5+eL3KuGfSRreL+fBx38B/bAPqB91GHYA3tg/97YA2d5YA/sA9oDZ3lgD+wD2gNneWAP7APaA2d5YA/sA9oDZ3lgD+wD2gNneWAP7APaA2d5YA/sA9r/HyUIPmt4Cce/AAAAAElFTkSuQmCC\n",
+      "text/plain": [
+       "<Figure size 432x288 with 1 Axes>"
+      ]
+     },
+     "metadata": {
+      "needs_background": "light"
+     },
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "from opendr.perception.object_detection_2d import draw_bounding_boxes\n",
+    "\n",
+    "img_annotated = draw_bounding_boxes(img.opencv(), boxes, class_names=nanodet.classes, show=False)\n",
+    "\n",
+    "plt.imshow(cv2.cvtColor(img_annotated, cv2.COLOR_BGR2RGB))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "id": "436aaefe-fe18-49d7-b881-d0f64ce47742",
+   "metadata": {},
+   "outputs": [],
+   "source": []
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "Python 3 (ipykernel)",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.8.10"
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 5
+}
diff --git a/projects/python/perception/object_detection_2d/nanodet/train_demo.py b/projects/python/perception/object_detection_2d/nanodet/train_demo.py
new file mode 100644
index 0000000000000000000000000000000000000000..3ef0394392920fd75dbb59cd8f6290156bb41e6f
--- /dev/null
+++ b/projects/python/perception/object_detection_2d/nanodet/train_demo.py
@@ -0,0 +1,51 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import argparse
+
+from opendr.engine.datasets import ExternalDataset
+from opendr.perception.object_detection_2d import NanodetLearner
+
+
+if __name__ == '__main__':
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--dataset", help="Dataset to train on", type=str, default="coco", choices=["voc", "coco"])
+    parser.add_argument("--data-root", help="Dataset root folder", type=str)
+    parser.add_argument("--model", help="Model that config file will be used", type=str)
+    parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, default="cuda", choices=["cuda", "cpu"])
+    parser.add_argument("--batch-size", help="Batch size to use for training", type=int, default=6)
+    parser.add_argument("--lr", help="Learning rate to use for training", type=float, default=5e-4)
+    parser.add_argument("--checkpoint-freq", help="Frequency in-between checkpoint saving and evaluations",
+                        type=int, default=50)
+    parser.add_argument("--n-epochs", help="Number of total epochs", type=int, default=300)
+    parser.add_argument("--resume-from", help="Epoch to load checkpoint file and resume training from",
+                        type=int, default=0)
+
+    args = parser.parse_args()
+
+    if args.dataset == 'voc':
+        dataset = ExternalDataset(args.data_root, 'voc')
+        val_dataset = ExternalDataset(args.data_root, 'voc')
+    elif args.dataset == 'coco':
+        dataset = ExternalDataset(args.data_root, 'coco')
+        val_dataset = ExternalDataset(args.data_root, 'coco')
+
+    nanodet = NanodetLearner(model_to_use=args.model, iters=args.n_epochs, lr=args.lr, batch_size=args.batch_size,
+                             checkpoint_after_iter=args.checkpoint_freq, checkpoint_load_iter=args.resume_from,
+                             device=args.device)
+
+    nanodet.download("./predefined_examples", mode="pretrained")
+    nanodet.load("./predefined_examples/nanodet-{}/nanodet-{}.ckpt".format(args.model, args.model), verbose=True)
+    nanodet.fit(dataset, val_dataset)
+    nanodet.save()
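
As a quick sanity check after training (or after downloading the pretrained weights the way the demo above does), the new `NanodetLearner` can be exercised with a minimal inference sketch such as the one below. This is not part of the diff: the image path and the `"m"` model variant are illustrative assumptions, and `infer()` is assumed to behave like the toolkit's other 2D detector learners; the drawing call mirrors the inference notebook added earlier in this PR.

```python
# Minimal inference sketch (not part of the diff): load a pretrained nanodet
# checkpoint the way train_demo.py does, detect on one image and save the result.
# The image path and the "m" model variant are illustrative assumptions.
import cv2

from opendr.engine.data import Image
from opendr.perception.object_detection_2d import NanodetLearner, draw_bounding_boxes

nanodet = NanodetLearner(model_to_use="m", device="cpu")
nanodet.download("./predefined_examples", mode="pretrained")
nanodet.load("./predefined_examples/nanodet-m/nanodet-m.ckpt", verbose=True)

img = Image.open("./predefined_examples/000000000036.jpg")  # hypothetical sample image
boxes = nanodet.infer(img)  # assumed to return bounding boxes like the other 2D detectors

annotated = draw_bounding_boxes(img.opencv(), boxes, class_names=nanodet.classes, show=False)
cv2.imwrite("./nanodet_result.jpg", annotated)
```
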
diff --git a/projects/perception/object_detection_2d/nms/cluster_nms/README.md b/projects/python/perception/object_detection_2d/nms/cluster_nms/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/nms/cluster_nms/README.md
rename to projects/python/perception/object_detection_2d/nms/cluster_nms/README.md
diff --git a/projects/perception/object_detection_2d/nms/cluster_nms/inference_demo.py b/projects/python/perception/object_detection_2d/nms/cluster_nms/inference_demo.py
similarity index 92%
rename from projects/perception/object_detection_2d/nms/cluster_nms/inference_demo.py
rename to projects/python/perception/object_detection_2d/nms/cluster_nms/inference_demo.py
index e653f5820cc670ed579e0b6b9b6eb4364c4ec18a..37f1f1d7242b16c327cf0ea8d22b50f305afaef5 100644
--- a/projects/perception/object_detection_2d/nms/cluster_nms/inference_demo.py
+++ b/projects/python/perception/object_detection_2d/nms/cluster_nms/inference_demo.py
@@ -23,7 +23,7 @@ OPENDR_HOME = os.environ['OPENDR_HOME']
 ssd = SingleShotDetectorLearner(device='cuda')
 ssd.download(".", mode="pretrained")
 ssd.load("./ssd_default_person", verbose=True)
-img = Image.open(OPENDR_HOME + '/projects/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
+img = Image.open(OPENDR_HOME + '/projects/python/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
 if not isinstance(img, Image):
     img = Image(img)
 cluster_nms = ClusterNMS(device='cuda', nms_type='default', cross_class=True)
diff --git a/projects/perception/object_detection_2d/nms/fast_nms/README.md b/projects/python/perception/object_detection_2d/nms/fast_nms/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/nms/fast_nms/README.md
rename to projects/python/perception/object_detection_2d/nms/fast_nms/README.md
diff --git a/projects/perception/object_detection_2d/nms/fast_nms/inference_demo.py b/projects/python/perception/object_detection_2d/nms/fast_nms/inference_demo.py
similarity index 91%
rename from projects/perception/object_detection_2d/nms/fast_nms/inference_demo.py
rename to projects/python/perception/object_detection_2d/nms/fast_nms/inference_demo.py
index 5e0a5b48fafcde3461e5f3d215114c08f50d8de5..1582fe8f0b2b449b973199cadd9ecff4953eeff9 100644
--- a/projects/perception/object_detection_2d/nms/fast_nms/inference_demo.py
+++ b/projects/python/perception/object_detection_2d/nms/fast_nms/inference_demo.py
@@ -23,7 +23,7 @@ OPENDR_HOME = os.environ['OPENDR_HOME']
 ssd = SingleShotDetectorLearner(device='cuda')
 ssd.download(".", mode="pretrained")
 ssd.load("./ssd_default_person", verbose=True)
-img = Image.open(OPENDR_HOME + '/projects/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
+img = Image.open(OPENDR_HOME + '/projects/python/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
 if not isinstance(img, Image):
     img = Image(img)
 cluster_nms = FastNMS(device='cpu', cross_class=True)
diff --git a/projects/perception/object_detection_2d/nms/img_temp/frame_0000.jpg b/projects/python/perception/object_detection_2d/nms/img_temp/frame_0000.jpg
similarity index 100%
rename from projects/perception/object_detection_2d/nms/img_temp/frame_0000.jpg
rename to projects/python/perception/object_detection_2d/nms/img_temp/frame_0000.jpg
diff --git a/projects/perception/object_detection_2d/nms/seq2seq-nms/README.md b/projects/python/perception/object_detection_2d/nms/seq2seq-nms/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/nms/seq2seq-nms/README.md
rename to projects/python/perception/object_detection_2d/nms/seq2seq-nms/README.md
diff --git a/projects/perception/object_detection_2d/nms/seq2seq-nms/eval_demo.py b/projects/python/perception/object_detection_2d/nms/seq2seq-nms/eval_demo.py
similarity index 93%
rename from projects/perception/object_detection_2d/nms/seq2seq-nms/eval_demo.py
rename to projects/python/perception/object_detection_2d/nms/seq2seq-nms/eval_demo.py
index 01437e578bc20897198bcbb6516284a9de5bb55a..7110edef4e46f36a0927c752f5618c899f58c439 100644
--- a/projects/perception/object_detection_2d/nms/seq2seq-nms/eval_demo.py
+++ b/projects/python/perception/object_detection_2d/nms/seq2seq-nms/eval_demo.py
@@ -33,13 +33,13 @@ parser.add_argument("--dataset", help="Dataset to train on", type=str, default="
                                                                                                 "TEST_MODULE"])
 parser.add_argument("--data_root", help="Dataset root folder", type=str,
                     default=os.path.join(OPENDR_HOME,
-                                         'projects/perception/object_detection_2d/nms/seq2seq-nms/datasets'))
+                                         'projects/python/perception/object_detection_2d/nms/seq2seq-nms/datasets'))
 parser.add_argument("--use_ssd", help="Train using SSD as detector", type=bool, default=False)
 parser.add_argument("--post_thres", help="Confidence threshold, used for RoI selection after seq2seq-nms rescoring",
                     type=float, default=0.0)
 
 args = parser.parse_args()
-tmp_path = os.path.join(OPENDR_HOME, 'projects/perception/object_detection_2d/nms/seq2seq-nms/tmp')
+tmp_path = os.path.join(OPENDR_HOME, 'projects/python/perception/object_detection_2d/nms/seq2seq-nms/tmp')
 seq2SeqNMSLearner = Seq2SeqNMSLearner(device=args.device, app_feats=args.app_feats, fmod_map_type=args.fmod_type,
                                       iou_filtering=args.iou_filtering,
                                       temp_path=tmp_path)
diff --git a/projects/perception/object_detection_2d/nms/seq2seq-nms/inference_demo.py b/projects/python/perception/object_detection_2d/nms/seq2seq-nms/inference_demo.py
similarity index 91%
rename from projects/perception/object_detection_2d/nms/seq2seq-nms/inference_demo.py
rename to projects/python/perception/object_detection_2d/nms/seq2seq-nms/inference_demo.py
index c260546d13bcf54e18f061187d001183aff6df27..437942bca8ca5452c612ab412131092b215d4ff3 100755
--- a/projects/perception/object_detection_2d/nms/seq2seq-nms/inference_demo.py
+++ b/projects/python/perception/object_detection_2d/nms/seq2seq-nms/inference_demo.py
@@ -31,7 +31,7 @@ parser.add_argument("--pretrained_model", help="Name of pretrained model", type=
                     choices=['seq2seq_pets_jpd'])
 
 args = parser.parse_args()
-tmp_path = os.path.join(OPENDR_HOME, 'projects/perception/object_detection_2d/nms/seq2seq-nms/tmp')
+tmp_path = os.path.join(OPENDR_HOME, 'projects/python/perception/object_detection_2d/nms/seq2seq-nms/tmp')
 seq2SeqNMSLearner = Seq2SeqNMSLearner(device=args.device, app_feats=args.app_feats, fmod_map_type=args.fmod_type,
                                       iou_filtering=args.iou_filtering,
                                       temp_path=tmp_path)
@@ -41,7 +41,7 @@ seq2SeqNMSLearner.load(os.path.join(tmp_path, args.pretrained_model), verbose=Tr
 ssd = SingleShotDetectorLearner(device=args.device)
 ssd.download(".", mode="pretrained")
 ssd.load("./ssd_default_person", verbose=True)
-img = Image.open(OPENDR_HOME + '/projects/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
+img = Image.open(OPENDR_HOME + '/projects/python/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
 if not isinstance(img, Image):
     img = Image(img)
 boxes = ssd.infer(img, threshold=0.3, custom_nms=seq2SeqNMSLearner)
diff --git a/projects/perception/object_detection_2d/nms/seq2seq-nms/train_demo.py b/projects/python/perception/object_detection_2d/nms/seq2seq-nms/train_demo.py
similarity index 94%
rename from projects/perception/object_detection_2d/nms/seq2seq-nms/train_demo.py
rename to projects/python/perception/object_detection_2d/nms/seq2seq-nms/train_demo.py
index 4facf2696b19eab6550d5b5529e9fa41355b6d2f..843517214ae93a0aa658f2ab947cf9ccb34f5627 100644
--- a/projects/perception/object_detection_2d/nms/seq2seq-nms/train_demo.py
+++ b/projects/python/perception/object_detection_2d/nms/seq2seq-nms/train_demo.py
@@ -28,7 +28,7 @@ parser.add_argument("--device", help="Device to use (cpu, cuda)", type=str, defa
 parser.add_argument("--lr", help="Learning rate to use for training", type=float, default=1e-4)
 parser.add_argument("--n_epochs", help="Number of total epochs", type=int, default=10)
 parser.add_argument("--tmp_path", help="Temporary path where weights will be saved", type=str,
-                    default=os.path.join(OPENDR_HOME, 'projects/perception/object_detection_2d/nms/seq2seq-nms/tmp'))
+                    default=os.path.join(OPENDR_HOME, 'projects/python/perception/object_detection_2d/nms/seq2seq-nms/tmp'))
 parser.add_argument("--checkpoint_freq", help="Frequency in-between checkpoint saving", type=int, default=1)
 parser.add_argument("--resume-from", help="Epoch to load checkpoint file and resume training from", type=int, default=0)
 parser.add_argument("--dataset", help="Dataset to train on", type=str, default="PETS", choices=["PETS", "COCO",
@@ -37,7 +37,7 @@ parser.add_argument("--use_ssd", help="Train using SSD as default detector", typ
 parser.add_argument("--max_dt_boxes", help="Maximum number of input RoIs fed to Seq2Seq-NMS", type=int, default=500)
 parser.add_argument("--data-root", help="Dataset root folder", type=str,
                     default=os.path.join(OPENDR_HOME,
-                                         'projects/perception/object_detection_2d/nms/seq2seq-nms/datasets'))
+                                         'projects/python/perception/object_detection_2d/nms/seq2seq-nms/datasets'))
 args = parser.parse_args()
 seq2SeqNMSLearner = Seq2SeqNMSLearner(epochs=args.n_epochs, lr=args.lr, device=args.device, app_feats=args.app_feats,
                                       fmod_map_type=args.fmod_type, iou_filtering=args.iou_filtering,
diff --git a/projects/perception/object_detection_2d/nms/soft_nms/README.md b/projects/python/perception/object_detection_2d/nms/soft_nms/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/nms/soft_nms/README.md
rename to projects/python/perception/object_detection_2d/nms/soft_nms/README.md
diff --git a/projects/perception/object_detection_2d/nms/soft_nms/inference_demo.py b/projects/python/perception/object_detection_2d/nms/soft_nms/inference_demo.py
similarity index 92%
rename from projects/perception/object_detection_2d/nms/soft_nms/inference_demo.py
rename to projects/python/perception/object_detection_2d/nms/soft_nms/inference_demo.py
index c05ff4c7c2515fbf02dd6ebdbc77025e4ef76d8e..c34d9fe46d06190053edb561a789b4e7df51dea7 100644
--- a/projects/perception/object_detection_2d/nms/soft_nms/inference_demo.py
+++ b/projects/python/perception/object_detection_2d/nms/soft_nms/inference_demo.py
@@ -23,7 +23,7 @@ OPENDR_HOME = os.environ['OPENDR_HOME']
 ssd = SingleShotDetectorLearner(device='cuda')
 ssd.download(".", mode="pretrained")
 ssd.load("./ssd_default_person", verbose=True)
-img = Image.open(OPENDR_HOME + '/projects/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
+img = Image.open(OPENDR_HOME + '/projects/python/perception/object_detection_2d/nms/img_temp/frame_0000.jpg')
 if not isinstance(img, Image):
     img = Image(img)
 cluster_nms = SoftNMS(device='cpu', nms_type='gaussian')
diff --git a/projects/perception/object_detection_2d/retinaface/README.md b/projects/python/perception/object_detection_2d/retinaface/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/retinaface/README.md
rename to projects/python/perception/object_detection_2d/retinaface/README.md
diff --git a/projects/perception/object_detection_2d/retinaface/eval_demo.py b/projects/python/perception/object_detection_2d/retinaface/eval_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/retinaface/eval_demo.py
rename to projects/python/perception/object_detection_2d/retinaface/eval_demo.py
diff --git a/projects/perception/object_detection_2d/retinaface/inference_demo.py b/projects/python/perception/object_detection_2d/retinaface/inference_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/retinaface/inference_demo.py
rename to projects/python/perception/object_detection_2d/retinaface/inference_demo.py
diff --git a/projects/perception/object_detection_2d/retinaface/inference_tutorial.ipynb b/projects/python/perception/object_detection_2d/retinaface/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/object_detection_2d/retinaface/inference_tutorial.ipynb
rename to projects/python/perception/object_detection_2d/retinaface/inference_tutorial.ipynb
diff --git a/projects/perception/object_detection_2d/retinaface/train_demo.py b/projects/python/perception/object_detection_2d/retinaface/train_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/retinaface/train_demo.py
rename to projects/python/perception/object_detection_2d/retinaface/train_demo.py
diff --git a/projects/perception/object_detection_2d/ssd/README.md b/projects/python/perception/object_detection_2d/ssd/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/ssd/README.md
rename to projects/python/perception/object_detection_2d/ssd/README.md
diff --git a/projects/perception/object_detection_2d/ssd/eval_demo.py b/projects/python/perception/object_detection_2d/ssd/eval_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/ssd/eval_demo.py
rename to projects/python/perception/object_detection_2d/ssd/eval_demo.py
diff --git a/projects/perception/object_detection_2d/ssd/inference_demo.py b/projects/python/perception/object_detection_2d/ssd/inference_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/ssd/inference_demo.py
rename to projects/python/perception/object_detection_2d/ssd/inference_demo.py
diff --git a/projects/perception/object_detection_2d/ssd/inference_tutorial.ipynb b/projects/python/perception/object_detection_2d/ssd/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/object_detection_2d/ssd/inference_tutorial.ipynb
rename to projects/python/perception/object_detection_2d/ssd/inference_tutorial.ipynb
diff --git a/projects/perception/object_detection_2d/ssd/train_demo.py b/projects/python/perception/object_detection_2d/ssd/train_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/ssd/train_demo.py
rename to projects/python/perception/object_detection_2d/ssd/train_demo.py
diff --git a/projects/perception/object_detection_2d/yolov3/README.md b/projects/python/perception/object_detection_2d/yolov3/README.md
similarity index 100%
rename from projects/perception/object_detection_2d/yolov3/README.md
rename to projects/python/perception/object_detection_2d/yolov3/README.md
diff --git a/projects/perception/object_detection_2d/yolov3/eval_demo.py b/projects/python/perception/object_detection_2d/yolov3/eval_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/yolov3/eval_demo.py
rename to projects/python/perception/object_detection_2d/yolov3/eval_demo.py
diff --git a/projects/perception/object_detection_2d/yolov3/inference_demo.py b/projects/python/perception/object_detection_2d/yolov3/inference_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/yolov3/inference_demo.py
rename to projects/python/perception/object_detection_2d/yolov3/inference_demo.py
diff --git a/projects/perception/object_detection_2d/yolov3/inference_tutorial.ipynb b/projects/python/perception/object_detection_2d/yolov3/inference_tutorial.ipynb
similarity index 100%
rename from projects/perception/object_detection_2d/yolov3/inference_tutorial.ipynb
rename to projects/python/perception/object_detection_2d/yolov3/inference_tutorial.ipynb
diff --git a/projects/perception/object_detection_2d/yolov3/train_demo.py b/projects/python/perception/object_detection_2d/yolov3/train_demo.py
similarity index 100%
rename from projects/perception/object_detection_2d/yolov3/train_demo.py
rename to projects/python/perception/object_detection_2d/yolov3/train_demo.py
diff --git a/projects/perception/object_detection_3d/benchmark/.gitignore b/projects/python/perception/object_detection_3d/benchmark/.gitignore
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/.gitignore
rename to projects/python/perception/object_detection_3d/benchmark/.gitignore
diff --git a/projects/perception/object_detection_3d/benchmark/benchmark_voxel.py b/projects/python/perception/object_detection_3d/benchmark/benchmark_voxel.py
similarity index 97%
rename from projects/perception/object_detection_3d/benchmark/benchmark_voxel.py
rename to projects/python/perception/object_detection_3d/benchmark/benchmark_voxel.py
index 05690550c450076e93cb436e289370a1df6767f1..eae1a6d6e34a498b8257e5ec61711bc8c1106d94 100644
--- a/projects/perception/object_detection_3d/benchmark/benchmark_voxel.py
+++ b/projects/python/perception/object_detection_3d/benchmark/benchmark_voxel.py
@@ -26,7 +26,7 @@ logger.setLevel("DEBUG")
 
 
 def benchmark_voxel():
-    root_dir = "./projects/perception/object_detection_3d/benchmark"
+    root_dir = "./projects/python/perception/object_detection_3d/benchmark"
     temp_dir = root_dir + "/tmp"
     configs_dir = root_dir + "/configs"
     models_dir = root_dir + "/models"
diff --git a/projects/perception/object_detection_3d/benchmark/configs/pointpillars_car_xyres_16.proto b/projects/python/perception/object_detection_3d/benchmark/configs/pointpillars_car_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/pointpillars_car_xyres_16.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/pointpillars_car_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/benchmark/configs/pointpillars_ped_cycle_xyres_16.proto b/projects/python/perception/object_detection_3d/benchmark/configs/pointpillars_ped_cycle_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/pointpillars_ped_cycle_xyres_16.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/pointpillars_ped_cycle_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16.proto b/projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24.proto b/projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24.proto
diff --git a/projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24_2.proto b/projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24_2.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24_2.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.24_2.proto
diff --git a/projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.33.proto b/projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.33.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.33.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.33.proto
diff --git a/projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.5.proto b/projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.5.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.5.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/tanet_car_xyres_16_near_0.5.proto
diff --git a/projects/perception/object_detection_3d/benchmark/configs/tanet_ped_cycle_xyres_16.proto b/projects/python/perception/object_detection_3d/benchmark/configs/tanet_ped_cycle_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/configs/tanet_ped_cycle_xyres_16.proto
rename to projects/python/perception/object_detection_3d/benchmark/configs/tanet_ped_cycle_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/benchmark/media/000000.bin b/projects/python/perception/object_detection_3d/benchmark/media/000000.bin
similarity index 100%
rename from projects/perception/object_detection_3d/benchmark/media/000000.bin
rename to projects/python/perception/object_detection_3d/benchmark/media/000000.bin
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/README.md b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/README.md
similarity index 92%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/README.md
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/README.md
index 0a8303a1be2d3db9e61588d740d7f52083441853..b6b48042c8cb3b98986eac8cfff11ae342694125 100644
--- a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/README.md
+++ b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/README.md
@@ -19,12 +19,12 @@ pip install -e .
 ## Running the example
 Car 3D Object Detection using [TANet](https://arxiv.org/abs/1912.05163) from [KITTI](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)-like dataset
 ```bash
-python3 demo.py --ip=0.0.0.0 --port=2605 --algorithm=voxel --model_name=tanet_car_xyres_16 --source=disk --data_path=/data/sets/kitti_second/training/velodyne --model_config=configs/tanet_car_xyres_16.proto
+python3 demo.py --ip=0.0.0.0 --port=2605 --algorithm=voxel --model_name=tanet_car_xyres_16 --source=disk --data_path=/data/sets/kitti_tracking/training/velodyne/0000 --model_config=configs/tanet_car_xyres_16.proto
 ```
 
 Car 3D Object Detection using [PointPillars](https://arxiv.org/abs/1812.05784) from [KITTI](http://www.cvlibs.net/datasets/kitti/eval_object.php?obj_benchmark=3d)-like dataset
 ```bash
-python3 demo.py --ip=0.0.0.0 --port=2605 --algorithm=voxel --model_name=pointpillars_car_xyres_16 --source=disk --data_path=/data/sets/kitti_second/training/velodyne --model_config=configs/tanet_car_xyres_16.proto
+python3 demo.py --ip=0.0.0.0 --port=2605 --algorithm=voxel --model_name=pointpillars_car_xyres_16 --source=disk --data_path=/data/sets/kitti_tracking/training/velodyne/0000 --model_config=configs/pointpillars_car_xyres_16.proto
 ```
 
 3D Object Detection using a specially trained model X for O3M Lidar
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/__init__.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/__init__.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/__init__.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_car_xyres_16.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_car_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_car_xyres_16.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_car_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_ped_cycle_xyres_16.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_ped_cycle_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_ped_cycle_xyres_16.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/pointpillars_ped_cycle_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24_2.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24_2.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24_2.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.24_2.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.33.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.33.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.33.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.33.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.5.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.5.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.5.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_car_xyres_16_near_0.5.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_ped_cycle_xyres_16.proto b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_ped_cycle_xyres_16.proto
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_ped_cycle_xyres_16.proto
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/configs/tanet_ped_cycle_xyres_16.proto
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/data_generators.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/data_generators.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/data_generators.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/data_generators.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/demo.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/demo.py
similarity index 97%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/demo.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/demo.py
index d113b26a050d8f387f0fc26c07c337f1211aa3b3..2ce25d77335212873bd8b072af2c21232a459262 100644
--- a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/demo.py
+++ b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/demo.py
@@ -23,6 +23,7 @@ from flask import Flask, Response, render_template, request
 
 # OpenDR imports
 from opendr.perception.object_detection_3d import VoxelObjectDetection3DLearner
+from opendr.perception.object_tracking_3d import ObjectTracking3DAb3dmotLearner
 from data_generators import (
     lidar_point_cloud_generator,
     disk_point_cloud_generator,
@@ -162,6 +163,7 @@ def voxel_object_detection_3d(config_path, model_name=None):
 
         # Init model
         detection_learner = VoxelObjectDetection3DLearner(config_path)
+        tracking_learner = ObjectTracking3DAb3dmotLearner()
 
         if model_name is not None and not os.path.exists(
             "./models/" + model_name
@@ -172,6 +174,7 @@ def voxel_object_detection_3d(config_path, model_name=None):
 
     else:
         detection_learner = None
+        tracking_learner = None
 
     def process_key(key):
 
@@ -284,8 +287,10 @@ def voxel_object_detection_3d(config_path, model_name=None):
 
             if predict:
                 predictions = detection_learner.infer(point_cloud)
+                tracking_predictions = tracking_learner.infer(predictions)
             else:
                 predictions = []
+                tracking_predictions = []
 
             if len(predictions) > 0:
                 print(
@@ -296,7 +301,7 @@ def voxel_object_detection_3d(config_path, model_name=None):
             t = time.time()
 
             frame_bev_2 = draw_point_cloud_bev(
-                point_cloud.data, predictions, scale, xs, ys
+                point_cloud.data, tracking_predictions, scale, xs, ys
             )
             frame_proj_2 = draw_point_cloud_projected_numpy(
                 point_cloud.data,
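
The demo.py hunk above chains the voxel detector with the newly imported AB3DMOT tracker: the per-frame detections are passed straight to `tracking_learner.infer()`, and the tracked boxes are what get drawn in the BEV view. A condensed sketch of that pattern is shown below; model weight downloading/loading and the Flask streaming code are omitted, and `point_clouds` is a hypothetical iterable of OpenDR point-cloud frames, so treat this as an illustration rather than the demo itself.

```python
# Condensed sketch of the detection-to-tracking chain introduced in demo.py above.
# Weight loading and the web-streaming plumbing are omitted; `point_clouds` is a
# hypothetical iterable of opendr.engine.data.PointCloud frames.
from opendr.perception.object_detection_3d import VoxelObjectDetection3DLearner
from opendr.perception.object_tracking_3d import ObjectTracking3DAb3dmotLearner


def detect_and_track(point_clouds, config_path="configs/tanet_car_xyres_16.proto"):
    # Config path defaults to one of the .proto files shipped with this demo folder.
    detection_learner = VoxelObjectDetection3DLearner(config_path)
    tracking_learner = ObjectTracking3DAb3dmotLearner()

    tracked_frames = []
    for point_cloud in point_clouds:
        predictions = detection_learner.infer(point_cloud)          # per-frame 3D boxes
        tracked_frames.append(tracking_learner.infer(predictions))  # boxes with track ids
    return tracked_frames
```
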
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/draw_point_clouds.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/draw_point_clouds.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/draw_point_clouds.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/draw_point_clouds.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/fonts/.gitignore b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/fonts/.gitignore
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/fonts/.gitignore
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/fonts/.gitignore
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/media/demo.png b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/media/demo.png
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/media/demo.png
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/media/demo.png
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/metrics.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/metrics.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/metrics.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/metrics.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/models/.gitignore b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/models/.gitignore
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/models/.gitignore
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/models/.gitignore
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/channel.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/channel.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/channel.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/channel.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/main.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/main.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/main.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/main.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/o3m_lidar.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/o3m_lidar.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/o3m_lidar.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/o3m_lidar.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/structures.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/structures.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/structures.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/o3m_lidar/structures.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/requirements.txt b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/requirements.txt
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/requirements.txt
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/requirements.txt
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/rplidar_processor.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/rplidar_processor.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/rplidar_processor.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/rplidar_processor.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/setup.py b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/setup.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/setup.py
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/setup.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/templates/index.html b/projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/templates/index.html
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/templates/index.html
rename to projects/python/perception/object_detection_3d/demos/voxel_object_detection_3d/templates/index.html
diff --git a/projects/perception/object_tracking_2d/benchmark/.gitignore b/projects/python/perception/object_tracking_2d/benchmark/.gitignore
similarity index 100%
rename from projects/perception/object_tracking_2d/benchmark/.gitignore
rename to projects/python/perception/object_tracking_2d/benchmark/.gitignore
diff --git a/projects/perception/object_tracking_2d/benchmark/benchmark_deep_sort.py b/projects/python/perception/object_tracking_2d/benchmark/benchmark_deep_sort.py
similarity index 98%
rename from projects/perception/object_tracking_2d/benchmark/benchmark_deep_sort.py
rename to projects/python/perception/object_tracking_2d/benchmark/benchmark_deep_sort.py
index 5cd6ce8e836aca689b79508b35fd6cbeadf6e26b..01e0643f3f7754df24a2023f60a6e7ece7dc51ba 100644
--- a/projects/perception/object_tracking_2d/benchmark/benchmark_deep_sort.py
+++ b/projects/python/perception/object_tracking_2d/benchmark/benchmark_deep_sort.py
@@ -29,7 +29,7 @@ logger.setLevel("DEBUG")
 
 
 def benchmark_fair_mot():
-    root_dir = "./projects/perception/object_tracking_2d/benchmark"
+    root_dir = "./projects/python/perception/object_tracking_2d/benchmark"
     temp_dir = root_dir + "/tmp"
     models_dir = root_dir + "/models"
     num_runs = 100
diff --git a/projects/perception/object_tracking_2d/benchmark/benchmark_fair_mot.py b/projects/python/perception/object_tracking_2d/benchmark/benchmark_fair_mot.py
similarity index 97%
rename from projects/perception/object_tracking_2d/benchmark/benchmark_fair_mot.py
rename to projects/python/perception/object_tracking_2d/benchmark/benchmark_fair_mot.py
index 23f205fe790badeab0f6443e70ea1428d1eef390..c94bd23fec7d5e885aca06f2dcd413625c7e4282 100644
--- a/projects/perception/object_tracking_2d/benchmark/benchmark_fair_mot.py
+++ b/projects/python/perception/object_tracking_2d/benchmark/benchmark_fair_mot.py
@@ -26,7 +26,7 @@ logger.setLevel("DEBUG")
 
 
 def benchmark_fair_mot():
-    root_dir = "./projects/perception/object_tracking_2d/benchmark"
+    root_dir = "./projects/python/perception/object_tracking_2d/benchmark"
     temp_dir = root_dir + "/tmp"
     models_dir = root_dir + "/models"
     media_dir = root_dir + "/media"
diff --git a/projects/perception/object_tracking_2d/benchmark/media/000001.jpg b/projects/python/perception/object_tracking_2d/benchmark/media/000001.jpg
similarity index 100%
rename from projects/perception/object_tracking_2d/benchmark/media/000001.jpg
rename to projects/python/perception/object_tracking_2d/benchmark/media/000001.jpg
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/.gitignore b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/.gitignore
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/.gitignore
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/.gitignore
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/README.md b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/README.md
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/README.md
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/README.md
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/data_generators.py b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/data_generators.py
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/data_generators.py
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/data_generators.py
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/demo.py b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/demo.py
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/demo.py
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/demo.py
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/media/video.gif b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/media/video.gif
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/media/video.gif
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/media/video.gif
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/requirements.txt b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/requirements.txt
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/requirements.txt
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/requirements.txt
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/setup.py b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/setup.py
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/setup.py
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/setup.py
diff --git a/projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/templates/index.html b/projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/templates/index.html
similarity index 100%
rename from projects/perception/object_tracking_2d/demos/fair_mot_deep_sort/templates/index.html
rename to projects/python/perception/object_tracking_2d/demos/fair_mot_deep_sort/templates/index.html
diff --git a/projects/perception/object_tracking_3d/benchmark/.gitignore b/projects/python/perception/object_tracking_3d/benchmark/.gitignore
similarity index 100%
rename from projects/perception/object_tracking_3d/benchmark/.gitignore
rename to projects/python/perception/object_tracking_3d/benchmark/.gitignore
diff --git a/projects/perception/object_tracking_3d/benchmark/benchmark_ab3dmot.py b/projects/python/perception/object_tracking_3d/benchmark/benchmark_ab3dmot.py
similarity index 97%
rename from projects/perception/object_tracking_3d/benchmark/benchmark_ab3dmot.py
rename to projects/python/perception/object_tracking_3d/benchmark/benchmark_ab3dmot.py
index f86caa334cd057242eccba0a8bf24a9110cf4889..ec4d6e12c87db3d14e36b4d71f25957f074ccded 100644
--- a/projects/perception/object_tracking_3d/benchmark/benchmark_ab3dmot.py
+++ b/projects/python/perception/object_tracking_3d/benchmark/benchmark_ab3dmot.py
@@ -25,7 +25,7 @@ logger.setLevel("DEBUG")
 
 
 def benchmark_ab3dmot():
-    root_dir = "./projects/perception/object_tracking_3d/benchmark"
+    root_dir = "./projects/python/perception/object_tracking_3d/benchmark"
     media_dir = root_dir + "/media"
     num_runs = 100
 
diff --git a/projects/perception/object_tracking_3d/benchmark/media/0000.txt b/projects/python/perception/object_tracking_3d/benchmark/media/0000.txt
similarity index 100%
rename from projects/perception/object_tracking_3d/benchmark/media/0000.txt
rename to projects/python/perception/object_tracking_3d/benchmark/media/0000.txt
diff --git a/projects/perception/panoptic_segmentation/efficient_ps/README.md b/projects/python/perception/panoptic_segmentation/efficient_ps/README.md
similarity index 100%
rename from projects/perception/panoptic_segmentation/efficient_ps/README.md
rename to projects/python/perception/panoptic_segmentation/efficient_ps/README.md
diff --git a/projects/perception/panoptic_segmentation/efficient_ps/example_usage.py b/projects/python/perception/panoptic_segmentation/efficient_ps/example_usage.py
similarity index 100%
rename from projects/perception/panoptic_segmentation/efficient_ps/example_usage.py
rename to projects/python/perception/panoptic_segmentation/efficient_ps/example_usage.py
diff --git a/projects/perception/semantic_segmentation/bisenet/README.md b/projects/python/perception/semantic_segmentation/bisenet/README.md
similarity index 100%
rename from projects/perception/semantic_segmentation/bisenet/README.md
rename to projects/python/perception/semantic_segmentation/bisenet/README.md
diff --git a/projects/perception/semantic_segmentation/bisenet/eval_demo.py b/projects/python/perception/semantic_segmentation/bisenet/eval_demo.py
similarity index 100%
rename from projects/perception/semantic_segmentation/bisenet/eval_demo.py
rename to projects/python/perception/semantic_segmentation/bisenet/eval_demo.py
diff --git a/projects/perception/semantic_segmentation/bisenet/inference_demo.py b/projects/python/perception/semantic_segmentation/bisenet/inference_demo.py
similarity index 100%
rename from projects/perception/semantic_segmentation/bisenet/inference_demo.py
rename to projects/python/perception/semantic_segmentation/bisenet/inference_demo.py
diff --git a/projects/perception/semantic_segmentation/bisenet/train_demo.py b/projects/python/perception/semantic_segmentation/bisenet/train_demo.py
similarity index 100%
rename from projects/perception/semantic_segmentation/bisenet/train_demo.py
rename to projects/python/perception/semantic_segmentation/bisenet/train_demo.py
diff --git a/projects/perception/skeleton_based_action_recognition/REAMDE.md b/projects/python/perception/skeleton_based_action_recognition/REAMDE.md
similarity index 100%
rename from projects/perception/skeleton_based_action_recognition/REAMDE.md
rename to projects/python/perception/skeleton_based_action_recognition/REAMDE.md
diff --git a/projects/perception/skeleton_based_action_recognition/benchmark/benchmark_stgcn.py b/projects/python/perception/skeleton_based_action_recognition/benchmark/benchmark_stgcn.py
similarity index 100%
rename from projects/perception/skeleton_based_action_recognition/benchmark/benchmark_stgcn.py
rename to projects/python/perception/skeleton_based_action_recognition/benchmark/benchmark_stgcn.py
diff --git a/projects/perception/skeleton_based_action_recognition/demos/demo.py b/projects/python/perception/skeleton_based_action_recognition/demos/demo.py
similarity index 100%
rename from projects/perception/skeleton_based_action_recognition/demos/demo.py
rename to projects/python/perception/skeleton_based_action_recognition/demos/demo.py
diff --git a/projects/perception/skeleton_based_action_recognition/demos/ntu60_labels.csv b/projects/python/perception/skeleton_based_action_recognition/demos/ntu60_labels.csv
similarity index 100%
rename from projects/perception/skeleton_based_action_recognition/demos/ntu60_labels.csv
rename to projects/python/perception/skeleton_based_action_recognition/demos/ntu60_labels.csv
diff --git a/projects/perception/skeleton_based_action_recognition/demos/samples_with_missing_skeletons.txt b/projects/python/perception/skeleton_based_action_recognition/demos/samples_with_missing_skeletons.txt
similarity index 100%
rename from projects/perception/skeleton_based_action_recognition/demos/samples_with_missing_skeletons.txt
rename to projects/python/perception/skeleton_based_action_recognition/demos/samples_with_missing_skeletons.txt
diff --git a/projects/perception/skeleton_based_action_recognition/demos/skeleton_extraction.py b/projects/python/perception/skeleton_based_action_recognition/demos/skeleton_extraction.py
similarity index 100%
rename from projects/perception/skeleton_based_action_recognition/demos/skeleton_extraction.py
rename to projects/python/perception/skeleton_based_action_recognition/demos/skeleton_extraction.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/README.md b/projects/python/perception/slam/full_map_posterior_gmapping/README.md
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/README.md
rename to projects/python/perception/slam/full_map_posterior_gmapping/README.md
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/CMakeLists.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/CMakeLists.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/CMakeLists.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/CMakeLists.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/README.md b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/README.md
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/README.md
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/README.md
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/default.rviz b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/default.rviz
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/default.rviz
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/default.rviz
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/gt_map.rviz b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/gt_map.rviz
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/gt_map.rviz
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/cfg/rviz/gt_map.rviz
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/experiment_real_data.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/gt_map.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/gt_map.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/gt_map.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/gt_map.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/test_computeR.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/test_computeR.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/test_computeR.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/launch/test_computeR.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/err_collector b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/err_collector
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/err_collector
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/err_collector
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/fmp_plot b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/fmp_plot
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/fmp_plot
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/fmp_plot
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/gt_mapping b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/gt_mapping
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/gt_mapping
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/gt_mapping
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/occ_map_saver b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/occ_map_saver
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/occ_map_saver
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/occ_map_saver
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/odom_pose b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/odom_pose
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/odom_pose
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/odom_pose
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/pose_error_calc b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/pose_error_calc
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/pose_error_calc
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/nodes/pose_error_calc
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/package.xml b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/package.xml
similarity index 97%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/package.xml
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/package.xml
index db53efca551ab6cb1f9f0246f1e333694326c657..62d9d562bc41293052280df14d78d63e10708eac 100644
--- a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/package.xml
+++ b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <package format="2">
   <name>fmp_slam_eval</name>
-  <version>1.0.0</version>
+  <version>1.1.1</version>
   <description>FMP SLAM Evaluation Package</description>
 
   <maintainer email="jose.arce@students.uni-freiburg.de">Jose Arce</maintainer>
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_curves.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_curves.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_curves.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_curves.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_histograms.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_histograms.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_histograms.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/err_histograms.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/method_comparison.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/method_comparison.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/method_comparison.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/scripts/method_comparison.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/setup.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/setup.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/setup.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/setup.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/disc_states.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/disc_states.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/disc_states.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/enums/disc_states.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/error_data_collector.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/error_data_collector.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/error_data_collector.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/error_data_collector.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/fmp_plotter.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/fmp_plotter.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/fmp_plotter.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/fmp_plotter.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ground_truth_mapping.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ground_truth_mapping.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ground_truth_mapping.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ground_truth_mapping.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/map_colorizer.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/map_colorizer.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/map_colorizer.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/map_colorizer.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/net_utils.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/net_utils.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/net_utils.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/net_utils.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/occ_map_saver.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/occ_map_saver.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/occ_map_saver.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/occ_map_saver.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/odom_pose_publisher.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/odom_pose_publisher.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/odom_pose_publisher.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/odom_pose_publisher.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/pose_error_calculator.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/pose_error_calculator.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/pose_error_calculator.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/pose_error_calculator.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ros_launcher.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ros_launcher.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ros_launcher.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/ros_launcher.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/roscore.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/roscore.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/roscore.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/roscore.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_blue.urdf b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_blue.urdf
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_blue.urdf
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_blue.urdf
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_green.urdf b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_green.urdf
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_green.urdf
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_green.urdf
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_red.urdf b/projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_red.urdf
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_red.urdf
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/urdf/simple_robot_red.urdf
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/CMakeLists.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/CMakeLists.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/CMakeLists.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/CMakeLists.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/README.md b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/README.md
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/README.md
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/README.md
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/launch/mapsim2d.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/launch/mapsim2d.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/launch/mapsim2d.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/launch/mapsim2d.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/package.xml b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/package.xml
similarity index 97%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/package.xml
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/package.xml
index 3c47d8d9a4d1911ce466f57e6f8a75c424bd03f2..5a2c18310740fdf8eb18277666fe335712b9a0bc 100644
--- a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/package.xml
+++ b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/package.xml
@@ -1,7 +1,7 @@
 <?xml version="1.0"?>
 <package format="2">
   <name>map_simulator</name>
-  <version>1.0.0</version>
+  <version>1.1.1</version>
   <description>The 2D Map Simulator package for generating datasets for testing and evaluating SLAM algorithms</description>
 
   <maintainer email="jose.arce@students.uni-freiburg.de">Jose Arce</maintainer>
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/rosbags/Robot_10Loop_noisy_3_2pi_180rays.bag b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/rosbags/Robot_10Loop_noisy_3_2pi_180rays.bag
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/rosbags/Robot_10Loop_noisy_3_2pi_180rays.bag
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/rosbags/Robot_10Loop_noisy_3_2pi_180rays.bag
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/Common.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/Common.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/Common.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/Common.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Cell.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Cell.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Cell.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Cell.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l020m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l020m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l020m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l020m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l030m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l030m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l030m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l030m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l040m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l040m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l040m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l040m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l050m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l050m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l050m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l050m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l060m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l060m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l060m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l060m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l070m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l070m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l070m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l070m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l080m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l080m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l080m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l080m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l090m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l090m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l090m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l090m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m_LocOnly.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m_LocOnly.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m_LocOnly.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_1l100m_LocOnly.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l120m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l120m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l120m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l120m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l140m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l140m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l140m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l140m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l160m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l160m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l160m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l160m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l180m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l180m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l180m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l180m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l200m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l200m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l200m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_2l200m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l240m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l240m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l240m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l240m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l270m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l270m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l270m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l270m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l300m.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l300m.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l300m.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_10Loop_3l300m.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_45deg.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_45deg.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_45deg.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_45deg.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_CellTest.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_CellTest.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_CellTest.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_CellTest.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_EOS.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_EOS.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_EOS.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/commands/CMD_EOS.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Cell.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Cell.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Cell.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Cell.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop_window.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop_window.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop_window.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_10Loop_window.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_45deg.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_45deg.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_45deg.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_45deg.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_CellTest.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_CellTest.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_CellTest.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/maps/Map_CellTest.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_1ray.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_1ray.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_1ray.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_1ray.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_2pi_180rays.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_2pi_180rays.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_2pi_180rays.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_2pi_180rays.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_3_2pi_180rays.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_3_2pi_180rays.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_3_2pi_180rays.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_det_3_2pi_180rays.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_1ray.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_1ray.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_1ray.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_1ray.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_2pi_180rays.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_2pi_180rays.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_2pi_180rays.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_2pi_180rays.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_3_2pi_180rays.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_3_2pi_180rays.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_3_2pi_180rays.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Cell_noisy_3_2pi_180rays.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_det_3_2pi_180rays.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_det_3_2pi_180rays.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_det_3_2pi_180rays.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_det_3_2pi_180rays.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_noisy_3_2pi_180rays.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_noisy_3_2pi_180rays.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_noisy_3_2pi_180rays.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_10Loop_noisy_3_2pi_180rays.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_45deg_det_1ray.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_45deg_det_1ray.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_45deg_det_1ray.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_45deg_det_1ray.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_CellTest_det_8ray.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_CellTest_det_8ray.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_CellTest_det_8ray.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_CellTest_det_8ray.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_020m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_020m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_020m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_020m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_030m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_030m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_030m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_030m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_040m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_040m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_040m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_040m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_050m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_050m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_050m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_050m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_060m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_060m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_060m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_060m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_070m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_070m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_070m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_070m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_080m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_080m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_080m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_080m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_090m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_090m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_090m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_090m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_100m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_100m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_100m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_100m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_120m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_120m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_120m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_120m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_140m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_140m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_140m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_140m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_160m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_160m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_160m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_160m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_180m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_180m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_180m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_180m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_200m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_200m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_200m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_200m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_240m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_240m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_240m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_240m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_270m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_270m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_270m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_270m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_300m1loc.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_300m1loc.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_300m1loc.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/robots/Robot_Exp_10Loop_300m1loc.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_2pi.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_2pi.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_2pi.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_2pi.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_3_2pi.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_3_2pi.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_3_2pi.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_180Ray_3_2pi.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_1Ray.json b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_1Ray.json
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_1Ray.json
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scenarios/sensors/Sensor_1Ray.json
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scripts/mapsim2d.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scripts/mapsim2d.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/scripts/mapsim2d.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/scripts/mapsim2d.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/setup.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/setup.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/setup.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/setup.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/__init__.py
diff --git a/projects/perception/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/__init__.py
similarity index 100%
rename from projects/perception/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/closed_shape_2D.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/closed_shape_2D.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/closed_shape_2D.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/closed_shape_2D.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/line.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/line.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/line.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/line.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/polygon.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/polygon.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/polygon.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/polygon.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/pose.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/pose.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/pose.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/primitives/pose.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/transform.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/transform.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/transform.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/transform.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/obstacle.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/obstacle.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/obstacle.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/obstacle.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/polygonal_obstacle.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/polygonal_obstacle.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/polygonal_obstacle.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_obstacles/polygonal_obstacle.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_simulator_2d.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_simulator_2d.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_simulator_2d.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/map_simulator_2d.py
diff --git a/projects/perception/activity_recognition/demos/online_recognition/activity_recognition/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/__init__.py
similarity index 100%
rename from projects/perception/activity_recognition/demos/online_recognition/activity_recognition/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/command.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/command.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/command.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/command.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/bool_msg_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/bool_msg_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/bool_msg_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/bool_msg_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/message_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/message_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/message_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/message/message_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/comment_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/comment_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/comment_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/comment_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/misc_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/misc_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/misc_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/misc_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/scan_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/scan_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/scan_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/scan_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/sleep_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/sleep_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/sleep_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/misc/sleep_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/__init__.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/__init__.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_circular_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_circular_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_circular_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_circular_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_interpol_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_interpol_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_interpol_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_interpol_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_linear_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_linear_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_linear_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_linear_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_pose_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_pose_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_pose_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_pose_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_rotation_cmd.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_rotation_cmd.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_rotation_cmd.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/move/move_rotation_cmd.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/utils.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/utils.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/utils.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/utils.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CHANGELOG.rst b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CHANGELOG.rst
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CHANGELOG.rst
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CHANGELOG.rst
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CMakeLists.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CMakeLists.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CMakeLists.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/CMakeLists.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/TODO.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/TODO.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/TODO.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/TODO.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.app b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.app
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.app
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.app
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.generic-shared-object b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.generic-shared-object
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.generic-shared-object
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.generic-shared-object
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.subdirs b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.subdirs
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.subdirs
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/Makefile.subdirs
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/generate_shared_object b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/generate_shared_object
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/generate_shared_object
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/generate_shared_object
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/message b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/message
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/message
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/message
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/pretty_compiler b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/pretty_compiler
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/pretty_compiler
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/pretty_compiler
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/testlib b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/testlib
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/testlib
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/build_tools/testlib
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile_test.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile_test.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile_test.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/configfile_test.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/test.ini b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/test.ini
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/test.ini
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configfile/test.ini
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configure b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configure
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configure
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/configure
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/Instructions.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/Instructions.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/Instructions.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/Instructions.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/scanmatcher.tex b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/scanmatcher.tex
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/scanmatcher.tex
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/scanmatcher.tex
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/userver.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/userver.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/userver.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/docs/userver.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/graphmap.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/graphmap.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/graphmap.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/graphmap.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/map_test.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/map_test.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/map_test.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/grid/map_test.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2log.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2log.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2log.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2log.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2neff.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2neff.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2neff.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2neff.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2rec.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2rec.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2rec.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfs2rec.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfsreader.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfsreader.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfsreader.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gfsreader.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor_tree.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor_tree.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor_tree.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/gridslamprocessor_tree.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/motionmodel.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/motionmodel.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/motionmodel.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/gridfastslam/motionmodel.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/configfile/configfile.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/configfile/configfile.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/configfile/configfile.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/configfile/configfile.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/accessstate.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/accessstate.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/accessstate.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/accessstate.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/array2d.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/array2d.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/array2d.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/array2d.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/harray2d.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/harray2d.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/harray2d.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/harray2d.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/map.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/map.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/map.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/grid/map.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gfsreader.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gfsreader.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gfsreader.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gfsreader.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.hxx b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.hxx
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.hxx
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/gridslamprocessor.hxx
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/motionmodel.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/motionmodel.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/motionmodel.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/gridfastslam/motionmodel.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/carmenconfiguration.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/carmenconfiguration.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/carmenconfiguration.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/carmenconfiguration.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/configuration.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/configuration.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/configuration.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/configuration.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorlog.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorlog.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorlog.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorlog.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorstream.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorstream.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorstream.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/log/sensorstream.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/particlefilter.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/particlefilter.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/particlefilter.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/particlefilter.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/pf.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/pf.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/pf.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/particlefilter/pf.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/eig3.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/eig3.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/eig3.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/eig3.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/gridlinetraversal.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/gridlinetraversal.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/gridlinetraversal.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/gridlinetraversal.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/icp.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/icp.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/icp.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/icp.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcher.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcher.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcher.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcher.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcherprocessor.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcherprocessor.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcherprocessor.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/scanmatcherprocessor.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/smmap.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/smmap.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/smmap.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/scanmatcher/smmap.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensor.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensor.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensor.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensor.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensoreading.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensoreading.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensoreading.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensoreading.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensorreading.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensorreading.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensorreading.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_base/sensorreading.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometryreading.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometryreading.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometryreading.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometryreading.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometrysensor.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometrysensor.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometrysensor.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_odometry/odometrysensor.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangereading.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangereading.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangereading.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangereading.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangesensor.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangesensor.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangesensor.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/sensor/sensor_range/rangesensor.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/autoptr.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/autoptr.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/autoptr.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/autoptr.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/commandline.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/commandline.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/commandline.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/commandline.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/gvalues.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/gvalues.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/gvalues.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/gvalues.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/macro_params.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/macro_params.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/macro_params.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/macro_params.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/movement.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/movement.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/movement.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/movement.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/point.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/point.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/point.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/point.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/stat.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/stat.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/stat.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include/gmapping/utils/stat.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/carmenconfiguration.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/carmenconfiguration.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/carmenconfiguration.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/carmenconfiguration.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/configuration.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/configuration.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/configuration.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/configuration.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_plot.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_plot.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_plot.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_plot.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_test.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_test.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_test.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/log_test.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/rdk2carmen.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/rdk2carmen.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/rdk2carmen.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/rdk2carmen.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/scanstudio2carmen.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/scanstudio2carmen.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/scanstudio2carmen.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/scanstudio2carmen.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorlog.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorlog.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorlog.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorlog.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorstream.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorstream.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorstream.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/log/sensorstream.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk-template b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk-template
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk-template
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/manual.mk-template
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/package.xml b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/package.xml
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/package.xml
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/package.xml
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/eig3.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/eig3.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/eig3.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/eig3.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/icptest.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/icptest.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/icptest.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/icptest.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/line_test.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/line_test.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/line_test.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/line_test.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatch_test.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatch_test.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatch_test.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatch_test.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.new.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.new.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.new.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcher.new.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcherprocessor.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcherprocessor.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcherprocessor.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/scanmatcherprocessor.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/smmap.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/smmap.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/smmap.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/scanmatcher/smmap.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensor.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensor.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensor.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensor.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensorreading.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensorreading.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensorreading.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_base/sensorreading.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometryreading.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometryreading.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometryreading.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometryreading.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometrysensor.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometrysensor.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometrysensor.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_odometry/odometrysensor.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangereading.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangereading.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangereading.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangereading.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangesensor.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangesensor.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangesensor.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/sensor/sensor_range/rangesensor.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/setlibpath b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/setlibpath
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/setlibpath
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/setlibpath
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/Makefile b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/Makefile
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/Makefile
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/Makefile
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/autoptr_test.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/autoptr_test.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/autoptr_test.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/autoptr_test.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/movement.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/movement.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/movement.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/movement.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat_test.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat_test.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat_test.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/utils/stat_test.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/.gitignore b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/.gitignore
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/.gitignore
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/.gitignore
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/README.md b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/README.md
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/README.md
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/README.md
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CHANGELOG.rst b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CHANGELOG.rst
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CHANGELOG.rst
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CHANGELOG.rst
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CMakeLists.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CMakeLists.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CMakeLists.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/CMakeLists.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/launch/slam_gmapping_pr2.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/launch/slam_gmapping_pr2.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/launch/slam_gmapping_pr2.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/launch/slam_gmapping_pr2.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/doubleMap.msg b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/doubleMap.msg
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/doubleMap.msg
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/doubleMap.msg
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/mapModel.msg b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/mapModel.msg
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/mapModel.msg
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/msg/mapModel.msg
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/nodelet_plugins.xml b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/nodelet_plugins.xml
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/nodelet_plugins.xml
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/nodelet_plugins.xml
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/package.xml b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/package.xml
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/package.xml
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/package.xml
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/main.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/main.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/main.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/main.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/nodelet.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/nodelet.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/nodelet.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/nodelet.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/replay.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/replay.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/replay.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/replay.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.h b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.h
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.h
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/src/slam_gmapping.h
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_laser_different_beamcount.test b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_laser_different_beamcount.test
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_laser_different_beamcount.test
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_laser_different_beamcount.test
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay2.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay2.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay2.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_stage_replay2.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_symmetry.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_symmetry.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_symmetry.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_symmetry.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_upside_down.launch b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_upside_down.launch
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_upside_down.launch
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/basic_localization_upside_down.launch
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/rtest.cpp b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/rtest.cpp
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/rtest.cpp
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/rtest.cpp
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/test_map.py b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/test_map.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/test_map.py
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/gmapping/test/test_map.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CHANGELOG.rst b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CHANGELOG.rst
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CHANGELOG.rst
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CHANGELOG.rst
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CMakeLists.txt b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CMakeLists.txt
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CMakeLists.txt
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/CMakeLists.txt
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/package.xml b/projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/package.xml
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/package.xml
rename to projects/python/perception/slam/full_map_posterior_gmapping/src/slam_gmapping/slam_gmapping/package.xml
diff --git a/projects/perception/speech_command_recognition/README.MD b/projects/python/perception/speech_command_recognition/README.MD
similarity index 100%
rename from projects/perception/speech_command_recognition/README.MD
rename to projects/python/perception/speech_command_recognition/README.MD
diff --git a/projects/perception/speech_command_recognition/demo.py b/projects/python/perception/speech_command_recognition/demo.py
similarity index 100%
rename from projects/perception/speech_command_recognition/demo.py
rename to projects/python/perception/speech_command_recognition/demo.py
diff --git a/projects/perception/speech_command_recognition/example1.wav b/projects/python/perception/speech_command_recognition/example1.wav
similarity index 100%
rename from projects/perception/speech_command_recognition/example1.wav
rename to projects/python/perception/speech_command_recognition/example1.wav
diff --git a/projects/perception/speech_command_recognition/example2.wav b/projects/python/perception/speech_command_recognition/example2.wav
similarity index 100%
rename from projects/perception/speech_command_recognition/example2.wav
rename to projects/python/perception/speech_command_recognition/example2.wav
diff --git a/projects/simulation/SMPL+D_human_models/README.md b/projects/python/simulation/SMPL+D_human_models/README.md
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/README.md
rename to projects/python/simulation/SMPL+D_human_models/README.md
diff --git a/projects/simulation/SMPL+D_human_models/examples/model_1.png b/projects/python/simulation/SMPL+D_human_models/examples/model_1.png
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/examples/model_1.png
rename to projects/python/simulation/SMPL+D_human_models/examples/model_1.png
diff --git a/projects/simulation/SMPL+D_human_models/examples/model_2.png b/projects/python/simulation/SMPL+D_human_models/examples/model_2.png
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/examples/model_2.png
rename to projects/python/simulation/SMPL+D_human_models/examples/model_2.png
diff --git a/projects/simulation/SMPL+D_human_models/examples/model_3.png b/projects/python/simulation/SMPL+D_human_models/examples/model_3.png
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/examples/model_3.png
rename to projects/python/simulation/SMPL+D_human_models/examples/model_3.png
diff --git a/projects/simulation/SMPL+D_human_models/examples/model_4.png b/projects/python/simulation/SMPL+D_human_models/examples/model_4.png
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/examples/model_4.png
rename to projects/python/simulation/SMPL+D_human_models/examples/model_4.png
diff --git a/projects/simulation/SMPL+D_human_models/src/download_data.py b/projects/python/simulation/SMPL+D_human_models/src/download_data.py
similarity index 93%
rename from projects/simulation/SMPL+D_human_models/src/download_data.py
rename to projects/python/simulation/SMPL+D_human_models/src/download_data.py
index 4dc42bf2e9cc30f0b6c1afc37f29f3d83116b142..ddf4a2c51e22f0f7b36e5f8eb6153e310916f821 100644
--- a/projects/simulation/SMPL+D_human_models/src/download_data.py
+++ b/projects/python/simulation/SMPL+D_human_models/src/download_data.py
@@ -44,13 +44,13 @@ def download_data(raw_data_only):
             )
 
     human_data_url = OPENDR_SERVER_URL + "simulation/SMPLD_body_models/human_models.tar.gz"
-    downloaded_human_data_path = os.path.join(OPENDR_HOME, 'projects/simulation/SMPL+D_human_models/human_models.tar.gz')
+    downloaded_human_data_path = os.path.join(OPENDR_HOME, 'projects/python/simulation/SMPL+D_human_models/human_models.tar.gz')
     print("Downloading data from", human_data_url, "to", downloaded_human_data_path)
     start_time = 0
     last_print = 0
     urlretrieve(human_data_url, downloaded_human_data_path, reporthook=reporthook)
     with tarfile.open(downloaded_human_data_path) as tar:
-        tar.extractall(path=os.path.join(OPENDR_HOME, 'projects/simulation/SMPL+D_human_models'))
+        tar.extractall(path=os.path.join(OPENDR_HOME, 'projects/python/simulation/SMPL+D_human_models'))
     tar.close()
     os.remove(downloaded_human_data_path)
 
@@ -58,13 +58,13 @@ def download_data(raw_data_only):
         return
 
     model_url = OPENDR_SERVER_URL + "simulation/SMPLD_body_models/model.tar.gz"
-    downloaded_model_path = os.path.join(OPENDR_HOME, 'projects/simulation/SMPL+D_human_models/model.tar.gz')
+    downloaded_model_path = os.path.join(OPENDR_HOME, 'projects/python/simulation/SMPL+D_human_models/model.tar.gz')
     print("Downloading data from", model_url, "to", downloaded_model_path)
     start_time = 0
     last_print = 0
     urlretrieve(model_url, downloaded_model_path, reporthook=reporthook)
     with tarfile.open(downloaded_model_path) as tar:
-        tar.extractall(path=os.path.join(OPENDR_HOME, 'projects/simulation/SMPL+D_human_models'))
+        tar.extractall(path=os.path.join(OPENDR_HOME, 'projects/python/simulation/SMPL+D_human_models'))
     tar.close()
     os.remove(downloaded_model_path)
 
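Note: the hunk above only rewrites the hard-coded project paths so they carry the new `python/` segment; the surrounding download logic is unchanged. For reference, a minimal sketch of that download-and-extract pattern under the new layout is shown below. The helper name `fetch_and_extract` and the commented-out placeholder URL are illustrative only, and `OPENDR_HOME` is assumed to be available from the environment as in the original script.

```python
import os
import tarfile
from urllib.request import urlretrieve


def fetch_and_extract(url, target_dir):
    """Download a tarball into target_dir, unpack it there, and delete the archive."""
    archive_path = os.path.join(target_dir, os.path.basename(url))
    urlretrieve(url, archive_path)
    with tarfile.open(archive_path) as tar:
        # The context manager closes the archive, so no explicit tar.close() is required.
        tar.extractall(path=target_dir)
    os.remove(archive_path)


# Project assets now live under projects/python/... instead of projects/...
OPENDR_HOME = os.environ.get("OPENDR_HOME", ".")
target = os.path.join(OPENDR_HOME, "projects/python/simulation/SMPL+D_human_models")
# fetch_and_extract("https://example.org/human_models.tar.gz", target)  # placeholder URL, not the real server
```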
diff --git a/projects/simulation/SMPL+D_human_models/src/generate_models.py b/projects/python/simulation/SMPL+D_human_models/src/generate_models.py
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/src/generate_models.py
rename to projects/python/simulation/SMPL+D_human_models/src/generate_models.py
diff --git a/projects/simulation/SMPL+D_human_models/webots/extract_anims.py b/projects/python/simulation/SMPL+D_human_models/webots/extract_anims.py
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/extract_anims.py
rename to projects/python/simulation/SMPL+D_human_models/webots/extract_anims.py
diff --git a/projects/simulation/SMPL+D_human_models/webots/install_project.sh b/projects/python/simulation/SMPL+D_human_models/webots/install_project.sh
similarity index 55%
rename from projects/simulation/SMPL+D_human_models/webots/install_project.sh
rename to projects/python/simulation/SMPL+D_human_models/webots/install_project.sh
index ee1cf9392566d435d83c21a112e29b13dd038aad..6ce95c86faf2a8b14df8e84750eae6a148c3fe81 100644
--- a/projects/simulation/SMPL+D_human_models/webots/install_project.sh
+++ b/projects/python/simulation/SMPL+D_human_models/webots/install_project.sh
@@ -7,7 +7,7 @@ cd $WEBOTS_HOME/projects/smpl_webots/controllers/smpl_animation
 make
 mkdir $WEBOTS_HOME/projects/smpl_webots/skins
 mkdir $WEBOTS_HOME/projects/smpl_webots/skins/model-204
-cp $OPENDR_HOME/projects/simulation/SMPL+D_human_models/fbx_models/female/204_0/204_0.fbx $WEBOTS_HOME/projects/smpl_webots/skins/model-204/model-204.fbx
+cp $OPENDR_HOME/projects/python/simulation/SMPL+D_human_models/fbx_models/female/204_0/204_0.fbx $WEBOTS_HOME/projects/smpl_webots/skins/model-204/model-204.fbx
 mkdir $WEBOTS_HOME/projects/smpl_webots/protos/textures
 mkdir $WEBOTS_HOME/projects/smpl_webots/protos/textures/model-204
-cp $OPENDR_HOME/projects/simulation/SMPL+D_human_models/fbx_models/female/204_0/texture.jpg $WEBOTS_HOME/projects/smpl_webots/protos/textures/model-204/texture.jpg
+cp $OPENDR_HOME/projects/python/simulation/SMPL+D_human_models/fbx_models/female/204_0/texture.jpg $WEBOTS_HOME/projects/smpl_webots/protos/textures/model-204/texture.jpg
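Note: as with the Python demo above, install_project.sh only needs its source paths updated to the new `projects/python/...` prefix; the copy targets inside `$WEBOTS_HOME` stay the same. A minimal Python sketch of the equivalent copy step under the new layout follows; the `copy_skin_assets` helper is illustrative, and `OPENDR_HOME`/`WEBOTS_HOME` are assumed to be set in the environment.

```python
import os
import shutil


def copy_skin_assets(opendr_home, webots_home):
    """Copy the example SMPL+D model and texture from the renamed project tree into the Webots project."""
    src_dir = os.path.join(opendr_home, "projects/python/simulation/SMPL+D_human_models/fbx_models/female/204_0")
    skin_dir = os.path.join(webots_home, "projects/smpl_webots/skins/model-204")
    tex_dir = os.path.join(webots_home, "projects/smpl_webots/protos/textures/model-204")
    os.makedirs(skin_dir, exist_ok=True)
    os.makedirs(tex_dir, exist_ok=True)
    shutil.copyfile(os.path.join(src_dir, "204_0.fbx"), os.path.join(skin_dir, "model-204.fbx"))
    shutil.copyfile(os.path.join(src_dir, "texture.jpg"), os.path.join(tex_dir, "texture.jpg"))


# copy_skin_assets(os.environ["OPENDR_HOME"], os.environ["WEBOTS_HOME"])
```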
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/Makefile b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/Makefile
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/Makefile
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/Makefile
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/smpl_animation.c b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/smpl_animation.c
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/smpl_animation.c
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/controllers/smpl_animation/smpl_animation.c
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/Makefile b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/Makefile
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/Makefile
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/Makefile
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/quaternion_private.h b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/quaternion_private.h
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/quaternion_private.h
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/quaternion_private.h
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/smpl_util.h b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/smpl_util.h
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/smpl_util.h
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/smpl_util.h
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/vector3_private.h b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/vector3_private.h
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/vector3_private.h
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/include/vector3_private.h
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/quaternion.c b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/quaternion.c
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/quaternion.c
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/quaternion.c
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/smpl_util.c b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/smpl_util.c
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/smpl_util.c
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/smpl_util.c
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/vector3.c b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/vector3.c
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/vector3.c
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/libraries/smpl_util/src/vector3.c
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/protos/smpl_model_1.proto b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/protos/smpl_model_1.proto
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/protos/smpl_model_1.proto
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/protos/smpl_model_1.proto
diff --git a/projects/simulation/SMPL+D_human_models/webots/smpl_webots/worlds/demo_world.wbt b/projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/worlds/demo_world.wbt
similarity index 100%
rename from projects/simulation/SMPL+D_human_models/webots/smpl_webots/worlds/demo_world.wbt
rename to projects/python/simulation/SMPL+D_human_models/webots/smpl_webots/worlds/demo_world.wbt
diff --git a/projects/simulation/human_dataset_generation/README.md b/projects/python/simulation/human_dataset_generation/README.md
similarity index 100%
rename from projects/simulation/human_dataset_generation/README.md
rename to projects/python/simulation/human_dataset_generation/README.md
diff --git a/projects/simulation/human_dataset_generation/background.py b/projects/python/simulation/human_dataset_generation/background.py
similarity index 100%
rename from projects/simulation/human_dataset_generation/background.py
rename to projects/python/simulation/human_dataset_generation/background.py
diff --git a/projects/simulation/human_dataset_generation/create_background_images.py b/projects/python/simulation/human_dataset_generation/create_background_images.py
similarity index 100%
rename from projects/simulation/human_dataset_generation/create_background_images.py
rename to projects/python/simulation/human_dataset_generation/create_background_images.py
diff --git a/projects/simulation/human_dataset_generation/create_dataset.py b/projects/python/simulation/human_dataset_generation/create_dataset.py
similarity index 100%
rename from projects/simulation/human_dataset_generation/create_dataset.py
rename to projects/python/simulation/human_dataset_generation/create_dataset.py
diff --git a/projects/simulation/human_dataset_generation/data_generator.py b/projects/python/simulation/human_dataset_generation/data_generator.py
similarity index 100%
rename from projects/simulation/human_dataset_generation/data_generator.py
rename to projects/python/simulation/human_dataset_generation/data_generator.py
diff --git a/projects/simulation/human_dataset_generation/dependencies.ini b/projects/python/simulation/human_dataset_generation/dependencies.ini
similarity index 100%
rename from projects/simulation/human_dataset_generation/dependencies.ini
rename to projects/python/simulation/human_dataset_generation/dependencies.ini
diff --git a/projects/simulation/human_dataset_generation/download_models.sh b/projects/python/simulation/human_dataset_generation/download_models.sh
similarity index 100%
rename from projects/simulation/human_dataset_generation/download_models.sh
rename to projects/python/simulation/human_dataset_generation/download_models.sh
diff --git a/projects/simulation/human_dataset_generation/reformat_cityscapes.py b/projects/python/simulation/human_dataset_generation/reformat_cityscapes.py
similarity index 100%
rename from projects/simulation/human_dataset_generation/reformat_cityscapes.py
rename to projects/python/simulation/human_dataset_generation/reformat_cityscapes.py
diff --git a/projects/simulation/human_model_generation/README.md b/projects/python/simulation/human_model_generation/README.md
similarity index 100%
rename from projects/simulation/human_model_generation/README.md
rename to projects/python/simulation/human_model_generation/README.md
diff --git a/projects/simulation/human_model_generation/demos/imgs_input/msk/result_0004.jpg b/projects/python/simulation/human_model_generation/demos/imgs_input/msk/result_0004.jpg
similarity index 100%
rename from projects/simulation/human_model_generation/demos/imgs_input/msk/result_0004.jpg
rename to projects/python/simulation/human_model_generation/demos/imgs_input/msk/result_0004.jpg
diff --git a/projects/simulation/human_model_generation/demos/imgs_input/rgb/result_0004.jpg b/projects/python/simulation/human_model_generation/demos/imgs_input/rgb/result_0004.jpg
similarity index 100%
rename from projects/simulation/human_model_generation/demos/imgs_input/rgb/result_0004.jpg
rename to projects/python/simulation/human_model_generation/demos/imgs_input/rgb/result_0004.jpg
diff --git a/projects/simulation/human_model_generation/demos/model_generation.ipynb b/projects/python/simulation/human_model_generation/demos/model_generation.ipynb
similarity index 100%
rename from projects/simulation/human_model_generation/demos/model_generation.ipynb
rename to projects/python/simulation/human_model_generation/demos/model_generation.ipynb
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/README.md b/projects/python/simulation/synthetic_multi_view_facial_image_generation/README.md
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/README.md
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/README.md
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/SyntheticDataGeneration.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/SyntheticDataGeneration.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/SyntheticDataGeneration.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/SyntheticDataGeneration.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/bfm_show.m b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/bfm_show.m
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/bfm_show.m
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/bfm_show.m
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_noneck.jpg b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_noneck.jpg
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_noneck.jpg
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_noneck.jpg
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_refine.jpg b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_refine.jpg
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_refine.jpg
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/imgs/bfm_refine.jpg
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/readme.md b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/readme.md
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/readme.md
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/readme.md
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/render_face_mesh.m b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/render_face_mesh.m
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/render_face_mesh.m
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/BFM_Remove_Neck/render_face_mesh.m
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/LICENSE b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/LICENSE
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/LICENSE
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/LICENSE
diff --git a/projects/perception/lightweight_open_pose/jetbot/utils/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/__init__.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/utils/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/__init__.py
diff --git a/projects/opendr_ws/src/ros_bridge/msg/.keep b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/example/Images/.keep
similarity index 100%
rename from projects/opendr_ws/src/ros_bridge/msg/.keep
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/example/Images/.keep
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/mobilenet_v1.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/mobilenet_v1.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/mobilenet_v1.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/mobilenet_v1.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_1.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_1.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_1.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_1.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_2.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_2.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_2.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/preprocessing_2.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/simple_dataset.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/simple_dataset.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/simple_dataset.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/simple_dataset.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/test.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/test.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/test.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/test.py
diff --git a/projects/perception/object_detection_3d/demos/voxel_object_detection_3d/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/__init__.py
similarity index 100%
rename from projects/perception/object_detection_3d/demos/voxel_object_detection_3d/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cv_plot.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cv_plot.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cv_plot.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cv_plot.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.cpp b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.cpp
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.cpp
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.cpp
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.h b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.h
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.h
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core.h
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cp37-win_amd64.pyd b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cp37-win_amd64.pyd
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cp37-win_amd64.pyd
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cp37-win_amd64.pyd
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cpp b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cpp
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cpp
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.cpp
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.pyx b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.pyx
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.pyx
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/mesh_core_cython.pyx
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/readme.md b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/readme.md
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/readme.md
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/readme.md
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/setup.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/setup.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/setup.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/cython/setup.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/ddfa.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/ddfa.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/ddfa.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/ddfa.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/estimate_pose.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/estimate_pose.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/estimate_pose.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/estimate_pose.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/inference.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/inference.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/inference.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/inference.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/io.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/io.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/io.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/io.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/lighting.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/lighting.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/lighting.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/lighting.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/paf.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/paf.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/paf.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/paf.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/params.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/params.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/params.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/params.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/path_helper.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/path_helper.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/path_helper.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/path_helper.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/render.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/render.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/render.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/utils/render.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/readme.md b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/readme.md
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/readme.md
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/readme.md
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_demo.m b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_demo.m
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_demo.m
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_demo.m
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_face_mesh.m b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_face_mesh.m
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_face_mesh.m
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/render_face_mesh.m
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/tri.mat b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/tri.mat
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/tri.mat
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/DDFA/visualize/tri.mat
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/LICENSE b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/LICENSE
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/LICENSE
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/LICENSE
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/fmp_slam_eval/src/fmp_slam_eval/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/allface_dataset.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/allface_dataset.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/allface_dataset.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/allface_dataset.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/base_dataset.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/base_dataset.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/base_dataset.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/base_dataset.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/curve.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/curve.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/curve.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/curve.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/data_utils.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/data_utils.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/data_utils.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/data/data_utils.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/test.sh b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/test.sh
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/test.sh
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/test.sh
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/train.sh b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/train.sh
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/train.sh
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/train.sh
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/v100_test.sh b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/v100_test.sh
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/v100_test.sh
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/experiments/v100_test.sh
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/architecture.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/architecture.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/architecture.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/architecture.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/base_network.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/base_network.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/base_network.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/base_network.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/discriminator.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/discriminator.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/discriminator.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/discriminator.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/encoder.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/encoder.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/encoder.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/encoder.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/generator.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/generator.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/generator.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/generator.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/loss.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/loss.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/loss.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/loss.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/normalization.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/normalization.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/normalization.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/normalization.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/render.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/render.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/render.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/render.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/rotate_render.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/rotate_render.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/rotate_render.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/rotate_render.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm_reimpl.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm_reimpl.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm_reimpl.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/batchnorm_reimpl.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/comm.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/comm.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/comm.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/comm.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/replicate.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/replicate.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/replicate.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/replicate.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/scatter_gather.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/scatter_gather.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/scatter_gather.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/scatter_gather.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/unittest.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/unittest.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/unittest.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/sync_batchnorm/unittest.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/test_render.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/test_render.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/test_render.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/test_render.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/util.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/util.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/util.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/networks/util.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotate_model.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotate_model.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotate_model.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotate_model.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotatespade_model.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotatespade_model.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotatespade_model.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/rotatespade_model.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/test_model.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/test_model.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/test_model.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/models/test_model.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/base_options.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/base_options.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/base_options.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/base_options.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/test_options.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/test_options.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/test_options.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/test_options.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/train_options.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/train_options.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/train_options.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/options/train_options.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_frontal.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_frontal.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_frontal.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_frontal.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_multipose.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_multipose.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_multipose.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/test_multipose.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/train.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/train.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/train.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/train.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/__init__.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotate_trainer.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotate_trainer.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotate_trainer.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotate_trainer.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotatespade_trainer.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotatespade_trainer.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotatespade_trainer.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/trainers/rotatespade_trainer.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/__init__.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/__init__.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/__init__.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/html.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/html.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/html.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/html.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/iter_counter.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/iter_counter.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/iter_counter.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/iter_counter.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/util.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/util.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/util.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/util.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/visualizer.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/visualizer.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/visualizer.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm/Rotate_and_Render/util/visualizer.py
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/demos/imgs_input/person01145+0-15.jpg b/projects/python/simulation/synthetic_multi_view_facial_image_generation/demos/imgs_input/person01145+0-15.jpg
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/demos/imgs_input/person01145+0-15.jpg
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/demos/imgs_input/person01145+0-15.jpg
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/requirements.txt b/projects/python/simulation/synthetic_multi_view_facial_image_generation/requirements.txt
similarity index 88%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/requirements.txt
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/requirements.txt
index 3f74a7cee46fd7ac730525f4536e9ad7e2fe4921..518c4a9a3c2a6c531b9a757d3bb8851c7438146b 100644
--- a/projects/data_generation/synthetic_multi_view_facial_image_generation/requirements.txt
+++ b/projects/python/simulation/synthetic_multi_view_facial_image_generation/requirements.txt
@@ -2,7 +2,7 @@ torch>=1.0.0
 torchvision
 dominate>=2.3.1
 dill
-scikit-image
+scikit-image>0.16.2
 numpy>=1.15.4
 scipy>=1.1.0
 matplotlib>=2.2.2
diff --git a/projects/data_generation/synthetic_multi_view_facial_image_generation/tool_synthetic_facial_generation.py b/projects/python/simulation/synthetic_multi_view_facial_image_generation/tool_synthetic_facial_generation.py
similarity index 100%
rename from projects/data_generation/synthetic_multi_view_facial_image_generation/tool_synthetic_facial_generation.py
rename to projects/python/simulation/synthetic_multi_view_facial_image_generation/tool_synthetic_facial_generation.py
diff --git a/projects/utils/hyperparameter_tuner/hyperparameter_tuner_demo.py b/projects/python/utils/hyperparameter_tuner/hyperparameter_tuner_demo.py
similarity index 100%
rename from projects/utils/hyperparameter_tuner/hyperparameter_tuner_demo.py
rename to projects/python/utils/hyperparameter_tuner/hyperparameter_tuner_demo.py
diff --git a/projects/utils/hyperparameter_tuner/hyperparameter_tuning_tutorial.ipynb b/projects/python/utils/hyperparameter_tuner/hyperparameter_tuning_tutorial.ipynb
similarity index 100%
rename from projects/utils/hyperparameter_tuner/hyperparameter_tuning_tutorial.ipynb
rename to projects/python/utils/hyperparameter_tuner/hyperparameter_tuning_tutorial.ipynb
diff --git a/src/c_api/face_recognition.cpp b/src/c_api/face_recognition.cpp
index 2449baca18fe0f5267f8b5b5a67d9b9e43751832..9ed33b2ba118a4d59068939f4922b6add60e5891 100644
--- a/src/c_api/face_recognition.cpp
+++ b/src/c_api/face_recognition.cpp
@@ -82,9 +82,9 @@ std::string json_get_key_string(std::string json, const std::string &key) {
   std::size_t start_idx = json.find(key);
   std::string value = json.substr(start_idx);
   value = value.substr(value.find(":") + 1);
-  value = value.substr(0, value.find(","));
+  value.resize(value.find(","));
   value = value.substr(value.find("\"") + 1);
-  value = value.substr(0, value.find("\""));
+  value.resize(value.find("\""));
   return value;
 }
 
@@ -116,7 +116,7 @@ void load_face_recognition_model(const char *model_path, face_recognition_model_
   std::string basepath = model_json_path.substr(0, split_pos);
   split_pos = basepath.find_last_of("/");
   split_pos = split_pos > 0 ? split_pos + 1 : 0;
-  basepath = basepath.substr(0, split_pos);
+  basepath.resize(split_pos);
 
   // Parse JSON
   std::string onnx_model_path = basepath + json_get_key_string(str, "model_paths");
@@ -289,20 +289,20 @@ void build_database_face_recognition(const char *database_folder, const char *ou
   // Write number of persons
   int n = person_names.size();
 
-  fout.write(static_cast<char *>(&n), sizeof(int));
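+  // ofstream::write expects a char* buffer, so the integer is reinterpreted as raw bytes;
+  // a static_cast between unrelated pointer types (int* to char*) does not compile.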
+  fout.write(reinterpret_cast<char *>(&n), sizeof(int));
   for (int i = 0; i < n; i++) {
     // Write the name of the person (along with its size)
     int name_length = person_names[i].size() + 1;
-    fout.write(static_cast<char *>(&name_length), sizeof(int));
+    fout.write(reinterpret_cast<char *>(&name_length), sizeof(int));
     fout.write(person_names[i].c_str(), name_length);
   }
 
   cv::Size s = database_out.size();
 
-  fout.write(static_cast<char *>(&s.height), sizeof(int));
-  fout.write(static_cast<char *>(&s.width), sizeof(int));
-  fout.write(static_cast<char *>(database_out.data), sizeof(float) * s.height * s.width);
-  fout.write(static_cast<char *>(&database_ids[0]), sizeof(int) * s.height);
+  fout.write(reinterpret_cast<char *>(&s.height), sizeof(int));
+  fout.write(reinterpret_cast<char *>(&s.width), sizeof(int));
+  fout.write(reinterpret_cast<char *>(database_out.data), sizeof(float) * s.height * s.width);
+  fout.write(reinterpret_cast<char *>(&database_ids[0]), sizeof(int) * s.height);
   fout.flush();
   fout.close();
 }
@@ -318,14 +318,14 @@ void load_database_face_recognition(const char *database_path, face_recognition_
     return;
   }
   int n;
-  fin.read(static_cast<char *>(&n), sizeof(int));
+  fin.read(reinterpret_cast<char *>(&n), sizeof(int));
   char **person_names = new char *[n];
 
   for (int i = 0; i < n; i++) {
     person_names[i] = new char[512];
     // Read person name
     int name_length;
-    fin.read(static_cast<char *>(&name_length), sizeof(int));
+    fin.read(reinterpret_cast<char *>(&name_length), sizeof(int));
     if (name_length > 512) {
       std::cerr << "Person name exceeds max number of characters (512)" << std::endl;
       return;
@@ -334,13 +334,13 @@ void load_database_face_recognition(const char *database_path, face_recognition_
   }
 
   int height, width;
-  fin.read(static_cast<char *>(&height), sizeof(int));
-  fin.read(static_cast<char *>(&width), sizeof(int));
+  fin.read(reinterpret_cast<char *>(&height), sizeof(int));
+  fin.read(reinterpret_cast<char *>(&width), sizeof(int));
 
   float *database_buff = new float[height * width];
   int *features_ids = new int[height];
-  fin.read(static_cast<char *>(database_buff), sizeof(float) * height * width);
-  fin.read(static_cast<char *>(features_ids), sizeof(int) * height);
+  fin.read(reinterpret_cast<char *>(database_buff), sizeof(float) * height * width);
+  fin.read(reinterpret_cast<char *>(features_ids), sizeof(int) * height);
 
   fin.close();
 
diff --git a/src/opendr/_version.py b/src/opendr/_version.py
index c98a58882380c9ec15c4a577d7684df8fa9fba3e..15ea2c3dc38d44586343efb9da4408975b1843de 100644
--- a/src/opendr/_version.py
+++ b/src/opendr/_version.py
@@ -12,4 +12,4 @@
 # See the License for the specific language governing permissions and
 # limitations under the License.
 
-__version__ = "1.0.0"
+__version__ = "1.1.1"
diff --git a/src/opendr/control/mobile_manipulation/install_mobile_manipulation.sh b/src/opendr/control/mobile_manipulation/install_mobile_manipulation.sh
index 48378d4e5f966f038958ffc645b45b392f908f1a..5eb24caa0acdfe868ca72b90eca34fae7f83975e 100755
--- a/src/opendr/control/mobile_manipulation/install_mobile_manipulation.sh
+++ b/src/opendr/control/mobile_manipulation/install_mobile_manipulation.sh
@@ -11,7 +11,7 @@ if [[ -z "$ROS_DISTRO" ]]; then
 fi
 
 MODULE_PATH=${OPENDR_HOME}/src/opendr/control/mobile_manipulation
-WS_PATH=${OPENDR_HOME}/projects/control/mobile_manipulation/mobile_manipulation_ws
+WS_PATH=${OPENDR_HOME}/projects/python/control/mobile_manipulation/mobile_manipulation_ws
 
 ## ROS
 sudo apt-get update && sudo apt-get install -y \
diff --git a/src/opendr/control/single_demo_grasp/Makefile b/src/opendr/control/single_demo_grasp/Makefile
index 0b50a1bf76c23f0603eaa6053bf83c194b3b8a83..e9ab064cf3cd3b61a5f3a1c759b8ef66de6de2df 100644
--- a/src/opendr/control/single_demo_grasp/Makefile
+++ b/src/opendr/control/single_demo_grasp/Makefile
@@ -25,7 +25,7 @@ install_runtime_dependencies:
 
 install_compilation_dependencies:
 	@+echo "#"; echo "# * Install Compilation Dependencies for single demonstration grasping *"; echo "#"
-	@+python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
+	@+python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13'
 	@./install_single_demo_grasp.sh
 
 help:
diff --git a/src/opendr/control/single_demo_grasp/dependencies.ini b/src/opendr/control/single_demo_grasp/dependencies.ini
index f2bd802a46995d7e89a56b188f0904ff349fbc90..9c31b905a8660145305ebe6d57b187404d36812c 100644
--- a/src/opendr/control/single_demo_grasp/dependencies.ini
+++ b/src/opendr/control/single_demo_grasp/dependencies.ini
@@ -13,4 +13,4 @@ python=torch==1.9.0
 
 opendr=opendr-toolkit-engine
 
-post-install=python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git'
\ No newline at end of file
+post-install=python3 -m pip install 'git+https://github.com/facebookresearch/detectron2.git@5aeb252b194b93dc2879b4ac34bc51a31b5aee13'
diff --git a/src/opendr/control/single_demo_grasp/install_single_demo_grasp.sh b/src/opendr/control/single_demo_grasp/install_single_demo_grasp.sh
index 4a9fceaa718cb8a452c3d6d35ec55f2ab941c044..30f48f8f852a5cae198007c32b4087c968b18344 100755
--- a/src/opendr/control/single_demo_grasp/install_single_demo_grasp.sh
+++ b/src/opendr/control/single_demo_grasp/install_single_demo_grasp.sh
@@ -11,7 +11,7 @@ if [[ -z "$ROS_DISTRO" ]]; then
 fi
 
 MODULE_PATH=${OPENDR_HOME}/src/opendr/control/single_demo_grasp
-WS_PATH=${OPENDR_HOME}/projects/control/single_demo_grasp/simulation_ws
+WS_PATH=${OPENDR_HOME}/projects/python/control/single_demo_grasp/simulation_ws
 BRIDGE_PATH=${OPENDR_HOME}/projects/opendr_ws/src/ros_bridge
 
 
diff --git a/src/opendr/engine/target.py b/src/opendr/engine/target.py
index 9bc9dbacf14118c106afcadd5a65cb13574acc27..c00187e882eae70fda69a6558761c73516fb9527 100644
--- a/src/opendr/engine/target.py
+++ b/src/opendr/engine/target.py
@@ -296,9 +296,13 @@ class Pose(Target):
             raise ValueError("Pose expects either NumPy arrays or lists as data")
 
     def __str__(self):
-        """Matches kpt_names and keypoints x,y to get the best human-readable format for pose."""
+        """
+        Returns the pose in a human-readable format that contains the pose ID, the detection confidence and
+        the matched kpt_names together with the keypoint x,y positions.
+        """
 
-        out_string = ""
+        out_string = "Pose ID: " + str(self.id)
+        out_string += "\nDetection confidence: " + str(self.confidence) + "\nKeypoints name-position:\n"
         # noinspection PyUnresolvedReferences
         for name, kpt in zip(Pose.kpt_names, self.data.tolist()):
             out_string += name + ": " + str(kpt) + "\n"
@@ -1068,6 +1072,14 @@ class Heatmap(Target):
         # Since this class stores the data as NumPy arrays, we can directly return the data.
         return self.data
 
+    def opencv(self):
+        """
+        Required to support the ROS bridge for images.
+        :return: a NumPy-compatible representation of data
+        :rtype: numpy.ndarray
+        """
+        return self.numpy()
+
     def shape(self) -> Tuple[int, ...]:
         """
         Returns the shape of the underlying NumPy array.
diff --git a/src/opendr/perception/object_detection_2d/__init__.py b/src/opendr/perception/object_detection_2d/__init__.py
index 9fac6ba42477eb7c776f8956f6a06680046d46d0..3b44686b21d3d9f649c90346a5ad3eeb3b90b778 100644
--- a/src/opendr/perception/object_detection_2d/__init__.py
+++ b/src/opendr/perception/object_detection_2d/__init__.py
@@ -4,6 +4,7 @@ from opendr.perception.object_detection_2d.gem.gem_learner import GemLearner
 from opendr.perception.object_detection_2d.retinaface.retinaface_learner import RetinaFaceLearner
 from opendr.perception.object_detection_2d.ssd.ssd_learner import SingleShotDetectorLearner
 from opendr.perception.object_detection_2d.yolov3.yolov3_learner import YOLOv3DetectorLearner
+from opendr.perception.object_detection_2d.nanodet.nanodet_learner import NanodetLearner
 
 from opendr.perception.object_detection_2d.datasets.wider_person import WiderPersonDataset
 from opendr.perception.object_detection_2d.datasets.wider_face import WiderFaceDataset
@@ -16,6 +17,6 @@ from opendr.perception.object_detection_2d.nms.fast_nms.fast_nms import FastNMS
 from opendr.perception.object_detection_2d.nms.soft_nms.soft_nms import SoftNMS
 from opendr.perception.object_detection_2d.nms.seq2seq_nms.seq2seq_nms_learner import Seq2SeqNMSLearner
 
-__all__ = ['CenterNetDetectorLearner', 'DetrLearner', 'GemLearner', 'RetinaFaceLearner',
-           'SingleShotDetectorLearner', 'YOLOv3DetectorLearner', 'WiderPersonDataset', 'WiderFaceDataset',
-           'transforms', 'draw_bounding_boxes', 'ClusterNMS', 'FastNMS', 'SoftNMS', 'Seq2SeqNMSLearner']
+__all__ = ['CenterNetDetectorLearner', 'DetrLearner', 'GemLearner', 'RetinaFaceLearner', 'SingleShotDetectorLearner',
+           'YOLOv3DetectorLearner', 'NanodetLearner', 'WiderPersonDataset', 'WiderFaceDataset', 'transforms',
+           'draw_bounding_boxes', 'ClusterNMS', 'FastNMS', 'SoftNMS', 'Seq2SeqNMSLearner']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/README.md b/src/opendr/perception/object_detection_2d/nanodet/README.md
new file mode 100644
index 0000000000000000000000000000000000000000..409e07a84764a062cec217d00d3ecf8977ef5464
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/README.md
@@ -0,0 +1,13 @@
+OpenDR 2D Object Detection - Nanodet
+======
+
+This folder contains the OpenDR Learner class for the NanoDet 2D object detector.
+
+Sources
+------
+Large parts of the implementation are taken from the [NanoDet GitHub repository](https://github.com/RangiLyu/nanodet), with modifications to make it compatible with the OpenDR specifications.
+
+Usage
+------
+- For VOC- and COCO-like datasets, an ```ExternalDataset``` with the root path and the dataset name (```voc```, ```coco```) must be passed to the fit function, as in the sketch below.
+- The ```temp_path``` folder is used to save checkpoints during training.
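+
+A minimal training sketch (the constructor arguments, e.g. `model_to_use`, and the exact `fit`/`save` signatures shown here are indicative only; consult the learner documentation):
+
+```python
+from opendr.engine.datasets import ExternalDataset
+from opendr.perception.object_detection_2d import NanodetLearner
+
+# COCO-style dataset rooted at ./data/coco (hypothetical path)
+dataset = ExternalDataset(path="./data/coco", dataset_type="coco")
+
+learner = NanodetLearner(model_to_use="m", device="cuda", temp_path="./temp")
+learner.fit(dataset)
+learner.save("./nanodet_coco")
+```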
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/geometry/__init__.py
rename to src/opendr/perception/object_detection_2d/nanodet/__init__.py
diff --git a/projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/__init__.py
similarity index 100%
rename from projects/perception/slam/full_map_posterior_gmapping/src/map_simulator/src/map_simulator/robot_commands/__init__.py
rename to src/opendr/perception/object_detection_2d/nanodet/algorithm/__init__.py
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/config_file_detail.md b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/config_file_detail.md
new file mode 100644
index 0000000000000000000000000000000000000000..b6224df4d22af9c2cab241ee8f964459c012abdc
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/config_file_detail.md
@@ -0,0 +1,201 @@
+# NanoDet Config File Analysis
+
+NanoDet uses [yacs](https://github.com/rbgirshick/yacs) to read its YAML config files.
+
+## Saving path
+
+```yaml
+save_dir: PATH_TO_SAVE
+```
+
+Change `save_dir` to where you want to save logs and models. If the path doesn't exist, NanoDet will create it.
+
+## Model
+
+```yaml
+model:
+    arch:
+        name: OneStageDetector
+        backbone: xxx
+        fpn: xxx
+        head: xxx
+```
+
+Most detection model architectures can be divided into three parts: the backbone, the task head, and the connector between them (e.g., FPN, BiFPN, PAN).
+
+### Backbone
+
+```yaml
+backbone:
+    name: ShuffleNetV2
+    model_size: 1.0x
+    out_stages: [2,3,4]
+    activation: LeakyReLU
+    with_last_conv: False
+```
+
+NanoDet uses ShuffleNetV2 as its backbone. You can modify the model size, output feature levels and activation function. NanoDet also provides other lightweight backbones such as **GhostNet** and **MobileNetV2**, and you can add your own backbone network by importing it in `nanodet/model/backbone/__init__.py`.
+
+### FPN
+
+```yaml
+fpn:
+    name: PAN
+    in_channels: [116, 232, 464]
+    out_channels: 96
+    start_level: 0
+    num_outs: 3
+```
+
+NanoDet uses a modified [PAN](http://arxiv.org/abs/1803.01534) (downsampling convolutions are replaced with interpolation to reduce the amount of computation).
+
+`in_channels`: a list of feature map channels extracted from the backbone.
+
+`out_channels`: output feature map channel.
+
+### Head
+
+```yaml
+head:
+    name: NanoDetHead
+    num_classes: 80
+    input_channel: 96
+    feat_channels: 96
+    stacked_convs: 2
+    share_cls_reg: True
+    octave_base_scale: 8
+    scales_per_octave: 1
+    strides: [8, 16, 32]
+    reg_max: 7
+    norm_cfg:
+      type: BN
+    loss:
+```
+
+`name`: task head class name
+
+`num_classes`: number of classes
+
+`input_channel`: input feature map channel
+
+`feat_channels`: channel of task head convs
+
+`stacked_convs`: how many conv blocks are used in one task head
+
+`share_cls_reg`: whether to use the same conv blocks for classification and box regression
+
+`octave_base_scale`: base box scale
+
+`scales_per_octave`: anchor-free models only have one base box, so the default value is 1
+
+`strides`: downsample stride of each feature map level
+
+`reg_max`: max value of per-level l-r-t-b distance
+
+`norm_cfg`: normalization layer setting
+
+`loss`: adjust loss functions and weights
+
+## Weight averaging
+
+NanoDet supports weight averaging methods such as EMA (exponential moving average):
+
+```yaml
+model:
+  weight_averager:
+    name: ExpMovingAverager
+    decay: 0.9998
+  arch:
+    ...
+```
+
+## Data
+
+```yaml
+data:
+    train:
+        input_size: [320,320]
+        keep_ratio: True
+        multi_scale: [0.6, 1.4]
+        pipeline:
+    val:
+        ...
+```
+
+In `data` you need to set your training and validation datasets.
+
+`input_size`: [width, height]
+`keep_ratio`: whether to keep the original image aspect ratio when resizing to the input size
+`multi_scale`: scaling range for multi-scale training. Set to None to turn it off.
+`pipeline`: data preprocessing and augmentation pipeline
+
+## Device
+
+```yaml
+device:
+    gpu_ids: [0]
+    workers_per_gpu: 12
+    batchsize_per_gpu: 160
+```
+
+`gpu_ids`: CUDA device IDs. For multi-GPU training, set [0, 1, 2...].
+
+`workers_per_gpu`: number of dataloader processes per GPU
+
+`batchsize_per_gpu`: number of images in one batch on each GPU
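+
+For example, with `gpu_ids: [0, 1]` and `batchsize_per_gpu: 160`, the effective global batch size is 2 × 160 = 320.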
+
+## Schedule
+
+```yaml
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.14
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 300
+    ratio: 0.1
+  total_epochs: 70
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [40,55,60,65]
+    gamma: 0.1
+  val_intervals: 10
+```
+
+Set training schedule.
+
+`resume`: checkpoint to resume training from; if 0, the model starts from random initialization
+
+`load_model`: path to the trained weights
+
+`optimizer`: supports all optimizers provided by PyTorch.
+
+You should scale the `lr` together with the total batch size, following the linear scaling rule from *[Accurate, Large Minibatch SGD: Training ImageNet in 1 Hour](https://research.fb.com/wp-content/uploads/2017/06/imagenet1kin1h5.pdf)*.
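+
+For example, the NanoDet-m transformer config in this toolkit uses `lr: 0.14` with `batchsize_per_gpu: 160`; training with a total batch size of 80 would suggest a linearly scaled learning rate of about 0.14 × 80 / 160 = 0.07.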
+
+`warmup`: warm up the network before training. Three types of warmup are supported: `constant`, `exp` and `linear`.
+
+`total_epochs`: total epochs to train
+
+`lr_schedule`: please refer to [pytorch lr_scheduler documentation](https://pytorch.org/docs/stable/optim.html?highlight=lr_scheduler#torch.optim.lr_scheduler)
+
+`val_intervals`: number of epochs between evaluations during training
+
+## Evaluate
+
+```yaml
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+```
+
+Currently only COCO evaluation is supported.
+
+`save_key`: metric used to select the best model. Supports mAP, AP50, AP75....
+
+****
+
+`class_names`: used in visualization
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite0_320.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite0_320.yml
new file mode 100644
index 0000000000000000000000000000000000000000..cdddc320cb8b183a13113a25a30640e846b413af
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite0_320.yml
@@ -0,0 +1,112 @@
+# nanodet-EfficientNet-Lite0_320
+# COCO mAP(0.5:0.95) = 0.247
+#             AP_50  = 0.404
+#             AP_75  = 0.250
+#           AP_small = 0.079
+#               AP_m = 0.243
+#               AP_l = 0.406
+save_dir: ./workspace/efficient0_320
+check_point_name: EfficientNet_Lite0_320
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: EfficientNetLite
+      model_name: efficientnet_lite0
+      out_stages: [2,4,6]
+      activation: ReLU6
+    fpn:
+      name: PAN
+      in_channels: [40, 112, 320]
+      out_channels: 96
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 96
+      feat_channels: 96
+      activation: ReLU6
+      stacked_convs: 2
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[127.0, 127.0, 127.0], [128.0, 128.0, 128.0]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[127.0, 127.0, 127.0], [128.0, 128.0, 128.0]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 12
+  batchsize_per_gpu: 150
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.15
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.01
+  total_epochs: 190
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [140,170,180,185]
+    gamma: 0.1
+  val_intervals: 1
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite1_416.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite1_416.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a189662a77738003f78cf1baf283e64f85dd44f6
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite1_416.yml
@@ -0,0 +1,113 @@
+# nanodet-EfficientNet-Lite1_416
+# COCO mAP(0.5:0.95) = 0.303
+#             AP_50  = 0.471
+#             AP_75  = 0.313
+#           AP_small = 0.122
+#               AP_m = 0.321
+#               AP_l = 0.432
+save_dir: ./workspace/efficient1_416_SGD
+check_point_name: EfficientNet_Lite1_416
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: EfficientNetLite
+      model_name: efficientnet_lite1
+      out_stages: [2,4,6]
+      activation: ReLU6
+      pretrain: True
+    fpn:
+      name: PAN
+      in_channels: [40, 112, 320]
+      out_channels: 128
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 3
+      activation: ReLU6
+      share_cls_reg: True
+      octave_base_scale: 8
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 10
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.5, 1.5]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[127.0, 127.0, 127.0], [128.0, 128.0, 128.0]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[127.0, 127.0, 127.0], [128.0, 128.0, 128.0]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 12
+  batchsize_per_gpu: 100
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.07
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.01
+  total_epochs: 170
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [130,150,160,165]
+    gamma: 0.1
+  val_intervals: 5
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite2_512.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite2_512.yml
new file mode 100644
index 0000000000000000000000000000000000000000..20664fe7ca5543383bcdef10a2ade92ea20f0964
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/EfficientNet_Lite/nanodet_EfficientNet_Lite2_512.yml
@@ -0,0 +1,113 @@
+# nanodet-EfficientNet-Lite2_512
+# COCO mAP(0.5:0.95) = 0.326
+#             AP_50  = 0.501
+#             AP_75  = 0.344
+#           AP_small = 0.152
+#               AP_m = 0.342
+#               AP_l = 0.481
+save_dir: ./workspace/efficientlite2_512
+check_point_name: EfficientNet_Lite2_512
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: EfficientNetLite
+      model_name: efficientnet_lite2
+      out_stages: [2,4,6]
+      activation: ReLU6
+      pretrain: True
+    fpn:
+      name: PAN
+      in_channels: [48, 120, 352]
+      out_channels: 128
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 4
+      activation: ReLU6
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 10
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [512,512] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.5, 1.5]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[127.0, 127.0, 127.0], [128.0, 128.0, 128.0]]
+  val:
+    input_size: [512,512] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[127.0, 127.0, 127.0], [128.0, 128.0, 128.0]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 12
+  batchsize_per_gpu: 60
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.06
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 300
+    ratio: 0.1
+  total_epochs: 135
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [90,110,120,130]
+    gamma: 0.1
+  val_intervals: 5
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/RepVGG/nanodet_RepVGG_A0_416.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/RepVGG/nanodet_RepVGG_A0_416.yml
new file mode 100644
index 0000000000000000000000000000000000000000..8a0d8debebe6a928aa5c98ff9b45862f6f4cdab5
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/RepVGG/nanodet_RepVGG_A0_416.yml
@@ -0,0 +1,107 @@
+save_dir: ./workspace/RepVGG_A0_416
+check_point_name: RepVGG_A0_416
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: RepVGG
+      arch: A0
+      out_stages: [2,3,4]
+      activation: ReLU
+      last_channel: 512
+      deploy: False
+    fpn:
+      name: PAN
+      in_channels: [96, 192, 512]
+      out_channels: 128
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      conv_type: Conv
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 2
+      activation: ReLU
+      share_cls_reg: True
+      octave_base_scale: 8
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 10
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.5, 1.5]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 1
+  batchsize_per_gpu: 100
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.07
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.01
+  total_epochs: 170
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [130,150,160,165]
+    gamma: 0.1
+  val_intervals: 5
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/Transformer/nanodet_t.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/Transformer/nanodet_t.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a8c312cd61024f339a92e3fb578830956847a64b
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/Transformer/nanodet_t.yml
@@ -0,0 +1,115 @@
+# NanoDet-m with transformer attention
+# COCO mAP(0.5:0.95) = 0.217
+#             AP_50  = 0.363
+#             AP_75  = 0.218
+#           AP_small = 0.069
+#               AP_m = 0.214
+#               AP_l = 0.364
+
+save_dir: ./workspace/nanodet_t
+check_point_name: t
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.0x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: TAN # transformer attention network
+      in_channels: [116, 232, 464]
+      out_channels: 128
+      feature_hw: [20,20] # size for position embedding
+      num_heads: 8
+      num_encoders: 1
+      mlp_ratio: 4
+      dropout_ratio: 0.1
+      activation: LeakyReLU
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 2
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.8, 1.2]
+      saturation: [0.8, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 8
+  batchsize_per_gpu: 160
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.14
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.01
+  total_epochs: 190
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [140,170,180,185]
+    gamma: 0.1
+  val_intervals: 10
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_g.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_g.yml
new file mode 100644
index 0000000000000000000000000000000000000000..0d09c335ab56284d7fa61db91f46f68538b8675f
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_g.yml
@@ -0,0 +1,115 @@
+# NanoDet-g-416 is designed for edge NPUs, GPUs or TPUs with high parallel computing power but low memory bandwidth
+# COCO mAP(0.5:0.95) = 22.9
+# Flops = 4.2B
+# Params = 3.8M
+# COCO pre-trained weight link: https://drive.google.com/file/d/10uW7oqZKw231l_tr4C1bJWkbCXgBf7av/view?usp=sharing
+save_dir: ./workspace/nanodet_g
+check_point_name: g
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: CustomCspNet
+      net_cfg: [[ 'Conv', 3, 32, 3, 2],  # 1/2
+                [ 'MaxPool', 3, 2 ],  # 1/4
+                [ 'CspBlock', 32, 1, 3, 1 ],  # 1/4
+                [ 'CspBlock', 64, 2, 3, 2 ],  # 1/8
+                [ 'CspBlock', 128, 2, 3, 2 ],  # 1/16
+                [ 'CspBlock', 256, 3, 3, 2 ]]  # 1/32
+      out_stages: [3,4,5]
+      activation: LeakyReLU
+    fpn:
+      name: PAN
+      in_channels: [128, 256, 512]
+      out_channels: 128
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      conv_type: Conv
+      activation: LeakyReLU
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 1
+      share_cls_reg: True
+      octave_base_scale: 8
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 10
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 10
+  batchsize_per_gpu: 128
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.1
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.01
+  total_epochs: 190
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [130,160,175,185]
+    gamma: 0.1
+  val_intervals: 5
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m.yml
new file mode 100644
index 0000000000000000000000000000000000000000..876168e7adefebf9446be5b936f1948c65cf3237
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m.yml
@@ -0,0 +1,104 @@
+# Config file example
+save_dir: ./workspace/nanodet_m
+check_point_name: m
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.0x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: PAN
+      in_channels: [116, 232, 464]
+      out_channels: 96
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 96
+      feat_channels: 96
+      stacked_convs: 2
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 8
+  batchsize_per_gpu: 192
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.14
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 300
+    ratio: 0.1
+  total_epochs: 280
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [240,260,275]
+    gamma: 0.1
+  val_intervals: 10
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
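
All of these legacy configs share the same top-level layout (save_dir / check_point_name / model / data / device / schedule / evaluator / log / class_names). As a quick orientation aid, here is a minimal sketch of reading this `nanodet_m.yml` with plain PyYAML and checking a few fields; the file path and direct dictionary access are editor assumptions, not the loader the toolkit itself uses.

```python
# Editor sketch: inspect the config above with PyYAML (not the toolkit's own loader).
import yaml

with open("nanodet_m.yml") as f:   # path is an assumption
    cfg = yaml.safe_load(f)

print(cfg["model"]["arch"]["backbone"]["name"])    # ShuffleNetV2
print(cfg["data"]["train"]["input_size"])          # [320, 320], given as [w, h]
print(cfg["schedule"]["optimizer"]["lr"])          # 0.14
# The head must predict exactly one score per listed class name.
assert cfg["model"]["arch"]["head"]["num_classes"] == len(cfg["class_names"])
```
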
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_0.5x.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_0.5x.yml
new file mode 100644
index 0000000000000000000000000000000000000000..2a38388336b3e0c88688fe17e24e7e3253e328ad
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_0.5x.yml
@@ -0,0 +1,110 @@
+# nanodet-m-0.5x
+# COCO mAP(0.5:0.95) = 0.135
+#             AP_50  = 0.245
+#             AP_75  = 0.129
+#           AP_small = 0.036
+#               AP_m = 0.119
+#               AP_l = 0.232
+save_dir: ./workspace/nanodet_m_0.5x
+check_point_name: m_0.5x
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: ShuffleNetV2
+      model_size: 0.5x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: PAN
+      in_channels: [48, 96, 192]
+      out_channels: 96
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 96
+      feat_channels: 96
+      stacked_convs: 2
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.5, 1.5]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 8
+  batchsize_per_gpu: 96
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.07
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 1000
+    ratio: 0.00001
+  total_epochs: 180
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [130,160,175]
+    gamma: 0.1
+  val_intervals: 10
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 50
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_1.5x.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_1.5x.yml
new file mode 100644
index 0000000000000000000000000000000000000000..a54268f70ad8d74d8c98dd257ac921c275634c9a
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_1.5x.yml
@@ -0,0 +1,111 @@
+# nanodet-m-1.5x
+# COCO mAP(0.5:0.95) = 0.235
+#             AP_50  = 0.384
+#             AP_75  = 0.239
+#           AP_small = 0.069
+#               AP_m = 0.235
+#               AP_l = 0.389
+save_dir: ./workspace/nanodet_m_1.5x
+check_point_name: m_1.5x
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.5x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: PAN
+      in_channels: [176, 352, 704]
+      out_channels: 128
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 2
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 8
+  batchsize_per_gpu: 192
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.14
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 300
+    ratio: 0.1
+  total_epochs: 280
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [240,260,275]
+    gamma: 0.1
+  val_intervals: 10
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_1.5x_416.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_1.5x_416.yml
new file mode 100644
index 0000000000000000000000000000000000000000..b8274403b1f52563c331babf0e7d3709ff27a418
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_1.5x_416.yml
@@ -0,0 +1,110 @@
+# nanodet-m-1.5x-416
+# COCO mAP(0.5:0.95) = 0.268
+#             AP_50  = 0.424
+#             AP_75  = 0.276
+#           AP_small = 0.098
+#               AP_m = 0.277
+#               AP_l = 0.420
+save_dir: ./workspace/nanodet_m_1.5x_416
+check_point_name: m_1.5x_416
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.5x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: PAN
+      in_channels: [176, 352, 704]
+      out_channels: 128
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 2
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.5, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 8
+  batchsize_per_gpu: 176
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.14
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 300
+    ratio: 0.1
+  total_epochs: 280
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [240,260,275]
+    gamma: 0.1
+  val_intervals: 10
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_416.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_416.yml
new file mode 100644
index 0000000000000000000000000000000000000000..eb30de1e0d2fad743d733b8a9cf0174276f82372
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/legacy_v0.x_configs/nanodet_m_416.yml
@@ -0,0 +1,111 @@
+# nanodet-m-416
+# COCO mAP(0.5:0.95) = 0.235
+#             AP_50  = 0.384
+#             AP_75  = 0.242
+#           AP_small = 0.082
+#               AP_m = 0.240
+#               AP_l = 0.375
+save_dir: ./workspace/nanodet_m_416
+check_point_name: m_416
+model:
+  arch:
+    name: OneStageDetector
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.0x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: PAN
+      in_channels: [116, 232, 464]
+      out_channels: 96
+      start_level: 0
+      num_outs: 3
+    head:
+      name: NanoDetHead
+      num_classes: 80
+      input_channel: 96
+      feat_channels: 96
+      stacked_convs: 2
+      share_cls_reg: True
+      octave_base_scale: 5
+      scales_per_octave: 1
+      strides: [8, 16, 32]
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.5, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 8
+  batchsize_per_gpu: 192
+schedule:
+  resume: 0
+  optimizer:
+    name: SGD
+    lr: 0.14
+    momentum: 0.9
+    weight_decay: 0.0001
+  warmup:
+    name: linear
+    steps: 300
+    ratio: 0.1
+  total_epochs: 280
+  lr_schedule:
+    name: MultiStepLR
+    milestones: [240,260,275]
+    gamma: 0.1
+  val_intervals: 10
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+
+log:
+  interval: 10
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_custom.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_custom.yml
new file mode 100644
index 0000000000000000000000000000000000000000..bf58986a4863546e94977baaea32b4c2c9d71fbf
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_custom.yml
@@ -0,0 +1,125 @@
+# nanodet-plus-m-1.5x_416
+# COCO mAP(0.5:0.95) = 0.341
+#             AP_50  = 0.506
+#             AP_75  = 0.357
+#           AP_small = 0.143
+#               AP_m = 0.363
+#               AP_l = 0.539
+save_dir: ./workspace/nanodet_plus_m_1.5x_416/test_training
+check_point_name: plus_m_1.5x_416_default
+model:
+  weight_averager:
+    name: ExpMovingAverager
+    decay: 0.9998
+  arch:
+    name: NanoDetPlus
+    detach_epoch: 10
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.5x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: GhostPAN
+      in_channels: [176, 352, 704]
+      out_channels: 128
+      kernel_size: 5
+      num_extra_level: 1
+      use_depthwise: True
+      activation: LeakyReLU
+    head:
+      name: NanoDetPlusHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 2
+      kernel_size: 5
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+    # Auxiliary head, only used at training time.
+    aux_head:
+      name: SimpleConvHead
+      num_classes: 80
+      input_channel: 256
+      feat_channels: 256
+      stacked_convs: 4
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: False
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[0.8, 1.2], [0.8, 1.2]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: False
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 10
+  batchsize_per_gpu: 12 #96
+schedule:
+  resume: 0
+  optimizer:
+    name: AdamW
+    lr: 0.000125
+    weight_decay: 0.05
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.0001
+  total_epochs: 300
+  lr_schedule:
+    name: CosineAnnealingLR
+    T_max: 300
+    eta_min: 0.00005
+  val_intervals: 10
+grad_clip: 35
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 50
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
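
The `weight_averager` block in this NanoDet-Plus config enables an exponential moving average (EMA) of the model weights with `decay: 0.9998`; the averaged copy is typically the one used for validation and export. The sketch below illustrates the mechanism only, under the editor's assumptions; it is not the toolkit's `ExpMovingAverager` implementation.

```python
# Editor sketch of exponential moving averaging of model weights (illustrative only).
import copy
import torch

class SimpleEMA:
    def __init__(self, model: torch.nn.Module, decay: float = 0.9998):
        self.decay = decay
        # Keep a frozen shadow copy that accumulates the moving average.
        self.shadow = copy.deepcopy(model).eval()
        for p in self.shadow.parameters():
            p.requires_grad_(False)

    @torch.no_grad()
    def update(self, model: torch.nn.Module):
        # shadow = decay * shadow + (1 - decay) * current, applied per parameter.
        for avg_p, p in zip(self.shadow.parameters(), model.parameters()):
            avg_p.mul_(self.decay).add_(p, alpha=1.0 - self.decay)
        # Real implementations usually also track buffers (e.g. BatchNorm statistics).
```
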
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_guide.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_guide.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3729c111ec544dd6443dd52c54ac4b83637a4805
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_guide.yml
@@ -0,0 +1,107 @@
+# Config file example
+save_dir: ./workspace/nanodet_m
+check_point_name:
+model:
+  weight_averager:
+    name: ExpMovingAverager
+    decay: 0.9998
+  arch:
+    name: NanoDetPlus
+    detach_epoch: 10
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.0x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: GhostPAN
+      in_channels: [116, 232, 464]
+      out_channels: 96
+      kernel_size: 5
+      num_extra_level: 1
+      use_depthwise: True
+      activation: LeakyReLU
+    head:
+      name: NanoDetPlusHead
+      num_classes: 80
+      input_channel: 96
+      feat_channels: 96
+      stacked_convs: 2
+      kernel_size: 5
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+    # Auxiliary head, only used at training time.
+    aux_head:
+      name: SimpleConvHead
+      num_classes: 80
+      input_channel: 192
+      feat_channels: 192
+      stacked_convs: 4
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+
+class_names: &class_names ['NAME1', 'NAME2', 'NAME3', 'NAME4', '...']  # Please fill in the category names (do not include the background category)
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[1, 1], [1, 1]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.8, 1.2]
+      saturation: [0.8, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: True
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0] # Set to e.g. [0, 1, 2, 3] if you have multiple GPUs
+  workers_per_gpu: 8
+  batchsize_per_gpu: 96
+schedule:
+  resume: 0
+  optimizer:
+    name: AdamW
+    lr: 0.001
+    weight_decay: 0.05
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.0001
+  total_epochs: 300
+  lr_schedule:
+    name: CosineAnnealingLR
+    T_max: 300
+    eta_min: 0.00005
+  val_intervals: 10
+grad_clip: 35
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+
+log:
+  interval: 10
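
Since this guide config is a template, the only fields a user normally has to edit for a custom dataset are `class_names` and the matching `num_classes` entries of the head and aux_head. The helper below sketches that customization programmatically; the function name, file paths and example class names are editor assumptions.

```python
# Editor sketch: fill in the guide template for a custom dataset (names/paths are assumptions).
import yaml

def customize_guide(template_path, out_path, class_names):
    with open(template_path) as f:
        cfg = yaml.safe_load(f)
    cfg["class_names"] = list(class_names)
    cfg["model"]["arch"]["head"]["num_classes"] = len(class_names)
    cfg["model"]["arch"]["aux_head"]["num_classes"] = len(class_names)
    cfg["check_point_name"] = "custom"
    with open(out_path, "w") as f:
        yaml.safe_dump(cfg, f, sort_keys=False)

customize_guide("nanodet_guide.yml", "nanodet_custom_dataset.yml",
                ["car", "pedestrian", "cyclist"])
```
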
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_1.5x_320.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_1.5x_320.yml
new file mode 100644
index 0000000000000000000000000000000000000000..3dcd1a2973bf552d065b0e238ef12f23f563dbad
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_1.5x_320.yml
@@ -0,0 +1,125 @@
+# nanodet-plus-m-1.5x_320
+# COCO mAP(0.5:0.95) = 0.299
+#             AP_50  = 0.454
+#             AP_75  = 0.312
+#           AP_small = 0.102
+#               AP_m = 0.309
+#               AP_l = 0.493
+save_dir: ./workspace/nanodet_plus_m_1.5x_320
+check_point_name: plus_m_1.5x_320
+model:
+  weight_averager:
+    name: ExpMovingAverager
+    decay: 0.9998
+  arch:
+    name: NanoDetPlus
+    detach_epoch: 10
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.5x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: GhostPAN
+      in_channels: [176, 352, 704]
+      out_channels: 128
+      kernel_size: 5
+      num_extra_level: 1
+      use_depthwise: True
+      activation: LeakyReLU
+    head:
+      name: NanoDetPlusHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 2
+      kernel_size: 5
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+    # Auxiliary head, only used at training time.
+    aux_head:
+      name: SimpleConvHead
+      num_classes: 80
+      input_channel: 256
+      feat_channels: 256
+      stacked_convs: 4
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: False
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[0.8, 1.2], [0.8, 1.2]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: False
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 10
+  batchsize_per_gpu: 96
+schedule:
+  resume: 0
+  optimizer:
+    name: AdamW
+    lr: 0.001
+    weight_decay: 0.05
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.0001
+  total_epochs: 300
+  lr_schedule:
+    name: CosineAnnealingLR
+    T_max: 300
+    eta_min: 0.00005
+  val_intervals: 10
+grad_clip: 35
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 50
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_1.5x_416.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_1.5x_416.yml
new file mode 100644
index 0000000000000000000000000000000000000000..5a76789b501fb2e62220ba78ba3806eff550e594
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_1.5x_416.yml
@@ -0,0 +1,125 @@
+# nanodet-plus-m-1.5x_416
+# COCO mAP(0.5:0.95) = 0.341
+#             AP_50  = 0.506
+#             AP_75  = 0.357
+#           AP_small = 0.143
+#               AP_m = 0.363
+#               AP_l = 0.539
+save_dir: ./workspace/nanodet_plus_m_1.5x_416
+check_point_name: plus_m_1.5x_416
+model:
+  weight_averager:
+    name: ExpMovingAverager
+    decay: 0.9998
+  arch:
+    name: NanoDetPlus
+    detach_epoch: 10
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.5x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: GhostPAN
+      in_channels: [176, 352, 704]
+      out_channels: 128
+      kernel_size: 5
+      num_extra_level: 1
+      use_depthwise: True
+      activation: LeakyReLU
+    head:
+      name: NanoDetPlusHead
+      num_classes: 80
+      input_channel: 128
+      feat_channels: 128
+      stacked_convs: 2
+      kernel_size: 5
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+    # Auxiliary head, only used at training time.
+    aux_head:
+      name: SimpleConvHead
+      num_classes: 80
+      input_channel: 256
+      feat_channels: 256
+      stacked_convs: 4
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: False
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[0.8, 1.2], [0.8, 1.2]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: False
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 10
+  batchsize_per_gpu: 96
+schedule:
+  resume: 0
+  optimizer:
+    name: AdamW
+    lr: 0.001
+    weight_decay: 0.05
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.0001
+  total_epochs: 300
+  lr_schedule:
+    name: CosineAnnealingLR
+    T_max: 300
+    eta_min: 0.00005
+  val_intervals: 10
+grad_clip: 35
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 50
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_320.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_320.yml
new file mode 100644
index 0000000000000000000000000000000000000000..e4b5f58f9c8afffd49359ac61cfef28fdef706eb
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_320.yml
@@ -0,0 +1,125 @@
+# nanodet-plus-m_320
+# COCO mAP(0.5:0.95) = 0.270
+#             AP_50  = 0.418
+#             AP_75  = 0.281
+#           AP_small = 0.083
+#               AP_m = 0.278
+#               AP_l = 0.451
+save_dir: ./workspace/nanodet_plus_m_320
+check_point_name: plus_m_320
+model:
+  weight_averager:
+    name: ExpMovingAverager
+    decay: 0.9998
+  arch:
+    name: NanoDetPlus
+    detach_epoch: 10
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.0x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: GhostPAN
+      in_channels: [116, 232, 464]
+      out_channels: 96
+      kernel_size: 5
+      num_extra_level: 1
+      use_depthwise: True
+      activation: LeakyReLU
+    head:
+      name: NanoDetPlusHead
+      num_classes: 80
+      input_channel: 96
+      feat_channels: 96
+      stacked_convs: 2
+      kernel_size: 5
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+    # Auxiliary head, only used at training time.
+    aux_head:
+      name: SimpleConvHead
+      num_classes: 80
+      input_channel: 192
+      feat_channels: 192
+      stacked_convs: 4
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+data:
+  train:
+    input_size: [320,320] #[w,h]
+    keep_ratio: False
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[0.8, 1.2], [0.8, 1.2]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [320,320] #[w,h]
+    keep_ratio: False
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0] # Set to e.g. [0, 1, 2, 3] if you have multiple GPUs
+  workers_per_gpu: 10
+  batchsize_per_gpu: 32 #96
+schedule:
+  resume: 0
+  optimizer:
+    name: AdamW
+    lr: 0.001
+    weight_decay: 0.05
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.0001
+  total_epochs: 300
+  lr_schedule:
+    name: CosineAnnealingLR
+    T_max: 300
+    eta_min: 0.00005
+  val_intervals: 10
+grad_clip: 35
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 50
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_416.yml b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_416.yml
new file mode 100644
index 0000000000000000000000000000000000000000..61a536ad7d9a3e6a4d5505f61b882d7c2da80f9f
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/config/nanodet_plus_m_416.yml
@@ -0,0 +1,125 @@
+# nanodet-plus-m_416
+# COCO mAP(0.5:0.95) = 0.304
+#             AP_50  = 0.459
+#             AP_75  = 0.317
+#           AP_small = 0.106
+#               AP_m = 0.322
+#               AP_l = 0.477
+save_dir: ./workspace/nanodet_plus_m_416
+check_point_name: plus_m_416
+model:
+  weight_averager:
+    name: ExpMovingAverager
+    decay: 0.9998
+  arch:
+    name: NanoDetPlus
+    detach_epoch: 10
+    backbone:
+      name: ShuffleNetV2
+      model_size: 1.0x
+      out_stages: [2,3,4]
+      activation: LeakyReLU
+    fpn:
+      name: GhostPAN
+      in_channels: [116, 232, 464]
+      out_channels: 96
+      kernel_size: 5
+      num_extra_level: 1
+      use_depthwise: True
+      activation: LeakyReLU
+    head:
+      name: NanoDetPlusHead
+      num_classes: 80
+      input_channel: 96
+      feat_channels: 96
+      stacked_convs: 2
+      kernel_size: 5
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+      norm_cfg:
+        type: BN
+      loss:
+        loss_qfl:
+          name: QualityFocalLoss
+          use_sigmoid: True
+          beta: 2.0
+          loss_weight: 1.0
+        loss_dfl:
+          name: DistributionFocalLoss
+          loss_weight: 0.25
+        loss_bbox:
+          name: GIoULoss
+          loss_weight: 2.0
+    # Auxiliary head, only used at training time.
+    aux_head:
+      name: SimpleConvHead
+      num_classes: 80
+      input_channel: 192
+      feat_channels: 192
+      stacked_convs: 4
+      strides: [8, 16, 32, 64]
+      activation: LeakyReLU
+      reg_max: 7
+data:
+  train:
+    input_size: [416,416] #[w,h]
+    keep_ratio: False
+    pipeline:
+      perspective: 0.0
+      scale: [0.6, 1.4]
+      stretch: [[0.8, 1.2], [0.8, 1.2]]
+      rotation: 0
+      shear: 0
+      translate: 0.2
+      flip: 0.5
+      brightness: 0.2
+      contrast: [0.6, 1.4]
+      saturation: [0.5, 1.2]
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+  val:
+    input_size: [416,416] #[w,h]
+    keep_ratio: False
+    pipeline:
+      normalize: [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]
+device:
+  gpu_ids: [0]
+  workers_per_gpu: 10
+  batchsize_per_gpu: 4 #96
+schedule:
+  resume: 0
+  optimizer:
+    name: AdamW
+    lr: 0.001
+    weight_decay: 0.05
+  warmup:
+    name: linear
+    steps: 500
+    ratio: 0.0001
+  total_epochs: 300
+  lr_schedule:
+    name: CosineAnnealingLR
+    T_max: 300
+    eta_min: 0.00005
+  val_intervals: 1
+grad_clip: 35
+evaluator:
+  name: CocoDetectionEvaluator
+  save_key: mAP
+log:
+  interval: 200
+
+class_names: ['person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
+              'train', 'truck', 'boat', 'traffic_light', 'fire_hydrant',
+              'stop_sign', 'parking_meter', 'bench', 'bird', 'cat', 'dog',
+              'horse', 'sheep', 'cow', 'elephant', 'bear', 'zebra', 'giraffe',
+              'backpack', 'umbrella', 'handbag', 'tie', 'suitcase', 'frisbee',
+              'skis', 'snowboard', 'sports_ball', 'kite', 'baseball_bat',
+              'baseball_glove', 'skateboard', 'surfboard', 'tennis_racket',
+              'bottle', 'wine_glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
+              'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot',
+              'hot_dog', 'pizza', 'donut', 'cake', 'chair', 'couch',
+              'potted_plant', 'bed', 'dining_table', 'toilet', 'tv', 'laptop',
+              'mouse', 'remote', 'keyboard', 'cell_phone', 'microwave',
+              'oven', 'toaster', 'sink', 'refrigerator', 'book', 'clock',
+              'vase', 'scissors', 'teddy_bear', 'hair_drier', 'toothbrush']
diff --git a/projects/perception/lightweight_open_pose/jetbot/results/.keep b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/__init__.py
similarity index 100%
rename from projects/perception/lightweight_open_pose/jetbot/results/.keep
rename to src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/__init__.py
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/batch_process.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/batch_process.py
new file mode 100644
index 0000000000000000000000000000000000000000..f84170a27524cf0e06db8e6e50379b41f3cb99da
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/batch_process.py
@@ -0,0 +1,37 @@
+from typing import Sequence
+
+import torch
+import torch.nn.functional as F
+
+
+def stack_batch_img(
+    img_tensors: Sequence[torch.Tensor], divisible: int = 0, pad_value: float = 0.0
+) -> torch.Tensor:
+    """
+    Args:
+        img_tensors (Sequence[torch.Tensor]):
+        divisible (int):
+        pad_value (float): value to pad
+
+    Returns:
+        torch.Tensor.
+    """
+    assert len(img_tensors) > 0
+    assert isinstance(img_tensors, (tuple, list))
+    assert divisible >= 0
+    img_heights = []
+    img_widths = []
+    for img in img_tensors:
+        assert img.shape[:-2] == img_tensors[0].shape[:-2]
+        img_heights.append(img.shape[-2])
+        img_widths.append(img.shape[-1])
+    max_h, max_w = max(img_heights), max(img_widths)
+    if divisible > 0:
+        max_h = (max_h + divisible - 1) // divisible * divisible
+        max_w = (max_w + divisible - 1) // divisible * divisible
+
+    batch_imgs = []
+    for img in img_tensors:
+        padding_size = [0, max_w - img.shape[-1], 0, max_h - img.shape[-2]]
+        batch_imgs.append(F.pad(img, padding_size, value=pad_value))
+    return torch.stack(batch_imgs, dim=0).contiguous()
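
A short usage sketch for `stack_batch_img`: three differently sized CHW images are padded on the right and bottom to a common size whose height and width are multiples of `divisible`, then stacked into one batch.

```python
# Editor sketch: pad and stack variable-sized CHW images with the function above.
import torch
from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.batch_process import stack_batch_img

imgs = [torch.rand(3, 317, 401), torch.rand(3, 280, 333), torch.rand(3, 320, 320)]
batch = stack_batch_img(imgs, divisible=32, pad_value=0.0)
print(batch.shape)  # torch.Size([3, 3, 320, 416]); the max width 401 is rounded up to 416
```
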
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/collate.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/collate.py
new file mode 100644
index 0000000000000000000000000000000000000000..825272bbc339173e828347256656bdbd09827632
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/collate.py
@@ -0,0 +1,78 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import collections
+import re
+
+import torch
+from torch._six import string_classes
+
+np_str_obj_array_pattern = re.compile(r"[SaUO]")
+
+default_collate_err_msg_format = (
+    "default_collate: batch must contain tensors, numpy arrays, numbers, "
+    "dicts or lists; found {}"
+)
+
+
+def collate_function(batch):
+    r"""Puts each data field into a tensor with outer dimension batch size"""
+
+    elem = batch[0]
+    elem_type = type(elem)
+    if isinstance(elem, torch.Tensor):
+        out = None
+        if torch.utils.data.get_worker_info() is not None:
+            # If we're in a background process, concatenate directly into a
+            # shared memory tensor to avoid an extra copy
+            numel = sum([x.numel() for x in batch])
+            storage = elem.storage()._new_shared(numel)
+            out = elem.new(storage)
+        return torch.stack(batch, 0, out=out)
+    elif elem_type.__module__ == "numpy" and elem_type.__name__ != "str_" and elem_type.__name__ != "string_":
+        elem = batch[0]
+        if elem_type.__name__ == "ndarray":
+            # array of string classes and object
+            if np_str_obj_array_pattern.search(elem.dtype.str) is not None:
+                raise TypeError(default_collate_err_msg_format.format(elem.dtype))
+
+            return batch
+        elif elem.shape == ():  # scalars
+            return batch
+    elif isinstance(elem, float):
+        return torch.tensor(batch, dtype=torch.float64)
+    elif isinstance(elem, int):
+        return torch.tensor(batch)
+    elif isinstance(elem, string_classes):
+        return batch
+    elif isinstance(elem, collections.abc.Mapping):
+        return {key: collate_function([d[key] for d in batch]) for key in elem}
+    elif isinstance(elem, tuple) and hasattr(elem, "_fields"):  # namedtuple
+        return elem_type(*(collate_function(samples) for samples in zip(*batch)))
+    elif isinstance(elem, collections.abc.Sequence):
+        transposed = zip(*batch)
+        return [collate_function(samples) for samples in transposed]
+
+    raise TypeError(default_collate_err_msg_format.format(elem_type))
+
+
+def naive_collate(batch):
+    """Only collate dict value in to a list. E.g. meta data dict and img_info
+    dict will be collated."""
+
+    elem = batch[0]
+    if isinstance(elem, dict):
+        return {key: naive_collate([d[key] for d in batch]) for key in elem}
+    else:
+        return batch
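
For orientation, a small illustrative example of `naive_collate`: dictionary values are gathered into plain Python lists instead of being stacked into tensors, which is what variable-length metadata (image ids, file names, raw boxes) needs.

```python
# Editor sketch: naive_collate keeps per-sample values in lists (no tensor stacking).
from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.collate import naive_collate

batch = [
    {"img_info": {"id": 1, "file_name": "a.jpg"}, "gt_boxes": [[0, 0, 10, 10]]},
    {"img_info": {"id": 2, "file_name": "b.jpg"}, "gt_boxes": []},
]
out = naive_collate(batch)
# out == {"img_info": {"id": [1, 2], "file_name": ["a.jpg", "b.jpg"]},
#         "gt_boxes": [[[0, 0, 10, 10]], []]}
```
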
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..b68b60e3894746e127cfbc4c6fa4c3993e626be5
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/__init__.py
@@ -0,0 +1,58 @@
+# Modifications Copyright 2021 - present, OpenDR European Project
+#
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import copy
+from opendr.engine.datasets import ExternalDataset
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.dataset.coco import CocoDataset
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.dataset.xml_dataset import XMLDataset
+
+
+def build_dataset(cfg, dataset, class_names, mode, verbose=True):
+    dataset_cfg = copy.deepcopy(cfg)
+    supported_datasets = ['coco', 'voc']
+    if isinstance(dataset, ExternalDataset):
+        if dataset.dataset_type.lower() not in supported_datasets:
+            raise UserWarning("ExternalDataset dataset_type must be one of: {}".format(supported_datasets))
+
+        if verbose:
+            print("Loading {} type dataset...".format(dataset.dataset_type))
+            print("From {}".format(dataset.path))
+
+        if dataset.dataset_type.lower() == 'voc':
+            if mode == "train":
+                img_path = "{}/train/JPEGImages".format(dataset.path)
+                ann_path = "{}/train/Annotations".format(dataset.path)
+            else:
+                img_path = "{}/val/JPEGImages".format(dataset.path)
+                ann_path = "{}/val/Annotations".format(dataset.path)
+            dataset = XMLDataset(img_path=img_path, ann_path=ann_path, mode=mode,
+                                 class_names=class_names, **dataset_cfg)
+
+        elif dataset.dataset_type.lower() == 'coco':
+            if mode == "train":
+                img_path = "{}/train2017".format(dataset.path)
+                ann_path = "{}/annotations/instances_train2017.json".format(dataset.path)
+            else:
+                img_path = "{}/val2017".format(dataset.path)
+                ann_path = "{}/annotations/instances_val2017.json".format(dataset.path)
+            dataset = CocoDataset(img_path=img_path, ann_path=ann_path, mode=mode, **dataset_cfg)
+        if verbose:
+            print("ExternalDataset loaded.")
+        return dataset
+    else:
+        raise ValueError("Dataset type {} not supported".format(type(dataset)))
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/base.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/base.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a144a1d4aabed91f383ecb6cedeea10951228dd
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/base.py
@@ -0,0 +1,124 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import random
+from abc import ABCMeta, abstractmethod
+from typing import Tuple
+
+import numpy as np
+from torch.utils.data import Dataset
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.transform import Pipeline
+
+
+class BaseDataset(Dataset, metaclass=ABCMeta):
+    """
+    A base class for detection datasets, adapted from MMDetection.
+    A dataset should provide images, annotations and a preprocessing pipeline.
+    NanoDet uses the [xmin, ymin, xmax, ymax] format for boxes and the
+    [[x0, y0], [x1, y1], ..., [xn, yn]] format for keypoints.
+    Instance masks should be decoded into binary masks for each instance, e.g.
+    {
+        'bbox': [xmin, ymin, xmax, ymax],
+        'mask': mask
+    }
+    Segmentation masks should be decoded into binary masks for each class.
+    Args:
+        img_path (str): image data folder
+        ann_path (str): annotation file path or folder
+        input_size (Tuple[int, int]): network input size as (width, height)
+        pipeline (Dict): data augmentation and normalization pipeline config
+        keep_ratio (bool): whether to keep the aspect ratio when resizing
+        use_instance_mask (bool): load instance segmentation data
+        use_seg_mask (bool): load semantic segmentation data
+        use_keypoint (bool): load pose keypoint data
+        load_mosaic (bool): whether to use mosaic data augmentation from YOLOv4
+        mode (str): 'train', 'val' or 'test'
+        multi_scale (Tuple[float, float]): Multi-scale factor range.
+    """
+
+    def __init__(
+        self,
+        img_path,
+        ann_path,
+        input_size,
+        pipeline,
+        keep_ratio=True,
+        use_instance_mask=False,
+        use_seg_mask=False,
+        use_keypoint=False,
+        load_mosaic=False,
+        mode="train",
+        multi_scale=None,
+    ):
+        assert mode in ["train", "val", "test"]
+        self.img_path = img_path
+        self.ann_path = ann_path
+        self.input_size = input_size
+        self.pipeline = Pipeline(pipeline, keep_ratio)
+        self.keep_ratio = keep_ratio
+        self.use_instance_mask = use_instance_mask
+        self.use_seg_mask = use_seg_mask
+        self.use_keypoint = use_keypoint
+        self.load_mosaic = load_mosaic
+        self.multi_scale = multi_scale
+        self.mode = mode
+
+        self.data_info = self.get_data_info(ann_path)
+
+    def __len__(self):
+        return len(self.data_info)
+
+    def __getitem__(self, idx):
+        if self.mode == "val" or self.mode == "test":
+            return self.get_val_data(idx)
+        else:
+            while True:
+                data = self.get_train_data(idx)
+                if data is None:
+                    idx = self.get_another_id()
+                    continue
+                return data
+
+    @staticmethod
+    def get_random_size(
+        scale_range: Tuple[float, float], image_size: Tuple[int, int]
+    ) -> Tuple[int, int]:
+        """
+        Get random image shape by multi-scale factor and image_size.
+        Args:
+            scale_range (Tuple[float, float]): Multi-scale factor range.
+                Format in [(width, height), (width, height)]
+            image_size (Tuple[int, int]): Image size. Format in (width, height).
+
+        Returns:
+            Tuple[int, int]
+        """
+        assert len(scale_range) == 2
+        scale_factor = random.uniform(*scale_range)
+        width = int(image_size[0] * scale_factor)
+        height = int(image_size[1] * scale_factor)
+        return width, height
+
+    @abstractmethod
+    def get_data_info(self, ann_path):
+        pass
+
+    @abstractmethod
+    def get_train_data(self, idx):
+        pass
+
+    @abstractmethod
+    def get_val_data(self, idx):
+        pass
+
+    def get_another_id(self):
+        return np.random.randint(0, len(self.data_info))
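+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (not part of the upstream NanoDet code): multi-scale
+    # training draws a random factor from `multi_scale` and rescales the nominal
+    # input size with it; get_random_size is a static method, so no dataset is needed.
+    print(BaseDataset.get_random_size((0.8, 1.2), (416, 416)))  # e.g. (437, 437)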
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/coco.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/coco.py
new file mode 100644
index 0000000000000000000000000000000000000000..a67ee7cb0c5e7a3ec7183304bca023e7142174ad
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/coco.py
@@ -0,0 +1,158 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+import cv2
+import numpy as np
+import torch
+from pycocotools.coco import COCO
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.dataset.base import BaseDataset
+
+
+class CocoDataset(BaseDataset):
+    def get_data_info(self, ann_path):
+        """
+        Load basic information of dataset such as image path, label and so on.
+        :param ann_path: coco json file path
+        :return: image info:
+        [{'license': 2,
+          'file_name': '000000000139.jpg',
+          'coco_url': 'http://images.cocodataset.org/val2017/000000000139.jpg',
+          'height': 426,
+          'width': 640,
+          'date_captured': '2013-11-21 01:34:01',
+          'flickr_url':
+              'http://farm9.staticflickr.com/8035/8024364858_9c41dc1666_z.jpg',
+          'id': 139},
+         ...
+        ]
+        """
+        self.coco_api = COCO(ann_path)
+        self.cat_ids = sorted(self.coco_api.getCatIds())
+        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+        self.cats = self.coco_api.loadCats(self.cat_ids)
+        self.class_names = [cat["name"] for cat in self.cats]
+        self.img_ids = sorted(self.coco_api.imgs.keys())
+        img_info = self.coco_api.loadImgs(self.img_ids)
+        return img_info
+
+    def get_per_img_info(self, idx):
+        img_info = self.data_info[idx]
+        file_name = img_info["file_name"]
+        height = img_info["height"]
+        width = img_info["width"]
+        id = img_info["id"]
+        if not isinstance(id, int):
+            raise TypeError("Image id must be int.")
+        info = {"file_name": file_name, "height": height, "width": width, "id": id}
+        return info
+
+    def get_img_annotation(self, idx):
+        """
+        load per image annotation
+        :param idx: index in dataloader
+        :return: annotation dict
+        """
+        img_id = self.img_ids[idx]
+        ann_ids = self.coco_api.getAnnIds([img_id])
+        anns = self.coco_api.loadAnns(ann_ids)
+        gt_bboxes = []
+        gt_labels = []
+        gt_bboxes_ignore = []
+        if self.use_instance_mask:
+            gt_masks = []
+        if self.use_keypoint:
+            gt_keypoints = []
+        for ann in anns:
+            if ann.get("ignore", False):
+                continue
+            x1, y1, w, h = ann["bbox"]
+            if ann["area"] <= 0 or w < 1 or h < 1:
+                continue
+            if ann["category_id"] not in self.cat_ids:
+                continue
+            bbox = [x1, y1, x1 + w, y1 + h]
+            if ann.get("iscrowd", False):
+                gt_bboxes_ignore.append(bbox)
+            else:
+                gt_bboxes.append(bbox)
+                gt_labels.append(self.cat2label[ann["category_id"]])
+                if self.use_instance_mask:
+                    gt_masks.append(self.coco_api.annToMask(ann))
+                if self.use_keypoint:
+                    gt_keypoints.append(ann["keypoints"])
+        if gt_bboxes:
+            gt_bboxes = np.array(gt_bboxes, dtype=np.float32)
+            gt_labels = np.array(gt_labels, dtype=np.int64)
+        else:
+            gt_bboxes = np.zeros((0, 4), dtype=np.float32)
+            gt_labels = np.array([], dtype=np.int64)
+        if gt_bboxes_ignore:
+            gt_bboxes_ignore = np.array(gt_bboxes_ignore, dtype=np.float32)
+        else:
+            gt_bboxes_ignore = np.zeros((0, 4), dtype=np.float32)
+        annotation = dict(
+            bboxes=gt_bboxes, labels=gt_labels, bboxes_ignore=gt_bboxes_ignore
+        )
+        if self.use_instance_mask:
+            annotation["masks"] = gt_masks
+        if self.use_keypoint:
+            if gt_keypoints:
+                annotation["keypoints"] = np.array(gt_keypoints, dtype=np.float32)
+            else:
+                annotation["keypoints"] = np.zeros((0, 51), dtype=np.float32)
+        return annotation
+
+    def get_train_data(self, idx):
+        """
+        Load image and annotation
+        :param idx:
+        :return: meta-data (a dict containing image, annotation and other information)
+        """
+        img_info = self.get_per_img_info(idx)
+        file_name = img_info["file_name"]
+        image_path = os.path.join(self.img_path, file_name)
+        img = cv2.imread(image_path)
+        if img is None:
+            print("image {} read failed.".format(image_path))
+            raise FileNotFoundError("Can't load image {}! Please check the image path.".format(image_path))
+        ann = self.get_img_annotation(idx)
+        meta = dict(
+            img=img, img_info=img_info, gt_bboxes=ann["bboxes"], gt_labels=ann["labels"]
+        )
+        if self.use_instance_mask:
+            meta["gt_masks"] = ann["masks"]
+        if self.use_keypoint:
+            meta["gt_keypoints"] = ann["keypoints"]
+
+        input_size = self.input_size
+        if self.multi_scale:
+            input_size = self.get_random_size(self.multi_scale, input_size)
+
+        meta = self.pipeline(self, meta, input_size)
+
+        meta["img"] = torch.from_numpy(meta["img"].transpose(2, 0, 1))
+        return meta
+
+    def get_val_data(self, idx):
+        """
+        Currently identical to get_train_data.
+        TTA (test-time augmentation) is not supported yet.
+        :param idx:
+        :return:
+        """
+        # TODO: support TTA
+        return self.get_train_data(idx)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/xml_dataset.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/xml_dataset.py
new file mode 100644
index 0000000000000000000000000000000000000000..c5778e1302d2c0c0203278465c2e682a0e660ba9
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/dataset/xml_dataset.py
@@ -0,0 +1,157 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import time
+import xml.etree.ElementTree as ET
+from collections import defaultdict
+
+from pycocotools.coco import COCO
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.dataset.coco import CocoDataset
+
+
+def get_file_list(path, type=".xml"):
+    file_names = []
+    for maindir, subdir, file_name_list in os.walk(path):
+        for filename in file_name_list:
+            apath = os.path.join(maindir, filename)
+            ext = os.path.splitext(apath)[1]
+            if ext == type:
+                file_names.append(filename)
+    return file_names
+
+
+class CocoXML(COCO):
+    def __init__(self, annotation):
+        """
+        Constructor of Microsoft COCO helper class for
+        reading and visualizing annotations.
+        :param annotation: annotation dict
+        :return:
+        """
+        # load dataset
+        self.dataset, self.anns, self.cats, self.imgs = dict(), dict(), dict(), dict()
+        self.imgToAnns, self.catToImgs = defaultdict(list), defaultdict(list)
+        dataset = annotation
+        assert type(dataset) == dict, "annotation file format {} not supported".format(
+            type(dataset)
+        )
+        self.dataset = dataset
+        self.createIndex()
+
+
+class XMLDataset(CocoDataset):
+    def __init__(self, class_names, **kwargs):
+        self.class_names = class_names
+        super(XMLDataset, self).__init__(**kwargs)
+
+    def xml_to_coco(self, ann_path):
+        """
+        convert xml annotations to coco_api
+        :param ann_path:
+        :return:
+        """
+        logging.info("loading annotations into memory...")
+        tic = time.time()
+        ann_file_names = get_file_list(ann_path, type=".xml")
+        logging.info("Found {} annotation files.".format(len(ann_file_names)))
+        image_info = []
+        categories = []
+        annotations = []
+        for idx, supercat in enumerate(self.class_names):
+            categories.append(
+                {"supercategory": supercat, "id": idx + 1, "name": supercat}
+            )
+        ann_id = 1
+        for idx, xml_name in enumerate(ann_file_names):
+            tree = ET.parse(os.path.join(ann_path, xml_name))
+            root = tree.getroot()
+            file_name = root.find("filename").text
+            width = int(root.find("size").find("width").text)
+            height = int(root.find("size").find("height").text)
+            info = {
+                "file_name": file_name,
+                "height": height,
+                "width": width,
+                "id": idx + 1,
+            }
+            image_info.append(info)
+            for _object in root.findall("object"):
+                category = _object.find("name").text
+                if category not in self.class_names:
+                    logging.warning(
+                        "WARNING! {} is not in class_names! "
+                        "Pass this box annotation.".format(category)
+                    )
+                    continue
+                for cat in categories:
+                    if category == cat["name"]:
+                        cat_id = cat["id"]
+                xmin = int(_object.find("bndbox").find("xmin").text)
+                ymin = int(_object.find("bndbox").find("ymin").text)
+                xmax = int(_object.find("bndbox").find("xmax").text)
+                ymax = int(_object.find("bndbox").find("ymax").text)
+                w = xmax - xmin
+                h = ymax - ymin
+                if w < 0 or h < 0:
+                    logging.warning(
+                        "WARNING! Find error data in file {}! Box w and "
+                        "h should > 0. Pass this box annotation.".format(xml_name)
+                    )
+                    continue
+                coco_box = [max(xmin, 0), max(ymin, 0), min(w, width), min(h, height)]
+                ann = {
+                    "image_id": idx + 1,
+                    "bbox": coco_box,
+                    "category_id": cat_id,
+                    "iscrowd": 0,
+                    "id": ann_id,
+                    "area": coco_box[2] * coco_box[3],
+                }
+                annotations.append(ann)
+                ann_id += 1
+
+        coco_dict = {
+            "images": image_info,
+            "categories": categories,
+            "annotations": annotations,
+        }
+        logging.info(
+            "Load {} xml files and {} boxes".format(len(image_info), len(annotations))
+        )
+        logging.info("Done (t={:0.2f}s)".format(time.time() - tic))
+        return coco_dict
+
+    def get_data_info(self, ann_path):
+        """
+        Load basic information of dataset such as image path, label and so on.
+        :param ann_path: coco json file path
+        :return: image info:
+        [{'file_name': '000000000139.jpg',
+          'height': 426,
+          'width': 640,
+          'id': 139},
+         ...
+        ]
+        """
+        coco_dict = self.xml_to_coco(ann_path)
+        self.coco_api = CocoXML(coco_dict)
+        self.cat_ids = sorted(self.coco_api.getCatIds())
+        self.cat2label = {cat_id: i for i, cat_id in enumerate(self.cat_ids)}
+        self.cats = self.coco_api.loadCats(self.cat_ids)
+        self.img_ids = sorted(self.coco_api.imgs.keys())
+        img_info = self.coco_api.loadImgs(self.img_ids)
+        return img_info
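+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (not part of the upstream NanoDet code): CocoXML wraps a
+    # COCO-style dict built in memory (the output format of xml_to_coco), so the
+    # usual pycocotools API can be used on a VOC/XML dataset.
+    dummy = {
+        "images": [{"file_name": "0001.jpg", "height": 240, "width": 320, "id": 1}],
+        "categories": [{"supercategory": "person", "id": 1, "name": "person"}],
+        "annotations": [{"image_id": 1, "bbox": [10, 20, 30, 40], "category_id": 1,
+                         "iscrowd": 0, "id": 1, "area": 1200}],
+    }
+    api = CocoXML(dummy)
+    print(api.getCatIds(), api.getAnnIds(imgIds=[1]))  # [1] [1]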
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..c30ae7665b5536caa50cb5a5c6830c13f4875cf1
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/__init__.py
@@ -0,0 +1,17 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from .pipeline import Pipeline
+
+__all__ = ["Pipeline"]
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/color.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/color.py
new file mode 100644
index 0000000000000000000000000000000000000000..907b5337973797626712f19ba52c1ff257fc1ee1
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/color.py
@@ -0,0 +1,69 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import random
+
+import cv2
+import numpy as np
+
+
+def random_brightness(img, delta):
+    img += random.uniform(-delta, delta)
+    return img
+
+
+def random_contrast(img, alpha_low, alpha_up):
+    img *= random.uniform(alpha_low, alpha_up)
+    return img
+
+
+def random_saturation(img, alpha_low, alpha_up):
+    hsv_img = cv2.cvtColor(img.astype(np.float32), cv2.COLOR_BGR2HSV)
+    hsv_img[..., 1] *= random.uniform(alpha_low, alpha_up)
+    img = cv2.cvtColor(hsv_img, cv2.COLOR_HSV2BGR)
+    return img
+
+
+def normalize(meta, mean, std):
+    img = meta["img"].astype(np.float32)
+    mean = np.array(mean, dtype=np.float64).reshape(1, -1)
+    stdinv = 1 / np.array(std, dtype=np.float64).reshape(1, -1)
+    cv2.subtract(img, mean, img)
+    cv2.multiply(img, stdinv, img)
+    meta["img"] = img
+    return meta
+
+
+def _normalize(img, mean, std):
+    mean = np.array(mean, dtype=np.float32).reshape(1, 1, 3) / 255
+    std = np.array(std, dtype=np.float32).reshape(1, 1, 3) / 255
+    img = (img - mean) / std
+    return img
+
+
+def color_aug_and_norm(meta, kwargs):
+    img = meta["img"].astype(np.float32) / 255
+
+    if "brightness" in kwargs and random.randint(0, 1):
+        img = random_brightness(img, kwargs["brightness"])
+
+    if "contrast" in kwargs and random.randint(0, 1):
+        img = random_contrast(img, *kwargs["contrast"])
+
+    if "saturation" in kwargs and random.randint(0, 1):
+        img = random_saturation(img, *kwargs["saturation"])
+
+    img = _normalize(img, *kwargs["normalize"])
+    meta["img"] = img
+    return meta
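+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (not part of the upstream NanoDet code): apply the colour
+    # pipeline to a synthetic image. Only the mandatory "normalize" entry is given;
+    # the optional brightness/contrast/saturation keys are applied at random when
+    # present. The mean/std values below are the ImageNet statistics commonly used
+    # in the NanoDet configs.
+    meta = {"img": np.full((240, 320, 3), 128, dtype=np.uint8)}
+    kwargs = {"normalize": [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]}
+    out = color_aug_and_norm(meta, kwargs)
+    print(out["img"].shape, out["img"].dtype)  # (240, 320, 3) float32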
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/pipeline.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/pipeline.py
new file mode 100644
index 0000000000000000000000000000000000000000..24acdb1880536d521ab34ed9cd61cdcc19a15389
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/pipeline.py
@@ -0,0 +1,59 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import functools
+import warnings
+from typing import Dict, Tuple
+
+from torch.utils.data import Dataset
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.transform.color import color_aug_and_norm
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.transform.warp import ShapeTransform, warp_and_resize
+
+
+class LegacyPipeline:
+    def __init__(self, cfg, keep_ratio):
+        warnings.warn(
+            "Deprecated warning! Pipeline from nanodet v0.x has been deprecated,"
+            "Please use new Pipeline and update your config!"
+        )
+        self.warp = functools.partial(
+            warp_and_resize, warp_kwargs=cfg, keep_ratio=keep_ratio
+        )
+        self.color = functools.partial(color_aug_and_norm, kwargs=cfg)
+
+    def __call__(self, meta, dst_shape):
+        meta = self.warp(meta, dst_shape=dst_shape)
+        meta = self.color(meta=meta)
+        return meta
+
+
+class Pipeline:
+    """Data process pipeline. Apply augmentation and pre-processing on
+    meta_data from dataset.
+
+    Args:
+        cfg (Dict): Data pipeline config.
+        keep_ratio (bool): Whether to keep aspect ratio when resizing image.
+
+    """
+
+    def __init__(self, cfg: Dict, keep_ratio: bool):
+        self.shape_transform = ShapeTransform(keep_ratio, **cfg)
+        self.color = functools.partial(color_aug_and_norm, kwargs=cfg)
+
+    def __call__(self, dataset: Dataset, meta: Dict, dst_shape: Tuple[int, int]):
+        meta = self.shape_transform(meta, dst_shape=dst_shape)
+        meta = self.color(meta=meta)
+        return meta
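+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (not part of the upstream NanoDet code): in the toolkit
+    # the cfg dict comes from the YAML config (data.train/val.pipeline); here only
+    # the mandatory "normalize" entry is used, so with the default augmentation
+    # parameters the shape transform reduces to a plain resize.
+    import numpy as np
+
+    pipeline = Pipeline({"normalize": [[103.53, 116.28, 123.675], [57.375, 57.12, 58.395]]},
+                        keep_ratio=True)
+    meta = pipeline(None, {"img": np.zeros((240, 320, 3), dtype=np.uint8)}, dst_shape=(320, 320))
+    print(meta["img"].shape, meta["warp_matrix"].shape)  # (240, 320, 3) (3, 3)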
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/warp.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/warp.py
new file mode 100644
index 0000000000000000000000000000000000000000..6ffd1b66d32520acc0d8701106be3c6b0f423369
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/data/transform/warp.py
@@ -0,0 +1,330 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+import random
+from typing import Dict, Optional, Tuple
+
+import cv2
+import numpy as np
+
+
+def get_flip_matrix(prob=0.5):
+    """Random horizontal flip matrix; the flip is applied with probability `prob`."""
+    F = np.eye(3)
+    if random.random() < prob:
+        F[0, 0] = -1
+    return F
+
+
+def get_perspective_matrix(perspective=0.0):
+    """
+
+    :param perspective:
+    :return:
+    """
+    P = np.eye(3)
+    P[2, 0] = random.uniform(-perspective, perspective)  # x perspective (about y)
+    P[2, 1] = random.uniform(-perspective, perspective)  # y perspective (about x)
+    return P
+
+
+def get_rotation_matrix(degree=0.0):
+    """
+
+    :param degree:
+    :return:
+    """
+    R = np.eye(3)
+    a = random.uniform(-degree, degree)
+    R[:2] = cv2.getRotationMatrix2D(angle=a, center=(0, 0), scale=1)
+    return R
+
+
+def get_scale_matrix(ratio=(1, 1)):
+    """
+
+    :param ratio:
+    """
+    Scl = np.eye(3)
+    scale = random.uniform(*ratio)
+    Scl[0, 0] *= scale
+    Scl[1, 1] *= scale
+    return Scl
+
+
+def get_stretch_matrix(width_ratio=(1, 1), height_ratio=(1, 1)):
+    """
+
+    :param width_ratio:
+    :param height_ratio:
+    """
+    Str = np.eye(3)
+    Str[0, 0] *= random.uniform(*width_ratio)
+    Str[1, 1] *= random.uniform(*height_ratio)
+    return Str
+
+
+def get_shear_matrix(degree):
+    """
+
+    :param degree:
+    :return:
+    """
+    Sh = np.eye(3)
+    Sh[0, 1] = math.tan(
+        random.uniform(-degree, degree) * math.pi / 180
+    )  # x shear (deg)
+    Sh[1, 0] = math.tan(
+        random.uniform(-degree, degree) * math.pi / 180
+    )  # y shear (deg)
+    return Sh
+
+
+def get_translate_matrix(translate, width, height):
+    """
+
+    :param translate:
+    :return:
+    """
+    T = np.eye(3)
+    T[0, 2] = random.uniform(0.5 - translate, 0.5 + translate) * width  # x translation
+    T[1, 2] = random.uniform(0.5 - translate, 0.5 + translate) * height  # y translation
+    return T
+
+
+def get_resize_matrix(raw_shape, dst_shape, keep_ratio):
+    """
+    Get resize matrix for resizing raw img to input size
+    :param raw_shape: (width, height) of raw image
+    :param dst_shape: (width, height) of input image
+    :param keep_ratio: whether keep original ratio
+    :return: 3x3 Matrix
+    """
+    r_w, r_h = raw_shape
+    d_w, d_h = dst_shape
+    Rs = np.eye(3)
+    if keep_ratio:
+        C = np.eye(3)
+        C[0, 2] = -r_w / 2
+        C[1, 2] = -r_h / 2
+
+        if r_w / r_h < d_w / d_h:
+            ratio = d_h / r_h
+        else:
+            ratio = d_w / r_w
+        Rs[0, 0] *= ratio
+        Rs[1, 1] *= ratio
+
+        T = np.eye(3)
+        T[0, 2] = 0.5 * d_w
+        T[1, 2] = 0.5 * d_h
+        return T @ Rs @ C
+    else:
+        Rs[0, 0] *= d_w / r_w
+        Rs[1, 1] *= d_h / r_h
+        return Rs
+
+
+def warp_and_resize(
+    meta: Dict,
+    warp_kwargs: Dict,
+    dst_shape: Tuple[int, int],
+    keep_ratio: bool = True,
+):
+    # TODO: background, type
+    raw_img = meta["img"]
+    height = raw_img.shape[0]  # shape(h,w,c)
+    width = raw_img.shape[1]
+
+    # center
+    C = np.eye(3)
+    C[0, 2] = -width / 2
+    C[1, 2] = -height / 2
+
+    # do not change the order of mat mul
+    if "perspective" in warp_kwargs and random.randint(0, 1):
+        P = get_perspective_matrix(warp_kwargs["perspective"])
+        C = P @ C
+    if "scale" in warp_kwargs and random.randint(0, 1):
+        Scl = get_scale_matrix(warp_kwargs["scale"])
+        C = Scl @ C
+    if "stretch" in warp_kwargs and random.randint(0, 1):
+        Str = get_stretch_matrix(*warp_kwargs["stretch"])
+        C = Str @ C
+    if "rotation" in warp_kwargs and random.randint(0, 1):
+        R = get_rotation_matrix(warp_kwargs["rotation"])
+        C = R @ C
+    if "shear" in warp_kwargs and random.randint(0, 1):
+        Sh = get_shear_matrix(warp_kwargs["shear"])
+        C = Sh @ C
+    if "flip" in warp_kwargs:
+        F = get_flip_matrix(warp_kwargs["flip"])
+        C = F @ C
+    if "translate" in warp_kwargs and random.randint(0, 1):
+        T = get_translate_matrix(warp_kwargs["translate"], width, height)
+    else:
+        T = get_translate_matrix(0, width, height)
+    M = T @ C
+    # M = T @ Sh @ R @ Str @ P @ C
+    ResizeM = get_resize_matrix((width, height), dst_shape, keep_ratio)
+    M = ResizeM @ M
+    img = cv2.warpPerspective(raw_img, M, dsize=tuple(dst_shape))
+    meta["img"] = img
+    meta["warp_matrix"] = M
+    if "gt_bboxes" in meta:
+        boxes = meta["gt_bboxes"]
+        meta["gt_bboxes"] = warp_boxes(boxes, M, dst_shape[0], dst_shape[1])
+    if "gt_masks" in meta:
+        for i, mask in enumerate(meta["gt_masks"]):
+            meta["gt_masks"][i] = cv2.warpPerspective(mask, M, dsize=tuple(dst_shape))
+
+    return meta
+
+
+def warp_boxes(boxes, M, width, height):
+    """Apply the 3x3 warp matrix M to [xmin, ymin, xmax, ymax] boxes and clip them to the image."""
+    n = len(boxes)
+    if n:
+        # warp points
+        xy = np.ones((n * 4, 3))
+        xy[:, :2] = boxes[:, [0, 1, 2, 3, 0, 3, 2, 1]].reshape(
+            n * 4, 2
+        )  # x1y1, x2y2, x1y2, x2y1
+        xy = xy @ M.T  # transform
+        xy = (xy[:, :2] / xy[:, 2:3]).reshape(n, 8)  # rescale
+        # create new boxes
+        x = xy[:, [0, 2, 4, 6]]
+        y = xy[:, [1, 3, 5, 7]]
+        xy = np.concatenate((x.min(1), y.min(1), x.max(1), y.max(1))).reshape(4, n).T
+        # clip boxes
+        xy[:, [0, 2]] = xy[:, [0, 2]].clip(0, width)
+        xy[:, [1, 3]] = xy[:, [1, 3]].clip(0, height)
+        return xy.astype(np.float32)
+    else:
+        return boxes
+
+
+def get_minimum_dst_shape(
+    src_shape: Tuple[int, int],
+    dst_shape: Tuple[int, int],
+    divisible: Optional[int] = None,
+) -> Tuple[int, int]:
+    """Calculate minimum dst shape"""
+    src_w, src_h = src_shape
+    dst_w, dst_h = dst_shape
+
+    if src_w / src_h < dst_w / dst_h:
+        ratio = dst_h / src_h
+    else:
+        ratio = dst_w / src_w
+
+    dst_w = int(ratio * src_w)
+    dst_h = int(ratio * src_h)
+
+    if divisible and divisible > 0:
+        dst_w = max(divisible, int((dst_w + divisible - 1) // divisible * divisible))
+        dst_h = max(divisible, int((dst_h + divisible - 1) // divisible * divisible))
+    return dst_w, dst_h
+
+
+class ShapeTransform:
+    """Shape transforms including resize, random perspective, random scale,
+    random stretch, random rotation, random shear, random translate,
+    and random flip.
+
+    Args:
+        keep_ratio: Whether to keep aspect ratio of the image.
+        divisible: Make the image height and width divisible by this number (0 disables).
+        perspective: Random perspective factor.
+        scale: Random scale ratio.
+        stretch: Width and height stretch ratio range.
+        rotation: Random rotate degree.
+        shear: Random shear degree.
+        translate: Random translate ratio.
+        flip: Random flip probability.
+    """
+
+    def __init__(
+        self,
+        keep_ratio,
+        divisible=0,
+        perspective=0.0,
+        scale=(1, 1),
+        stretch=((1, 1), (1, 1)),
+        rotation=0.0,
+        shear=0.0,
+        translate=0.0,
+        flip=0.0,
+        **kwargs
+    ):
+        self.keep_ratio = keep_ratio
+        self.divisible = divisible
+        self.perspective = perspective
+        self.scale_ratio = scale
+        self.stretch_ratio = stretch
+        self.rotation_degree = rotation
+        self.shear_degree = shear
+        self.flip_prob = flip
+        self.translate_ratio = translate
+
+    def __call__(self, meta_data, dst_shape):
+        raw_img = meta_data["img"]
+        height = raw_img.shape[0]  # shape(h,w,c)
+        width = raw_img.shape[1]
+
+        # center
+        C = np.eye(3)
+        C[0, 2] = -width / 2
+        C[1, 2] = -height / 2
+
+        P = get_perspective_matrix(self.perspective)
+        C = P @ C
+
+        Scl = get_scale_matrix(self.scale_ratio)
+        C = Scl @ C
+
+        Str = get_stretch_matrix(*self.stretch_ratio)
+        C = Str @ C
+
+        R = get_rotation_matrix(self.rotation_degree)
+        C = R @ C
+
+        Sh = get_shear_matrix(self.shear_degree)
+        C = Sh @ C
+
+        F = get_flip_matrix(self.flip_prob)
+        C = F @ C
+
+        T = get_translate_matrix(self.translate_ratio, width, height)
+        M = T @ C
+
+        if self.keep_ratio:
+            dst_shape = get_minimum_dst_shape(
+                (width, height), dst_shape, self.divisible
+            )
+
+        ResizeM = get_resize_matrix((width, height), dst_shape, self.keep_ratio)
+        M = ResizeM @ M
+        img = cv2.warpPerspective(raw_img, M, dsize=tuple(dst_shape))
+        meta_data["img"] = img
+        meta_data["warp_matrix"] = M
+        if "gt_bboxes" in meta_data:
+            boxes = meta_data["gt_bboxes"]
+            meta_data["gt_bboxes"] = warp_boxes(boxes, M, dst_shape[0], dst_shape[1])
+        if "gt_masks" in meta_data:
+            for i, mask in enumerate(meta_data["gt_masks"]):
+                meta_data["gt_masks"][i] = cv2.warpPerspective(
+                    mask, M, dsize=tuple(dst_shape)
+                )
+
+        return meta_data
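+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (not part of the upstream NanoDet code): with the default
+    # (identity) augmentation parameters ShapeTransform only rescales the image and
+    # its boxes to the destination shape.
+    meta = {
+        "img": np.zeros((240, 320, 3), dtype=np.uint8),
+        "gt_bboxes": np.array([[10, 20, 110, 120]], dtype=np.float32),
+    }
+    out = ShapeTransform(keep_ratio=False)(meta, dst_shape=(160, 120))
+    print(out["img"].shape, out["gt_bboxes"])  # (120, 160, 3) [[ 5. 10. 55. 60.]]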
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/evaluator/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/evaluator/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..2e2a2513e909547adca7e1a3f097216ffbc8c7fb
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/evaluator/__init__.py
@@ -0,0 +1,25 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import copy
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.evaluator.coco_detection import CocoDetectionEvaluator
+
+
+def build_evaluator(cfg, dataset):
+    evaluator_cfg = copy.deepcopy(cfg)
+    name = evaluator_cfg.pop("name")
+    if name == "CocoDetectionEvaluator":
+        return CocoDetectionEvaluator(dataset)
+    else:
+        raise NotImplementedError
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/evaluator/coco_detection.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/evaluator/coco_detection.py
new file mode 100644
index 0000000000000000000000000000000000000000..c408d996a682c80bf472bd09fe729c029445edc0
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/evaluator/coco_detection.py
@@ -0,0 +1,151 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+import contextlib
+import copy
+import io
+import itertools
+import json
+import logging
+import os
+import warnings
+
+import numpy as np
+from pycocotools.cocoeval import COCOeval
+from tabulate import tabulate
+
+logger = logging.getLogger("NanoDet")
+
+
+def xyxy2xywh(bbox):
+    """
+    Convert a bbox from [x1, y1, x2, y2] to the COCO [x, y, w, h] format.
+    :param bbox: [x1, y1, x2, y2]
+    :return: [x, y, w, h]
+    """
+    return [
+        bbox[0],
+        bbox[1],
+        bbox[2] - bbox[0],
+        bbox[3] - bbox[1],
+    ]
+
+
+class CocoDetectionEvaluator:
+    def __init__(self, dataset):
+        assert hasattr(dataset, "coco_api")
+        self.class_names = dataset.class_names
+        self.coco_api = dataset.coco_api
+        self.cat_ids = dataset.cat_ids
+        self.metric_names = ["mAP", "AP_50", "AP_75", "AP_small", "AP_m", "AP_l"]
+
+    def results2json(self, results):
+        """
+        results: {image_id: {label: [bboxes...] } }
+        :return coco json format: {image_id:
+                                   category_id:
+                                   bbox:
+                                   score: }
+        """
+        json_results = []
+        for image_id, dets in results.items():
+            for label, bboxes in dets.items():
+                category_id = self.cat_ids[label]
+                for bbox in bboxes:
+                    score = float(bbox[4])
+                    detection = dict(
+                        image_id=int(image_id),
+                        category_id=int(category_id),
+                        bbox=xyxy2xywh(bbox),
+                        score=score,
+                    )
+                    json_results.append(detection)
+        return json_results
+
+    def evaluate(self, results, save_dir):  # rank=-1
+        results_json = self.results2json(results)
+        if len(results_json) == 0:
+            warnings.warn(
+                "Detection result is empty! Please check whether "
+                "training set is too small (need to increase val_interval "
+                "in config and train more epochs). Or check annotation "
+                "correctness."
+            )
+            empty_eval_results = {}
+            for key in self.metric_names:
+                empty_eval_results[key] = 0
+            return empty_eval_results
+        # json_path = os.path.join(save_dir, "results{}.json".format(rank))
+        json_path = os.path.join(save_dir, "results.json")
+        with open(json_path, "w") as f:
+            json.dump(results_json, f)
+        coco_dets = self.coco_api.loadRes(json_path)
+        coco_eval = COCOeval(
+            copy.deepcopy(self.coco_api), copy.deepcopy(coco_dets), "bbox"
+        )
+        coco_eval.evaluate()
+        coco_eval.accumulate()
+
+        # use logger to log coco eval results
+        redirect_string = io.StringIO()
+        with contextlib.redirect_stdout(redirect_string):
+            coco_eval.summarize()
+        logger.info("\n" + redirect_string.getvalue())
+
+        # print per class AP
+        headers = ["class", "AP50", "mAP"]
+        num_columns = 6
+        per_class_ap50s = []
+        per_class_maps = []
+        precisions = coco_eval.eval["precision"]
+        # dimension of precisions: [TxRxKxAxM]
+        # precision has dims (iou, recall, cls, area range, max dets)
+        assert len(self.class_names) == precisions.shape[2]
+
+        for idx, name in enumerate(self.class_names):
+            # area range index 0: all area ranges
+            # max dets index -1: typically 100 per image
+            precision_50 = precisions[0, :, idx, 0, -1]
+            precision_50 = precision_50[precision_50 > -1]
+            ap50 = np.mean(precision_50) if precision_50.size else float("nan")
+            per_class_ap50s.append(float(ap50 * 100))
+
+            precision = precisions[:, :, idx, 0, -1]
+            precision = precision[precision > -1]
+            ap = np.mean(precision) if precision.size else float("nan")
+            per_class_maps.append(float(ap * 100))
+
+        num_cols = min(num_columns, len(self.class_names) * len(headers))
+        flatten_results = []
+        for name, ap50, mAP in zip(self.class_names, per_class_ap50s, per_class_maps):
+            flatten_results += [name, ap50, mAP]
+
+        row_pair = itertools.zip_longest(
+            *[flatten_results[i::num_cols] for i in range(num_cols)]
+        )
+        table_headers = headers * (num_cols // len(headers))
+        table = tabulate(
+            row_pair,
+            tablefmt="pipe",
+            floatfmt=".1f",
+            headers=table_headers,
+            numalign="left",
+        )
+        logger.info("\n" + table)
+
+        aps = coco_eval.stats[:6]
+        eval_results = {}
+        for k, v in zip(self.metric_names, aps):
+            eval_results[k] = v
+        return eval_results
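+
+
+if __name__ == "__main__":
+    # Minimal usage sketch (not part of the upstream NanoDet code): the detector
+    # outputs [x1, y1, x2, y2] boxes, while COCO json results store [x, y, w, h].
+    print(xyxy2xywh([10.0, 20.0, 110.0, 120.0]))  # [10.0, 20.0, 100.0, 100.0]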
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/inferencer/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/inferencer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/inferencer/utilities.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/inferencer/utilities.py
new file mode 100644
index 0000000000000000000000000000000000000000..b20b891d58d00702e0eb5a79aa95a2ba8a1aefc5
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/inferencer/utilities.py
@@ -0,0 +1,69 @@
+# Modifications Copyright 2021 - present, OpenDR European Project
+#
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import torch
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.batch_process import stack_batch_img
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.collate import naive_collate
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.transform import Pipeline
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.arch import build_model
+
+image_ext = [".jpg", ".jpeg", ".webp", ".bmp", ".png"]
+video_ext = ["mp4", "mov", "avi", "mkv"]
+
+
+class Predictor(object):
+    def __init__(self, cfg, model, device="cuda"):
+        self.cfg = cfg
+        self.device = device
+
+        if self.cfg.model.arch.backbone.name == "RepVGG":
+            deploy_config = self.cfg.model
+            deploy_config.arch.backbone.update({"deploy": True})
+            deploy_model = build_model(deploy_config)
+            from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.repvgg\
+                import repvgg_det_model_convert
+            model = repvgg_det_model_convert(model, deploy_model)
+
+        self.model = model.to(device).eval()
+
+        self.pipeline = Pipeline(self.cfg.data.val.pipeline, self.cfg.data.val.keep_ratio)
+
+    def inference(self, img, verbose=True):
+        img_info = {"id": 0}
+        height, width = img.shape[:2]
+        img_info["height"] = height
+        img_info["width"] = width
+        meta = dict(img_info=img_info, raw_img=img, img=img)
+        meta = self.pipeline(None, meta, self.cfg.data.val.input_size)
+        meta["img"] = torch.from_numpy(meta["img"].transpose(2, 0, 1)).to(self.device)
+        meta = naive_collate([meta])
+        meta["img"] = stack_batch_img(meta["img"], divisible=32)
+        with torch.no_grad():
+            results = self.model.inference(meta, verbose)
+        return meta, results
+
+
+def get_image_list(path):
+    image_names = []
+    for maindir, subdir, file_name_list in os.walk(path):
+        for filename in file_name_list:
+            apath = os.path.join(maindir, filename)
+            ext = os.path.splitext(apath)[1]
+            if ext in image_ext:
+                image_names.append(apath)
+    return image_names
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..f0b10b8a01d63181241e76230089025caf7e9f47
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/__init__.py
@@ -0,0 +1,42 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import warnings
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.arch.nanodet_plus import NanoDetPlus
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.arch.one_stage_detector import OneStageDetector
+
+
+def build_model(model_cfg):
+    model_cfg = copy.deepcopy(model_cfg)
+    name = model_cfg.arch.pop("name")
+    if name == "GFL":
+        warnings.warn(
+            "Model architecture name is changed to 'OneStageDetector'. "
+            "The name 'GFL' is deprecated, please change the model->arch->name "
+            "in your YAML config file to OneStageDetector."
+        )
+        model = OneStageDetector(
+            model_cfg.arch.backbone, model_cfg.arch.fpn, model_cfg.arch.head
+        )
+    elif name == "OneStageDetector":
+        model = OneStageDetector(
+            model_cfg.arch.backbone, model_cfg.arch.fpn, model_cfg.arch.head
+        )
+    elif name == "NanoDetPlus":
+        model = NanoDetPlus(**model_cfg.arch)
+    else:
+        raise NotImplementedError
+    return model
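+
+# Illustrative usage (a sketch, not executed anywhere in this diff): `model_cfg` is
+# the `model` section of a NanoDet YAML config loaded as a CfgNode-style object, so
+# it supports both attribute access (model_cfg.arch.backbone) and pop(). The field
+# names below follow the upstream nanodet-plus configs and are assumptions here:
+#
+#   model:
+#     arch:
+#       name: NanoDetPlus
+#       backbone: {name: ShuffleNetV2, model_size: 1.0x}
+#       fpn: {name: GhostPAN, ...}
+#       head: {name: NanoDetPlusHead, ...}
+#       aux_head: {name: SimpleConvHead, ...}
+#
+#   model = build_model(cfg.model)  # -> NanoDetPlus (an nn.Module)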
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/nanodet_plus.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/nanodet_plus.py
new file mode 100644
index 0000000000000000000000000000000000000000..518c0af01ba042e3922554effb3d0e5f320123ab
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/nanodet_plus.py
@@ -0,0 +1,57 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+import torch
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head import build_head
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.arch.one_stage_detector import OneStageDetector
+
+
+class NanoDetPlus(OneStageDetector):
+    def __init__(
+        self,
+        backbone,
+        fpn,
+        aux_head,
+        head,
+        detach_epoch=0,
+    ):
+        super(NanoDetPlus, self).__init__(
+            backbone_cfg=backbone, fpn_cfg=fpn, head_cfg=head
+        )
+        self.aux_fpn = copy.deepcopy(self.fpn)
+        self.aux_head = build_head(aux_head)
+        self.detach_epoch = detach_epoch
+
+    def forward_train(self, gt_meta):
+        img = gt_meta["img"]
+        feat = self.backbone(img)
+        fpn_feat = self.fpn(feat)
+        if self.epoch >= self.detach_epoch:
+            # After `detach_epoch`, detach the auxiliary branch from the backbone
+            # features so its gradients no longer flow into the backbone/FPN.
+            aux_fpn_feat = self.aux_fpn([f.detach() for f in feat])
+            dual_fpn_feat = (
+                torch.cat([f.detach(), aux_f], dim=1)
+                for f, aux_f in zip(fpn_feat, aux_fpn_feat)
+            )
+        else:
+            aux_fpn_feat = self.aux_fpn(feat)
+            dual_fpn_feat = (
+                torch.cat([f, aux_f], dim=1) for f, aux_f in zip(fpn_feat, aux_fpn_feat)
+            )
+        head_out = self.head(fpn_feat)
+        aux_head_out = self.aux_head(dual_fpn_feat)
+        loss, loss_states = self.head.loss(head_out, gt_meta, aux_preds=aux_head_out)
+        return head_out, loss, loss_states
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/one_stage_detector.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/one_stage_detector.py
new file mode 100644
index 0000000000000000000000000000000000000000..e1ce7a650e19bb66e364845cad2b49456e72b779
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/arch/one_stage_detector.py
@@ -0,0 +1,59 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import build_backbone
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.fpn import build_fpn
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head import build_head
+
+
+class OneStageDetector(nn.Module):
+    def __init__(
+        self,
+        backbone_cfg,
+        fpn_cfg=None,
+        head_cfg=None,
+    ):
+        super(OneStageDetector, self).__init__()
+        self.backbone = build_backbone(backbone_cfg)
+        if fpn_cfg is not None:
+            self.fpn = build_fpn(fpn_cfg)
+        if head_cfg is not None:
+            self.head = build_head(head_cfg)
+        self.epoch = 0
+
+    def forward(self, x):
+        x = self.backbone(x)
+        if hasattr(self, "fpn"):
+            x = self.fpn(x)
+        if hasattr(self, "head"):
+            x = self.head(x)
+        return x
+
+    def inference(self, meta, verbose=True):
+        with torch.no_grad():
+            preds = self(meta["img"])
+            results = self.head.post_process(preds, meta)
+        return results
+
+    def forward_train(self, gt_meta):
+        preds = self(gt_meta["img"])
+        loss, loss_states = self.head.loss(preds, gt_meta)
+
+        return preds, loss, loss_states
+
+    def set_epoch(self, epoch):
+        self.epoch = epoch
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/__init__.py
new file mode 100755
index 0000000000000000000000000000000000000000..414b8c245f81a257f8aa85d5233af0361d86649f
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/__init__.py
@@ -0,0 +1,44 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.custom_csp import CustomCspNet
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.efficientnet_lite import EfficientNetLite
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.ghostnet import GhostNet
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.mobilenetv2 import MobileNetV2
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.repvgg import RepVGG
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.resnet import ResNet
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.shufflenetv2 import ShuffleNetV2
+
+
+def build_backbone(cfg):
+    backbone_cfg = copy.deepcopy(cfg)
+    name = backbone_cfg.pop("name")
+    if name == "ResNet":
+        return ResNet(**backbone_cfg)
+    elif name == "ShuffleNetV2":
+        return ShuffleNetV2(**backbone_cfg)
+    elif name == "GhostNet":
+        return GhostNet(**backbone_cfg)
+    elif name == "MobileNetV2":
+        return MobileNetV2(**backbone_cfg)
+    elif name == "EfficientNetLite":
+        return EfficientNetLite(**backbone_cfg)
+    elif name == "CustomCspNet":
+        return CustomCspNet(**backbone_cfg)
+    elif name == "RepVGG":
+        return RepVGG(**backbone_cfg)
+    else:
+        raise NotImplementedError
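The builder above uses a simple dispatch-by-name pattern: the "name" key selects the backbone class and the remaining keys are forwarded as constructor kwargs. A small illustrative example (the values are assumptions; pretrain=False avoids the model-zoo download):

    from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import build_backbone

    cfg = dict(name="ResNet", depth=18, out_stages=(2, 3, 4),
               activation="LeakyReLU", pretrain=False)
    backbone = build_backbone(cfg)  # cfg is deep-copied, so the caller's dict keeps its "name" key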
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/custom_csp.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/custom_csp.py
new file mode 100755
index 0000000000000000000000000000000000000000..17cd08402e2c2dee55904eb1e3d6f4c16035f6f9
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/custom_csp.py
@@ -0,0 +1,168 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule
+
+
+class TinyResBlock(nn.Module):
+    def __init__(
+        self, in_channels, kernel_size, norm_cfg, activation, res_type="concat"
+    ):
+        super(TinyResBlock, self).__init__()
+        assert in_channels % 2 == 0
+        assert res_type in ["concat", "add"]
+        self.res_type = res_type
+        self.in_conv = ConvModule(
+            in_channels,
+            in_channels // 2,
+            kernel_size,
+            padding=(kernel_size - 1) // 2,
+            norm_cfg=norm_cfg,
+            activation=activation,
+        )
+        self.mid_conv = ConvModule(
+            in_channels // 2,
+            in_channels // 2,
+            kernel_size,
+            padding=(kernel_size - 1) // 2,
+            norm_cfg=norm_cfg,
+            activation=activation,
+        )
+        if res_type == "add":
+            self.out_conv = ConvModule(
+                in_channels // 2,
+                in_channels,
+                kernel_size,
+                padding=(kernel_size - 1) // 2,
+                norm_cfg=norm_cfg,
+                activation=activation,
+            )
+
+    def forward(self, x):
+        x = self.in_conv(x)
+        x1 = self.mid_conv(x)
+        if self.res_type == "add":
+            return self.out_conv(x + x1)
+        else:
+            return torch.cat((x1, x), dim=1)
+
+
+class CspBlock(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        num_res,
+        kernel_size=3,
+        stride=0,
+        norm_cfg=dict(type="BN", requires_grad=True),
+        activation="LeakyReLU",
+    ):
+        super(CspBlock, self).__init__()
+        assert in_channels % 2 == 0
+        self.in_conv = ConvModule(
+            in_channels,
+            in_channels,
+            kernel_size,
+            stride,
+            padding=(kernel_size - 1) // 2,
+            norm_cfg=norm_cfg,
+            activation=activation,
+        )
+        res_blocks = []
+        for i in range(num_res):
+            res_block = TinyResBlock(in_channels, kernel_size, norm_cfg, activation)
+            res_blocks.append(res_block)
+        self.res_blocks = nn.Sequential(*res_blocks)
+        self.res_out_conv = ConvModule(
+            in_channels,
+            in_channels,
+            kernel_size,
+            padding=(kernel_size - 1) // 2,
+            norm_cfg=norm_cfg,
+            activation=activation,
+        )
+
+    def forward(self, x):
+        x = self.in_conv(x)
+        x1 = self.res_blocks(x)
+        x1 = self.res_out_conv(x1)
+        out = torch.cat((x1, x), dim=1)
+        return out
+
+
+class CustomCspNet(nn.Module):
+    def __init__(
+        self,
+        net_cfg,
+        out_stages,
+        norm_cfg=dict(type="BN", requires_grad=True),
+        activation="LeakyReLU",
+    ):
+        super(CustomCspNet, self).__init__()
+        assert isinstance(net_cfg, list)
+        assert set(out_stages).issubset(i for i in range(len(net_cfg)))
+        self.out_stages = out_stages
+        self.activation = activation
+        self.stages = nn.ModuleList()
+        for stage_cfg in net_cfg:
+            if stage_cfg[0] == "Conv":
+                in_channels, out_channels, kernel_size, stride = stage_cfg[1:]
+                stage = ConvModule(
+                    in_channels,
+                    out_channels,
+                    kernel_size,
+                    stride,
+                    padding=(kernel_size - 1) // 2,
+                    norm_cfg=norm_cfg,
+                    activation=activation,
+                )
+            elif stage_cfg[0] == "CspBlock":
+                in_channels, num_res, kernel_size, stride = stage_cfg[1:]
+                stage = CspBlock(
+                    in_channels, num_res, kernel_size, stride, norm_cfg, activation
+                )
+            elif stage_cfg[0] == "MaxPool":
+                kernel_size, stride = stage_cfg[1:]
+                stage = nn.MaxPool2d(
+                    kernel_size, stride, padding=(kernel_size - 1) // 2
+                )
+            else:
+                raise ModuleNotFoundError
+            self.stages.append(stage)
+        self._init_weight()
+
+    def forward(self, x):
+        output = []
+        for i, stage in enumerate(self.stages):
+            x = stage(x)
+            if i in self.out_stages:
+                output.append(x)
+        return tuple(output)
+
+    def _init_weight(self):
+        for m in self.modules():
+            if self.activation == "LeakyReLU":
+                nonlinearity = "leaky_relu"
+            else:
+                nonlinearity = "relu"
+            if isinstance(m, nn.Conv2d):
+                nn.init.kaiming_normal_(
+                    m.weight, mode="fan_out", nonlinearity=nonlinearity
+                )
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
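CustomCspNet is driven entirely by a net_cfg list in which each entry is [layer_type, *args], parsed stage by stage in __init__; note that a CspBlock doubles its channel count because its two branches are concatenated. A purely illustrative config (not one of the shipped presets):

    from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import CustomCspNet

    net_cfg = [
        ["Conv", 3, 32, 3, 2],      # stem: 3 -> 32 channels, stride 2
        ["MaxPool", 3, 2],
        ["CspBlock", 32, 1, 3, 1],  # concat doubles channels: 32 -> 64
        ["CspBlock", 64, 2, 3, 2],  # 64 -> 128, stride 2
    ]
    model = CustomCspNet(net_cfg, out_stages=(2, 3))  # returns the 64- and 128-channel maps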
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/efficientnet_lite.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/efficientnet_lite.py
new file mode 100644
index 0000000000000000000000000000000000000000..9cd6e41baf7384138d233f586561cd23177ef9b6
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/efficientnet_lite.py
@@ -0,0 +1,283 @@
+import math
+
+import torch
+import torch.nn.functional as F  # adaptive_avg_pool2d used in the SE branch lives in torch.nn.functional
+import torch.utils.model_zoo as model_zoo
+from torch import nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.activation import act_layers
+
+efficientnet_lite_params = {
+    # width_coefficient, depth_coefficient, image_size, dropout_rate
+    "efficientnet_lite0": [1.0, 1.0, 224, 0.2],
+    "efficientnet_lite1": [1.0, 1.1, 240, 0.2],
+    "efficientnet_lite2": [1.1, 1.2, 260, 0.3],
+    "efficientnet_lite3": [1.2, 1.4, 280, 0.3],
+    "efficientnet_lite4": [1.4, 1.8, 300, 0.3],
+}
+
+model_urls = {
+    "efficientnet_lite0": "https://github.com/RangiLyu/EfficientNet-Lite/releases/download/v1.0/efficientnet_lite0.pth",  # noqa: E501
+    "efficientnet_lite1": "https://github.com/RangiLyu/EfficientNet-Lite/releases/download/v1.0/efficientnet_lite1.pth",  # noqa: E501
+    "efficientnet_lite2": "https://github.com/RangiLyu/EfficientNet-Lite/releases/download/v1.0/efficientnet_lite2.pth",  # noqa: E501
+    "efficientnet_lite3": "https://github.com/RangiLyu/EfficientNet-Lite/releases/download/v1.0/efficientnet_lite3.pth",  # noqa: E501
+    "efficientnet_lite4": "https://github.com/RangiLyu/EfficientNet-Lite/releases/download/v1.0/efficientnet_lite4.pth",  # noqa: E501
+}
+
+
+def round_filters(filters, multiplier, divisor=8, min_width=None):
+    """Calculate and round number of filters based on width multiplier."""
+    if not multiplier:
+        return filters
+    filters *= multiplier
+    min_width = min_width or divisor
+    new_filters = max(min_width, int(filters + divisor / 2) // divisor * divisor)
+    # Make sure that round down does not go down by more than 10%.
+    if new_filters < 0.9 * filters:
+        new_filters += divisor
+    return int(new_filters)
+
+
+def round_repeats(repeats, multiplier):
+    """Round number of filters based on depth multiplier."""
+    if not multiplier:
+        return repeats
+    return int(math.ceil(multiplier * repeats))
+
+
+def drop_connect(x, drop_connect_rate, training):
+    if not training:
+        return x
+    keep_prob = 1.0 - drop_connect_rate
+    batch_size = x.shape[0]
+    random_tensor = keep_prob
+    random_tensor += torch.rand([batch_size, 1, 1, 1], dtype=x.dtype, device=x.device)
+    binary_mask = torch.floor(random_tensor)
+    x = (x / keep_prob) * binary_mask
+    return x
+
+
+class MBConvBlock(nn.Module):
+    def __init__(
+        self,
+        inp,
+        final_oup,
+        k,
+        s,
+        expand_ratio,
+        se_ratio,
+        has_se=False,
+        activation="ReLU6",
+    ):
+        super(MBConvBlock, self).__init__()
+
+        self._momentum = 0.01
+        self._epsilon = 1e-3
+        self.input_filters = inp
+        self.output_filters = final_oup
+        self.stride = s
+        self.expand_ratio = expand_ratio
+        self.has_se = has_se
+        self.id_skip = True  # skip connection and drop connect
+
+        # Expansion phase
+        oup = inp * expand_ratio  # number of output channels
+        if expand_ratio != 1:
+            self._expand_conv = nn.Conv2d(
+                in_channels=inp, out_channels=oup, kernel_size=1, bias=False
+            )
+            self._bn0 = nn.BatchNorm2d(
+                num_features=oup, momentum=self._momentum, eps=self._epsilon
+            )
+
+        # Depthwise convolution phase
+        self._depthwise_conv = nn.Conv2d(
+            in_channels=oup,
+            out_channels=oup,
+            groups=oup,  # groups makes it depthwise
+            kernel_size=k,
+            padding=(k - 1) // 2,
+            stride=s,
+            bias=False,
+        )
+        self._bn1 = nn.BatchNorm2d(
+            num_features=oup, momentum=self._momentum, eps=self._epsilon
+        )
+
+        # Squeeze and Excitation layer, if desired
+        if self.has_se:
+            num_squeezed_channels = max(1, int(inp * se_ratio))
+            self._se_reduce = nn.Conv2d(
+                in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1
+            )
+            self._se_expand = nn.Conv2d(
+                in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1
+            )
+
+        # Output phase
+        self._project_conv = nn.Conv2d(
+            in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False
+        )
+        self._bn2 = nn.BatchNorm2d(
+            num_features=final_oup, momentum=self._momentum, eps=self._epsilon
+        )
+        self._relu = act_layers(activation)
+
+    def forward(self, x, drop_connect_rate=None):
+        """
+        :param x: input tensor
+        :param drop_connect_rate: drop connect rate (float, between 0 and 1)
+        :return: output of block
+        """
+
+        # Expansion and Depthwise Convolution
+        identity = x
+        if self.expand_ratio != 1:
+            x = self._relu(self._bn0(self._expand_conv(x)))
+        x = self._relu(self._bn1(self._depthwise_conv(x)))
+
+        # Squeeze and Excitation
+        if self.has_se:
+            x_squeezed = F.adaptive_avg_pool2d(x, 1)
+            x_squeezed = self._se_expand(self._relu(self._se_reduce(x_squeezed)))
+            x = torch.sigmoid(x_squeezed) * x
+
+        x = self._bn2(self._project_conv(x))
+
+        # Skip connection and drop connect
+        if self.id_skip and self.stride == 1 and self.input_filters == self.output_filters:
+            if drop_connect_rate:
+                x = drop_connect(x, drop_connect_rate, training=self.training)
+            x += identity  # skip connection
+        return x
+
+
+class EfficientNetLite(nn.Module):
+    def __init__(
+        self, model_name, out_stages=(2, 4, 6), activation="ReLU6", pretrain=True
+    ):
+        super(EfficientNetLite, self).__init__()
+        assert set(out_stages).issubset(i for i in range(0, 7))
+        assert model_name in efficientnet_lite_params
+
+        self.model_name = model_name
+        # Batch norm parameters
+        momentum = 0.01
+        epsilon = 1e-3
+        width_multiplier, depth_multiplier, _, dropout_rate = efficientnet_lite_params[
+            model_name
+        ]
+        self.drop_connect_rate = 0.2
+        self.out_stages = out_stages
+
+        mb_block_settings = [
+            # repeat|kernel_size|stride|expand|input|output|se_ratio
+            [1, 3, 1, 1, 32, 16, 0.25],  # stage0
+            [2, 3, 2, 6, 16, 24, 0.25],  # stage1 - 1/4
+            [2, 5, 2, 6, 24, 40, 0.25],  # stage2 - 1/8
+            [3, 3, 2, 6, 40, 80, 0.25],  # stage3
+            [3, 5, 1, 6, 80, 112, 0.25],  # stage4 - 1/16
+            [4, 5, 2, 6, 112, 192, 0.25],  # stage5
+            [1, 3, 1, 6, 192, 320, 0.25],  # stage6 - 1/32
+        ]
+
+        # Stem
+        out_channels = 32
+        self.stem = nn.Sequential(
+            nn.Conv2d(3, out_channels, kernel_size=3, stride=2, padding=1, bias=False),
+            nn.BatchNorm2d(num_features=out_channels, momentum=momentum, eps=epsilon),
+            act_layers(activation),
+        )
+
+        # Build blocks
+        self.blocks = nn.ModuleList([])
+        for i, stage_setting in enumerate(mb_block_settings):
+            stage = nn.ModuleList([])
+            (
+                num_repeat,
+                kernal_size,
+                stride,
+                expand_ratio,
+                input_filters,
+                output_filters,
+                se_ratio,
+            ) = stage_setting
+            # Update block input and output filters based on width multiplier.
+            input_filters = (
+                input_filters
+                if i == 0
+                else round_filters(input_filters, width_multiplier)
+            )
+            output_filters = round_filters(output_filters, width_multiplier)
+            num_repeat = (
+                num_repeat
+                if i == 0 or i == len(mb_block_settings) - 1
+                else round_repeats(num_repeat, depth_multiplier)
+            )
+
+            # The first block needs to take care of stride and filter size increase.
+            stage.append(
+                MBConvBlock(
+                    input_filters,
+                    output_filters,
+                    kernal_size,
+                    stride,
+                    expand_ratio,
+                    se_ratio,
+                    has_se=False,
+                )
+            )
+            if num_repeat > 1:
+                input_filters = output_filters
+                stride = 1
+            for _ in range(num_repeat - 1):
+                stage.append(
+                    MBConvBlock(
+                        input_filters,
+                        output_filters,
+                        kernal_size,
+                        stride,
+                        expand_ratio,
+                        se_ratio,
+                        has_se=False,
+                    )
+                )
+
+            self.blocks.append(stage)
+        self._initialize_weights(pretrain)
+
+    def forward(self, x):
+        x = self.stem(x)
+        output = []
+        idx = 0
+        for j, stage in enumerate(self.blocks):
+            for block in stage:
+                drop_connect_rate = self.drop_connect_rate
+                if drop_connect_rate:
+                    drop_connect_rate *= float(idx) / len(self.blocks)
+                x = block(x, drop_connect_rate)
+                idx += 1
+            if j in self.out_stages:
+                output.append(x)
+        return output
+
+    def _initialize_weights(self, pretrain=True):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
+                m.weight.data.normal_(0, math.sqrt(2.0 / n))
+                if m.bias is not None:
+                    m.bias.data.zero_()
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
+        if pretrain:
+            url = model_urls[self.model_name]
+            if url is not None:
+                pretrained_state_dict = model_zoo.load_url(url)
+                print("=> loading pretrained model {}".format(url))
+                self.load_state_dict(pretrained_state_dict, strict=False)
+
+    def load_pretrain(self, path):
+        state_dict = torch.load(path)
+        self.load_state_dict(state_dict, strict=True)
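A quick, assumed usage sketch for the stage/stride mapping documented in mb_block_settings above (the 320x320 input size is arbitrary; pretrain=False skips the weight download):

    import torch
    from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import EfficientNetLite

    net = EfficientNetLite("efficientnet_lite0", out_stages=(2, 4, 6), pretrain=False)
    feats = net(torch.randn(1, 3, 320, 320))  # list of stride-8, -16 and -32 feature maps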
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/ghostnet.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/ghostnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..353939fe5df1ca9dbf2be3fb9aeff7f783a15a51
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/ghostnet.py
@@ -0,0 +1,348 @@
+"""
+2020.06.09-Changed for building GhostNet
+Huawei Technologies Co., Ltd. <foss@huawei.com>
+Creates a GhostNet Model as defined in:
+GhostNet: More Features from Cheap Operations By Kai Han, Yunhe Wang,
+Qi Tian, Jianyuan Guo, Chunjing Xu, Chang Xu.
+https://arxiv.org/abs/1911.11907
+Modified from https://github.com/d-li14/mobilenetv3.pytorch
+and https://github.com/rwightman/pytorch-image-models
+"""
+import logging
+import math
+import warnings
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.activation import act_layers
+
+
+def get_url(width_mult=1.0):
+    if width_mult == 1.0:
+        return "https://raw.githubusercontent.com/huawei-noah/CV-Backbones/master/ghostnet_pytorch/models/state_dict_73.98.pth"  # noqa E501
+    else:
+        logging.info("GhostNet only has 1.0 pretrain model. ")
+        return None
+
+
+def _make_divisible(v, divisor, min_value=None):
+    """
+    This function is taken from the original tf repo.
+    It ensures that all layers have a channel number that is divisible by 8
+    It can be seen here:
+    https://github.com/tensorflow/models/blob/master/research/slim/nets/mobilenet/mobilenet.py
+    """
+    if min_value is None:
+        min_value = divisor
+    new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)
+    # Make sure that round down does not go down by more than 10%.
+    if new_v < 0.9 * v:
+        new_v += divisor
+    return new_v
+
+
+def hard_sigmoid(x, inplace: bool = False):
+    if inplace:
+        return x.add_(3.0).clamp_(0.0, 6.0).div_(6.0)
+    else:
+        return F.relu6(x + 3.0) / 6.0
+
+
+class SqueezeExcite(nn.Module):
+    def __init__(
+        self,
+        in_chs,
+        se_ratio=0.25,
+        reduced_base_chs=None,
+        activation="ReLU",
+        gate_fn=hard_sigmoid,
+        divisor=4,
+        **_
+    ):
+        super(SqueezeExcite, self).__init__()
+        self.gate_fn = gate_fn
+        reduced_chs = _make_divisible((reduced_base_chs or in_chs) * se_ratio, divisor)
+        self.avg_pool = nn.AdaptiveAvgPool2d(1)
+        self.conv_reduce = nn.Conv2d(in_chs, reduced_chs, 1, bias=True)
+        self.act1 = act_layers(activation)
+        self.conv_expand = nn.Conv2d(reduced_chs, in_chs, 1, bias=True)
+
+    def forward(self, x):
+        x_se = self.avg_pool(x)
+        x_se = self.conv_reduce(x_se)
+        x_se = self.act1(x_se)
+        x_se = self.conv_expand(x_se)
+        x = x * self.gate_fn(x_se)
+        return x
+
+
+class ConvBnAct(nn.Module):
+    def __init__(self, in_chs, out_chs, kernel_size, stride=1, activation="ReLU"):
+        super(ConvBnAct, self).__init__()
+        self.conv = nn.Conv2d(
+            in_chs, out_chs, kernel_size, stride, kernel_size // 2, bias=False
+        )
+        self.bn1 = nn.BatchNorm2d(out_chs)
+        self.act1 = act_layers(activation)
+
+    def forward(self, x):
+        x = self.conv(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+        return x
+
+
+class GhostModule(nn.Module):
+    def __init__(
+        self, inp, oup, kernel_size=1, ratio=2, dw_size=3, stride=1, activation="ReLU"
+    ):
+        super(GhostModule, self).__init__()
+        self.oup = oup
+        init_channels = math.ceil(oup / ratio)
+        new_channels = init_channels * (ratio - 1)
+
+        self.primary_conv = nn.Sequential(
+            nn.Conv2d(
+                inp, init_channels, kernel_size, stride, kernel_size // 2, bias=False
+            ),
+            nn.BatchNorm2d(init_channels),
+            act_layers(activation) if activation else nn.Sequential(),
+        )
+
+        self.cheap_operation = nn.Sequential(
+            nn.Conv2d(
+                init_channels,
+                new_channels,
+                dw_size,
+                1,
+                dw_size // 2,
+                groups=init_channels,
+                bias=False,
+            ),
+            nn.BatchNorm2d(new_channels),
+            act_layers(activation) if activation else nn.Sequential(),
+        )
+
+    def forward(self, x):
+        x1 = self.primary_conv(x)
+        x2 = self.cheap_operation(x1)
+        out = torch.cat([x1, x2], dim=1)
+        return out
+
+
+class GhostBottleneck(nn.Module):
+    """Ghost bottleneck w/ optional SE"""
+
+    def __init__(
+        self,
+        in_chs,
+        mid_chs,
+        out_chs,
+        dw_kernel_size=3,
+        stride=1,
+        activation="ReLU",
+        se_ratio=0.0,
+    ):
+        super(GhostBottleneck, self).__init__()
+        has_se = se_ratio is not None and se_ratio > 0.0
+        self.stride = stride
+
+        # Point-wise expansion
+        self.ghost1 = GhostModule(in_chs, mid_chs, activation=activation)
+
+        # Depth-wise convolution
+        if self.stride > 1:
+            self.conv_dw = nn.Conv2d(
+                mid_chs,
+                mid_chs,
+                dw_kernel_size,
+                stride=stride,
+                padding=(dw_kernel_size - 1) // 2,
+                groups=mid_chs,
+                bias=False,
+            )
+            self.bn_dw = nn.BatchNorm2d(mid_chs)
+
+        # Squeeze-and-excitation
+        if has_se:
+            self.se = SqueezeExcite(mid_chs, se_ratio=se_ratio)
+        else:
+            self.se = None
+
+        # Point-wise linear projection
+        self.ghost2 = GhostModule(mid_chs, out_chs, activation=None)
+
+        # shortcut
+        if in_chs == out_chs and self.stride == 1:
+            self.shortcut = nn.Sequential()
+        else:
+            self.shortcut = nn.Sequential(
+                nn.Conv2d(
+                    in_chs,
+                    in_chs,
+                    dw_kernel_size,
+                    stride=stride,
+                    padding=(dw_kernel_size - 1) // 2,
+                    groups=in_chs,
+                    bias=False,
+                ),
+                nn.BatchNorm2d(in_chs),
+                nn.Conv2d(in_chs, out_chs, 1, stride=1, padding=0, bias=False),
+                nn.BatchNorm2d(out_chs),
+            )
+
+    def forward(self, x):
+        residual = x
+
+        # 1st ghost bottleneck
+        x = self.ghost1(x)
+
+        # Depth-wise convolution
+        if self.stride > 1:
+            x = self.conv_dw(x)
+            x = self.bn_dw(x)
+
+        # Squeeze-and-excitation
+        if self.se is not None:
+            x = self.se(x)
+
+        # 2nd ghost bottleneck
+        x = self.ghost2(x)
+
+        x += self.shortcut(residual)
+        return x
+
+
+class GhostNet(nn.Module):
+    def __init__(
+        self,
+        width_mult=1.0,
+        out_stages=(4, 6, 9),
+        activation="ReLU",
+        pretrain=True,
+        act=None,
+    ):
+        super(GhostNet, self).__init__()
+        assert set(out_stages).issubset(i for i in range(10))
+        self.width_mult = width_mult
+        self.out_stages = out_stages
+        # setting of inverted residual blocks
+        self.cfgs = [
+            # k, t,   c,  SE, s
+            # stage1
+            [[3, 16, 16, 0, 1]],  # 0
+            # stage2
+            [[3, 48, 24, 0, 2]],  # 1
+            [[3, 72, 24, 0, 1]],  # 2  1/4
+            # stage3
+            [[5, 72, 40, 0.25, 2]],  # 3
+            [[5, 120, 40, 0.25, 1]],  # 4  1/8
+            # stage4
+            [[3, 240, 80, 0, 2]],  # 5
+            [
+                [3, 200, 80, 0, 1],
+                [3, 184, 80, 0, 1],
+                [3, 184, 80, 0, 1],
+                [3, 480, 112, 0.25, 1],
+                [3, 672, 112, 0.25, 1],
+            ],  # 6  1/16
+            # stage5
+            [[5, 672, 160, 0.25, 2]],  # 7
+            [
+                [5, 960, 160, 0, 1],
+                [5, 960, 160, 0.25, 1],
+                [5, 960, 160, 0, 1],
+                [5, 960, 160, 0.25, 1],
+            ],  # 8
+        ]
+        #  ------conv+bn+act----------# 9  1/32
+
+        self.activation = activation
+        if act is not None:
+            warnings.warn(
+                "Warning! act argument has been deprecated, " "use activation instead!"
+            )
+            self.activation = act
+
+        # building first layer
+        output_channel = _make_divisible(16 * width_mult, 4)
+        self.conv_stem = nn.Conv2d(3, output_channel, 3, 2, 1, bias=False)
+        self.bn1 = nn.BatchNorm2d(output_channel)
+        self.act1 = act_layers(self.activation)
+        input_channel = output_channel
+
+        # building inverted residual blocks
+        stages = []
+        block = GhostBottleneck
+        for cfg in self.cfgs:
+            layers = []
+            for k, exp_size, c, se_ratio, s in cfg:
+                output_channel = _make_divisible(c * width_mult, 4)
+                hidden_channel = _make_divisible(exp_size * width_mult, 4)
+                layers.append(
+                    block(
+                        input_channel,
+                        hidden_channel,
+                        output_channel,
+                        k,
+                        s,
+                        activation=self.activation,
+                        se_ratio=se_ratio,
+                    )
+                )
+                input_channel = output_channel
+            stages.append(nn.Sequential(*layers))
+
+        output_channel = _make_divisible(exp_size * width_mult, 4)
+        stages.append(
+            nn.Sequential(
+                ConvBnAct(input_channel, output_channel, 1, activation=self.activation)
+            )
+        )  # 9
+
+        self.blocks = nn.Sequential(*stages)
+
+        self._initialize_weights(pretrain)
+
+    def forward(self, x):
+        x = self.conv_stem(x)
+        x = self.bn1(x)
+        x = self.act1(x)
+        output = []
+        for i in range(10):
+            x = self.blocks[i](x)
+            if i in self.out_stages:
+                output.append(x)
+        return tuple(output)
+
+    def _initialize_weights(self, pretrain=True):
+        print("init weights...")
+        for name, m in self.named_modules():
+            if isinstance(m, nn.Conv2d):
+                if "conv_stem" in name:
+                    nn.init.normal_(m.weight, 0, 0.01)
+                else:
+                    nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0.0001)
+                nn.init.constant_(m.running_mean, 0)
+            elif isinstance(m, nn.BatchNorm1d):
+                nn.init.constant_(m.weight, 1)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0.0001)
+                nn.init.constant_(m.running_mean, 0)
+            elif isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, 0, 0.01)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+        if pretrain:
+            url = get_url(self.width_mult)
+            if url is not None:
+                state_dict = torch.hub.load_state_dict_from_url(url, progress=True)
+                self.load_state_dict(state_dict, strict=False)
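For reference, a hypothetical smoke test of the default configuration; only width_mult=1.0 has a pretrained checkpoint, and pretrain=False avoids fetching it:

    import torch
    from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import GhostNet

    net = GhostNet(width_mult=1.0, out_stages=(4, 6, 9), pretrain=False)
    c3, c4, c5 = net(torch.randn(1, 3, 320, 320))  # stride-8, -16 and -32 feature maps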
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/mobilenetv2.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/mobilenetv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..19fcae379ed4ef676c53719488df0e4fab93abb3
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/mobilenetv2.py
@@ -0,0 +1,176 @@
+from __future__ import absolute_import, division, print_function
+
+import warnings
+
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.activation import act_layers
+
+
+class ConvBNReLU(nn.Sequential):
+    def __init__(
+        self,
+        in_planes,
+        out_planes,
+        kernel_size=3,
+        stride=1,
+        groups=1,
+        activation="ReLU",
+    ):
+        padding = (kernel_size - 1) // 2
+        super(ConvBNReLU, self).__init__(
+            nn.Conv2d(
+                in_planes,
+                out_planes,
+                kernel_size,
+                stride,
+                padding,
+                groups=groups,
+                bias=False,
+            ),
+            nn.BatchNorm2d(out_planes),
+            act_layers(activation),
+        )
+
+
+class InvertedResidual(nn.Module):
+    def __init__(self, inp, oup, stride, expand_ratio, activation="ReLU"):
+        super(InvertedResidual, self).__init__()
+        self.stride = stride
+        assert stride in [1, 2]
+
+        hidden_dim = int(round(inp * expand_ratio))
+        self.use_res_connect = self.stride == 1 and inp == oup
+
+        layers = []
+        if expand_ratio != 1:
+            # pw
+            layers.append(
+                ConvBNReLU(inp, hidden_dim, kernel_size=1, activation=activation)
+            )
+        layers.extend(
+            [
+                # dw
+                ConvBNReLU(
+                    hidden_dim,
+                    hidden_dim,
+                    stride=stride,
+                    groups=hidden_dim,
+                    activation=activation,
+                ),
+                # pw-linear
+                nn.Conv2d(hidden_dim, oup, 1, 1, 0, bias=False),
+                nn.BatchNorm2d(oup),
+            ]
+        )
+        self.conv = nn.Sequential(*layers)
+
+    def forward(self, x):
+        if self.use_res_connect:
+            return x + self.conv(x)
+        else:
+            return self.conv(x)
+
+
+class MobileNetV2(nn.Module):
+    def __init__(
+        self,
+        width_mult=1.0,
+        out_stages=(1, 2, 4, 6),
+        last_channel=1280,
+        activation="ReLU",
+        act=None,
+    ):
+        super(MobileNetV2, self).__init__()
+        # TODO: support load torchvison pretrained weight
+        assert set(out_stages).issubset(i for i in range(7))
+        self.width_mult = width_mult
+        self.out_stages = out_stages
+        input_channel = 32
+        self.last_channel = last_channel
+        self.activation = activation
+        if act is not None:
+            warnings.warn(
+                "Warning! act argument has been deprecated, " "use activation instead!"
+            )
+            self.activation = act
+        self.interverted_residual_setting = [
+            # t, c, n, s
+            [1, 16, 1, 1],
+            [6, 24, 2, 2],
+            [6, 32, 3, 2],
+            [6, 64, 4, 2],
+            [6, 96, 3, 1],
+            [6, 160, 3, 2],
+            [6, 320, 1, 1],
+        ]
+
+        # building first layer
+        self.input_channel = int(input_channel * width_mult)
+        self.first_layer = ConvBNReLU(
+            3, self.input_channel, stride=2, activation=self.activation
+        )
+        # building inverted residual blocks
+        for i in range(7):
+            name = "stage{}".format(i)
+            setattr(self, name, self.build_mobilenet_stage(stage_num=i))
+
+        self._initialize_weights()
+
+    def build_mobilenet_stage(self, stage_num):
+        stage = []
+        t, c, n, s = self.interverted_residual_setting[stage_num]
+        output_channel = int(c * self.width_mult)
+        for i in range(n):
+            if i == 0:
+                stage.append(
+                    InvertedResidual(
+                        self.input_channel,
+                        output_channel,
+                        s,
+                        expand_ratio=t,
+                        activation=self.activation,
+                    )
+                )
+            else:
+                stage.append(
+                    InvertedResidual(
+                        self.input_channel,
+                        output_channel,
+                        1,
+                        expand_ratio=t,
+                        activation=self.activation,
+                    )
+                )
+            self.input_channel = output_channel
+        if stage_num == 6:
+            last_layer = ConvBNReLU(
+                self.input_channel,
+                self.last_channel,
+                kernel_size=1,
+                activation=self.activation,
+            )
+            stage.append(last_layer)
+        stage = nn.Sequential(*stage)
+        return stage
+
+    def forward(self, x):
+        x = self.first_layer(x)
+        output = []
+        for i in range(0, 7):
+            stage = getattr(self, "stage{}".format(i))
+            x = stage(x)
+            if i in self.out_stages:
+                output.append(x)
+
+        return tuple(output)
+
+    def _initialize_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                nn.init.normal_(m.weight, std=0.001)
+                if m.bias is not None:
+                    m.bias.data.zero_()
+            elif isinstance(m, nn.BatchNorm2d):
+                m.weight.data.fill_(1)
+                m.bias.data.zero_()
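An assumed usage sketch; unlike the other backbones this one has no pretrain argument yet (see the TODO above), so weights always come from _initialize_weights:

    import torch
    from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import MobileNetV2

    net = MobileNetV2(width_mult=1.0, out_stages=(2, 4, 6))
    feats = net(torch.randn(1, 3, 320, 320))  # stride-8, -16 and -32 feature maps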
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/repvgg.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/repvgg.py
new file mode 100644
index 0000000000000000000000000000000000000000..fa30508f13def22c0bed2c565eb1bf56245fe37d
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/repvgg.py
@@ -0,0 +1,234 @@
+"""
+@article{ding2101repvgg,
+  title={RepVGG: Making VGG-style ConvNets Great Again},
+  author={Ding, Xiaohan and Zhang, Xiangyu and Ma, Ningning and Han,
+          Jungong and Ding, Guiguang and Sun, Jian},
+  journal={arXiv preprint arXiv:2101.03697}}
+RepVGG Backbone from paper RepVGG: Making VGG-style ConvNets Great Again
+Code from https://github.com/DingXiaoH/RepVGG
+"""
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import RepVGGConvModule
+
+optional_groupwise_layers = [2, 4, 6, 8, 10, 12, 14, 16, 18, 20, 22, 24, 26]
+g2_map = {layer: 2 for layer in optional_groupwise_layers}
+g4_map = {layer: 4 for layer in optional_groupwise_layers}
+
+model_param = {
+    "RepVGG-A0": dict(
+        num_blocks=[2, 4, 14, 1],
+        width_multiplier=[0.75, 0.75, 0.75, 2.5],
+        override_groups_map=None,
+    ),
+    "RepVGG-A1": dict(
+        num_blocks=[2, 4, 14, 1],
+        width_multiplier=[1, 1, 1, 2.5],
+        override_groups_map=None,
+    ),
+    "RepVGG-A2": dict(
+        num_blocks=[2, 4, 14, 1],
+        width_multiplier=[1.5, 1.5, 1.5, 2.75],
+        override_groups_map=None,
+    ),
+    "RepVGG-B0": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[1, 1, 1, 2.5],
+        override_groups_map=None,
+    ),
+    "RepVGG-B1": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[2, 2, 2, 4],
+        override_groups_map=None,
+    ),
+    "RepVGG-B1g2": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[2, 2, 2, 4],
+        override_groups_map=g2_map,
+    ),
+    "RepVGG-B1g4": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[2, 2, 2, 4],
+        override_groups_map=g4_map,
+    ),
+    "RepVGG-B2": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[2.5, 2.5, 2.5, 5],
+        override_groups_map=None,
+    ),
+    "RepVGG-B2g2": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[2.5, 2.5, 2.5, 5],
+        override_groups_map=g2_map,
+    ),
+    "RepVGG-B2g4": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[2.5, 2.5, 2.5, 5],
+        override_groups_map=g4_map,
+    ),
+    "RepVGG-B3": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[3, 3, 3, 5],
+        override_groups_map=None,
+    ),
+    "RepVGG-B3g2": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[3, 3, 3, 5],
+        override_groups_map=g2_map,
+    ),
+    "RepVGG-B3g4": dict(
+        num_blocks=[4, 6, 16, 1],
+        width_multiplier=[3, 3, 3, 5],
+        override_groups_map=g4_map,
+    ),
+}
+
+
+def conv_bn(in_channels, out_channels, kernel_size, stride, padding, groups=1):
+    result = nn.Sequential()
+    result.add_module(
+        "conv",
+        nn.Conv2d(
+            in_channels=in_channels,
+            out_channels=out_channels,
+            kernel_size=kernel_size,
+            stride=stride,
+            padding=padding,
+            groups=groups,
+            bias=False,
+        ),
+    )
+    result.add_module("bn", nn.BatchNorm2d(num_features=out_channels))
+    return result
+
+
+class RepVGG(nn.Module):
+    def __init__(
+        self,
+        arch,
+        out_stages=(1, 2, 3, 4),
+        activation="ReLU",
+        deploy=False,
+        last_channel=None,
+    ):
+        super(RepVGG, self).__init__()
+        # TODO: Update code to Xiaohan's repo
+        model_name = "RepVGG-" + arch
+        assert model_name in model_param
+        assert set(out_stages).issubset((1, 2, 3, 4))
+        num_blocks = model_param[model_name]["num_blocks"]
+        width_multiplier = model_param[model_name]["width_multiplier"]
+        assert len(width_multiplier) == 4
+        self.out_stages = out_stages
+        self.activation = activation
+        self.deploy = deploy
+        self.override_groups_map = (
+            model_param[model_name]["override_groups_map"] or dict()
+        )
+
+        assert 0 not in self.override_groups_map
+
+        self.in_planes = min(64, int(64 * width_multiplier[0]))
+
+        self.stage0 = RepVGGConvModule(
+            in_channels=3,
+            out_channels=self.in_planes,
+            kernel_size=3,
+            stride=2,
+            padding=1,
+            activation=activation,
+            deploy=self.deploy,
+        )
+        self.cur_layer_idx = 1
+        self.stage1 = self._make_stage(
+            int(64 * width_multiplier[0]), num_blocks[0], stride=2
+        )
+        self.stage2 = self._make_stage(
+            int(128 * width_multiplier[1]), num_blocks[1], stride=2
+        )
+        self.stage3 = self._make_stage(
+            int(256 * width_multiplier[2]), num_blocks[2], stride=2
+        )
+        out_planes = last_channel if last_channel else int(512 * width_multiplier[3])
+        self.stage4 = self._make_stage(out_planes, num_blocks[3], stride=2)
+
+    def _make_stage(self, planes, num_blocks, stride):
+        strides = [stride] + [1] * (num_blocks - 1)
+        blocks = []
+        for stride in strides:
+            cur_groups = self.override_groups_map.get(self.cur_layer_idx, 1)
+            blocks.append(
+                RepVGGConvModule(
+                    in_channels=self.in_planes,
+                    out_channels=planes,
+                    kernel_size=3,
+                    stride=stride,
+                    padding=1,
+                    groups=cur_groups,
+                    activation=self.activation,
+                    deploy=self.deploy,
+                )
+            )
+            self.in_planes = planes
+            self.cur_layer_idx += 1
+        return nn.Sequential(*blocks)
+
+    def forward(self, x):
+        x = self.stage0(x)
+        output = []
+        for i in range(1, 5):
+            stage = getattr(self, "stage{}".format(i))
+            x = stage(x)
+            if i in self.out_stages:
+                output.append(x)
+        return tuple(output)
+
+
+def repvgg_model_convert(model, deploy_model, save_path=None):
+    """
+    Examples:
+        >>> train_model = RepVGG(arch='A0', deploy=False)
+        >>> deploy_model = RepVGG(arch='A0', deploy=True)
+        >>> deploy_model = repvgg_model_convert(
+        ...     train_model, deploy_model, save_path='repvgg_deploy.pth')
+    """
+    converted_weights = {}
+    for name, module in model.named_modules():
+        if hasattr(module, "repvgg_convert"):
+            kernel, bias = module.repvgg_convert()
+            converted_weights[name + ".rbr_reparam.weight"] = kernel
+            converted_weights[name + ".rbr_reparam.bias"] = bias
+        elif isinstance(module, torch.nn.Linear):
+            converted_weights[name + ".weight"] = module.weight.detach().cpu().numpy()
+            converted_weights[name + ".bias"] = module.bias.detach().cpu().numpy()
+    del model
+
+    for name, param in deploy_model.named_parameters():
+        print("deploy param: ", name, param.size(), np.mean(converted_weights[name]))
+        param.data = torch.from_numpy(converted_weights[name]).float()
+
+    if save_path is not None:
+        torch.save(deploy_model.state_dict(), save_path)
+
+    return deploy_model
+
+
+def repvgg_det_model_convert(model, deploy_model):
+    converted_weights = {}
+    deploy_model.load_state_dict(model.state_dict(), strict=False)
+    for name, module in model.backbone.named_modules():
+        if hasattr(module, "repvgg_convert"):
+            kernel, bias = module.repvgg_convert()
+            converted_weights[name + ".rbr_reparam.weight"] = kernel
+            converted_weights[name + ".rbr_reparam.bias"] = bias
+        elif isinstance(module, torch.nn.Linear):
+            converted_weights[name + ".weight"] = module.weight.detach().cpu().numpy()
+            converted_weights[name + ".bias"] = module.bias.detach().cpu().numpy()
+    del model
+    for name, param in deploy_model.backbone.named_parameters():
+        print("deploy param: ", name, param.size(), np.mean(converted_weights[name]))
+        param.data = torch.from_numpy(converted_weights[name]).float()
+    return deploy_model
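Note on usage: the arch string is the suffix of the "RepVGG-*" keys in model_param (e.g. "A0" selects "RepVGG-A0"), and the deploy flag switches between the multi-branch training form and the re-parameterised form produced by the conversion helpers above. A minimal, assumed instantiation:

    from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import RepVGG

    train_backbone = RepVGG(arch="A0", out_stages=(2, 3, 4), deploy=False)
    # after training, repvgg_model_convert / repvgg_det_model_convert fold each block
    # into a single 3x3 conv for faster inference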
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/resnet.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/resnet.py
new file mode 100644
index 0000000000000000000000000000000000000000..cbd84f7546223f46bb6827cc3a7a88aca202f0e0
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/resnet.py
@@ -0,0 +1,196 @@
+from __future__ import absolute_import, division, print_function
+
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.activation import act_layers
+
+model_urls = {
+    "resnet18": "https://download.pytorch.org/models/resnet18-5c106cde.pth",
+    "resnet34": "https://download.pytorch.org/models/resnet34-333f7ec4.pth",
+    "resnet50": "https://download.pytorch.org/models/resnet50-19c8e357.pth",
+    "resnet101": "https://download.pytorch.org/models/resnet101-5d3b4d8f.pth",
+    "resnet152": "https://download.pytorch.org/models/resnet152-b121ed2d.pth",
+}
+
+
+def conv3x3(in_planes, out_planes, stride=1):
+    """3x3 convolution with padding"""
+    return nn.Conv2d(
+        in_planes, out_planes, kernel_size=3, stride=stride, padding=1, bias=False
+    )
+
+
+class BasicBlock(nn.Module):
+    expansion = 1
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, activation="ReLU"):
+        super(BasicBlock, self).__init__()
+        self.conv1 = conv3x3(inplanes, planes, stride)
+        self.bn1 = nn.BatchNorm2d(planes)
+        self.act = act_layers(activation)
+        self.conv2 = conv3x3(planes, planes)
+        self.bn2 = nn.BatchNorm2d(planes)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        residual = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.act(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out += residual
+        out = self.act(out)
+
+        return out
+
+
+class Bottleneck(nn.Module):
+    expansion = 4
+
+    def __init__(self, inplanes, planes, stride=1, downsample=None, activation="ReLU"):
+        super(Bottleneck, self).__init__()
+        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
+        self.bn1 = nn.BatchNorm2d(planes)
+        self.conv2 = nn.Conv2d(
+            planes, planes, kernel_size=3, stride=stride, padding=1, bias=False
+        )
+        self.bn2 = nn.BatchNorm2d(planes)
+        self.conv3 = nn.Conv2d(
+            planes, planes * self.expansion, kernel_size=1, bias=False
+        )
+        self.bn3 = nn.BatchNorm2d(planes * self.expansion)
+        self.act = act_layers(activation)
+        self.downsample = downsample
+        self.stride = stride
+
+    def forward(self, x):
+        residual = x
+
+        out = self.conv1(x)
+        out = self.bn1(out)
+        out = self.act(out)
+
+        out = self.conv2(out)
+        out = self.bn2(out)
+        out = self.act(out)
+
+        out = self.conv3(out)
+        out = self.bn3(out)
+
+        if self.downsample is not None:
+            residual = self.downsample(x)
+
+        out += residual
+        out = self.act(out)
+
+        return out
+
+
+def fill_fc_weights(layers):
+    for m in layers.modules():
+        if isinstance(m, nn.Conv2d):
+            nn.init.normal_(m.weight, std=0.001)
+            # torch.nn.init.kaiming_normal_(m.weight.data, nonlinearity='relu')
+            # torch.nn.init.xavier_normal_(m.weight.data)
+            if m.bias is not None:
+                nn.init.constant_(m.bias, 0)
+
+
+class ResNet(nn.Module):
+    resnet_spec = {
+        18: (BasicBlock, [2, 2, 2, 2]),
+        34: (BasicBlock, [3, 4, 6, 3]),
+        50: (Bottleneck, [3, 4, 6, 3]),
+        101: (Bottleneck, [3, 4, 23, 3]),
+        152: (Bottleneck, [3, 8, 36, 3]),
+    }
+
+    def __init__(
+        self, depth, out_stages=(1, 2, 3, 4), activation="ReLU", pretrain=True
+    ):
+        super(ResNet, self).__init__()
+        if depth not in self.resnet_spec:
+            raise KeyError("invalid resnet depth {}".format(depth))
+        assert set(out_stages).issubset((1, 2, 3, 4))
+        self.activation = activation
+        block, layers = self.resnet_spec[depth]
+        self.depth = depth
+        self.inplanes = 64
+        self.out_stages = out_stages
+
+        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3, bias=False)
+        self.bn1 = nn.BatchNorm2d(64)
+        self.act = act_layers(self.activation)
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+        self.layer1 = self._make_layer(block, 64, layers[0])
+        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
+        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
+        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
+        self.init_weights(pretrain=pretrain)
+
+    def _make_layer(self, block, planes, blocks, stride=1):
+        downsample = None
+        if stride != 1 or self.inplanes != planes * block.expansion:
+            downsample = nn.Sequential(
+                nn.Conv2d(
+                    self.inplanes,
+                    planes * block.expansion,
+                    kernel_size=1,
+                    stride=stride,
+                    bias=False,
+                ),
+                nn.BatchNorm2d(planes * block.expansion),
+            )
+
+        layers = []
+        layers.append(
+            block(self.inplanes, planes, stride, downsample, activation=self.activation)
+        )
+        self.inplanes = planes * block.expansion
+        for i in range(1, blocks):
+            layers.append(block(self.inplanes, planes, activation=self.activation))
+
+        return nn.Sequential(*layers)
+
+    def forward(self, x):
+        x = self.conv1(x)
+        x = self.bn1(x)
+        x = self.act(x)
+        x = self.maxpool(x)
+        output = []
+        for i in range(1, 5):
+            res_layer = getattr(self, "layer{}".format(i))
+            x = res_layer(x)
+            if i in self.out_stages:
+                output.append(x)
+
+        return tuple(output)
+
+    def init_weights(self, pretrain=True):
+        if pretrain:
+            url = model_urls["resnet{}".format(self.depth)]
+            pretrained_state_dict = model_zoo.load_url(url)
+            print("=> loading pretrained model {}".format(url))
+            self.load_state_dict(pretrained_state_dict, strict=False)
+        else:
+            for m in self.modules():
+                if self.activation == "LeakyReLU":
+                    nonlinearity = "leaky_relu"
+                else:
+                    nonlinearity = "relu"
+                if isinstance(m, nn.Conv2d):
+                    nn.init.kaiming_normal_(
+                        m.weight, mode="fan_out", nonlinearity=nonlinearity
+                    )
+                elif isinstance(m, nn.BatchNorm2d):
+                    m.weight.data.fill_(1)
+                    m.bias.data.zero_()
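A brief, assumed sanity check of the depth/out_stages contract (pretrain=False skips the torchvision weight download):

    import torch
    from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone import ResNet

    net = ResNet(depth=18, out_stages=(2, 3, 4), pretrain=False)
    c3, c4, c5 = net(torch.randn(1, 3, 320, 320))  # stride-8, -16 and -32 feature maps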
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/shufflenetv2.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/shufflenetv2.py
new file mode 100644
index 0000000000000000000000000000000000000000..013f22a8c1efbe3b93f735d8bf5d0b8f49b9c4af
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/backbone/shufflenetv2.py
@@ -0,0 +1,207 @@
+import torch
+import torch.nn as nn
+import torch.utils.model_zoo as model_zoo
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.activation import act_layers
+
+model_urls = {
+    "shufflenetv2_0.5x": "https://download.pytorch.org/models/shufflenetv2_x0.5-f707e7126e.pth",  # noqa: E501
+    "shufflenetv2_1.0x": "https://download.pytorch.org/models/shufflenetv2_x1-5666bf0f80.pth",  # noqa: E501
+    "shufflenetv2_1.5x": None,
+    "shufflenetv2_2.0x": None,
+}
+
+
+def channel_shuffle(x, groups):
+    # type: (torch.Tensor, int) -> torch.Tensor
+    batchsize, num_channels, height, width = x.data.size()
+    channels_per_group = num_channels // groups
+
+    # reshape
+    x = x.view(batchsize, groups, channels_per_group, height, width)
+
+    x = torch.transpose(x, 1, 2).contiguous()
+
+    # flatten
+    x = x.view(batchsize, -1, height, width)
+
+    return x
+
+
+class ShuffleV2Block(nn.Module):
+    def __init__(self, inp, oup, stride, activation="ReLU"):
+        super(ShuffleV2Block, self).__init__()
+
+        if not (1 <= stride <= 3):
+            raise ValueError("illegal stride value")
+        self.stride = stride
+
+        branch_features = oup // 2
+        assert (self.stride != 1) or (inp == branch_features << 1)
+
+        if self.stride > 1:
+            self.branch1 = nn.Sequential(
+                self.depthwise_conv(
+                    inp, inp, kernel_size=3, stride=self.stride, padding=1
+                ),
+                nn.BatchNorm2d(inp),
+                nn.Conv2d(
+                    inp, branch_features, kernel_size=1, stride=1, padding=0, bias=False
+                ),
+                nn.BatchNorm2d(branch_features),
+                act_layers(activation),
+            )
+        else:
+            self.branch1 = nn.Sequential()
+
+        self.branch2 = nn.Sequential(
+            nn.Conv2d(
+                inp if (self.stride > 1) else branch_features,
+                branch_features,
+                kernel_size=1,
+                stride=1,
+                padding=0,
+                bias=False,
+            ),
+            nn.BatchNorm2d(branch_features),
+            act_layers(activation),
+            self.depthwise_conv(
+                branch_features,
+                branch_features,
+                kernel_size=3,
+                stride=self.stride,
+                padding=1,
+            ),
+            nn.BatchNorm2d(branch_features),
+            nn.Conv2d(
+                branch_features,
+                branch_features,
+                kernel_size=1,
+                stride=1,
+                padding=0,
+                bias=False,
+            ),
+            nn.BatchNorm2d(branch_features),
+            act_layers(activation),
+        )
+
+    @staticmethod
+    def depthwise_conv(i, o, kernel_size, stride=1, padding=0, bias=False):
+        return nn.Conv2d(i, o, kernel_size, stride, padding, bias=bias, groups=i)
+
+    def forward(self, x):
+        if self.stride == 1:
+            x1, x2 = x.chunk(2, dim=1)
+            out = torch.cat((x1, self.branch2(x2)), dim=1)
+        else:
+            out = torch.cat((self.branch1(x), self.branch2(x)), dim=1)
+
+        out = channel_shuffle(out, 2)
+
+        return out
+
+
+class ShuffleNetV2(nn.Module):
+    def __init__(
+        self,
+        model_size="1.5x",
+        out_stages=(2, 3, 4),
+        with_last_conv=False,
+        kernal_size=3,
+        activation="ReLU",
+        pretrain=True,
+    ):
+        super(ShuffleNetV2, self).__init__()
+        # out_stages can only be a subset of (2, 3, 4)
+        assert set(out_stages).issubset((2, 3, 4))
+
+        print("model size is ", model_size)
+
+        self.stage_repeats = [4, 8, 4]
+        self.model_size = model_size
+        self.out_stages = out_stages
+        self.with_last_conv = with_last_conv
+        self.kernal_size = kernal_size
+        self.activation = activation
+        if model_size == "0.5x":
+            self._stage_out_channels = [24, 48, 96, 192, 1024]
+        elif model_size == "1.0x":
+            self._stage_out_channels = [24, 116, 232, 464, 1024]
+        elif model_size == "1.5x":
+            self._stage_out_channels = [24, 176, 352, 704, 1024]
+        elif model_size == "2.0x":
+            self._stage_out_channels = [24, 244, 488, 976, 2048]
+        else:
+            raise NotImplementedError
+
+        # building first layer
+        input_channels = 3
+        output_channels = self._stage_out_channels[0]
+        self.conv1 = nn.Sequential(
+            nn.Conv2d(input_channels, output_channels, 3, 2, 1, bias=False),
+            nn.BatchNorm2d(output_channels),
+            act_layers(activation),
+        )
+        input_channels = output_channels
+
+        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
+
+        stage_names = ["stage{}".format(i) for i in [2, 3, 4]]
+        for name, repeats, output_channels in zip(
+            stage_names, self.stage_repeats, self._stage_out_channels[1:]
+        ):
+            seq = [
+                ShuffleV2Block(
+                    input_channels, output_channels, 2, activation=activation
+                )
+            ]
+            for i in range(repeats - 1):
+                seq.append(
+                    ShuffleV2Block(
+                        output_channels, output_channels, 1, activation=activation
+                    )
+                )
+            setattr(self, name, nn.Sequential(*seq))
+            input_channels = output_channels
+        output_channels = self._stage_out_channels[-1]
+        if self.with_last_conv:
+            conv5 = nn.Sequential(
+                nn.Conv2d(input_channels, output_channels, 1, 1, 0, bias=False),
+                nn.BatchNorm2d(output_channels),
+                act_layers(activation),
+            )
+            self.stage4.add_module("conv5", conv5)
+        self._initialize_weights(pretrain)
+
+    def forward(self, x):
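+        # Stem (3x3 stride-2 conv + max pool), then stages 2-4; only the stages
+        # listed in self.out_stages are returned, as a tuple of feature maps.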
+        x = self.conv1(x)
+        x = self.maxpool(x)
+        output = []
+        for i in range(2, 5):
+            stage = getattr(self, "stage{}".format(i))
+            x = stage(x)
+            if i in self.out_stages:
+                output.append(x)
+        return tuple(output)
+
+    def _initialize_weights(self, pretrain=True):
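+        # Convs are initialized from a Gaussian (std 0.01 for layers whose name
+        # contains "first", otherwise std 1/in_channels), BatchNorm weights to 1.
+        # When pretrain is True, the checkpoint referenced in model_urls (presumably
+        # ImageNet-pretrained ShuffleNetV2 weights) is loaded with strict=False,
+        # so missing or extra keys are ignored.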
+        print("init weights...")
+        for name, m in self.named_modules():
+            if isinstance(m, nn.Conv2d):
+                if "first" in name:
+                    nn.init.normal_(m.weight, 0, 0.01)
+                else:
+                    nn.init.normal_(m.weight, 0, 1.0 / m.weight.shape[1])
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.BatchNorm2d):
+                nn.init.constant_(m.weight, 1)
+                if m.bias is not None:
+                    nn.init.constant_(m.bias, 0.0001)
+                nn.init.constant_(m.running_mean, 0)
+        if pretrain:
+            url = model_urls["shufflenetv2_{}".format(self.model_size)]
+            if url is not None:
+                pretrained_state_dict = model_zoo.load_url(url)
+                print("=> loading pretrained model {}".format(url))
+                self.load_state_dict(pretrained_state_dict, strict=False)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..233fd18103f3d98a928b621435023dc6bcb4c715
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.fpn.fpn import FPN
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.fpn.ghost_pan import GhostPAN
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.fpn.pan import PAN
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.fpn.tan import TAN
+
+
+def build_fpn(cfg):
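+    # The cfg is expected to behave like a dict with a "name" key selecting the
+    # FPN variant; every remaining key is forwarded to that class as a keyword
+    # argument, e.g. (hypothetical values) dict(name="GhostPAN",
+    # in_channels=[116, 232, 464], out_channels=96, kernel_size=5).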
+    fpn_cfg = copy.deepcopy(cfg)
+    name = fpn_cfg.pop("name")
+    if name == "FPN":
+        return FPN(**fpn_cfg)
+    elif name == "PAN":
+        return PAN(**fpn_cfg)
+    elif name == "TAN":
+        return TAN(**fpn_cfg)
+    elif name == "GhostPAN":
+        return GhostPAN(**fpn_cfg)
+    else:
+        raise NotImplementedError
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/fpn.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/fpn.py
new file mode 100644
index 0000000000000000000000000000000000000000..4549c7409e9ce79d61fff6ac0f7731e43d155247
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/fpn.py
@@ -0,0 +1,100 @@
+# Modification 2020 RangiLyu
+# Copyright 2018-2019 Open-MMLab.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch.nn as nn
+import torch.nn.functional as F
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.init_weights import xavier_init
+
+
+class FPN(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        num_outs,
+        start_level=0,
+        end_level=-1,
+        conv_cfg=None,
+        norm_cfg=None,
+        activation=None,
+    ):
+        super(FPN, self).__init__()
+        assert isinstance(in_channels, list)
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.num_ins = len(in_channels)
+        self.num_outs = num_outs
+        self.fp16_enabled = False
+
+        if end_level == -1:
+            self.backbone_end_level = self.num_ins
+            assert num_outs >= self.num_ins - start_level
+        else:
+            # if end_level < inputs, no extra level is allowed
+            self.backbone_end_level = end_level
+            assert end_level <= len(in_channels)
+            assert num_outs == end_level - start_level
+        self.start_level = start_level
+        self.end_level = end_level
+        self.lateral_convs = nn.ModuleList()
+
+        for i in range(self.start_level, self.backbone_end_level):
+            l_conv = ConvModule(
+                in_channels[i],
+                out_channels,
+                1,
+                conv_cfg=conv_cfg,
+                norm_cfg=norm_cfg,
+                activation=activation,
+                inplace=False,
+            )
+
+            self.lateral_convs.append(l_conv)
+        self.init_weights()
+
+    # default init_weights for conv(msra) and norm in ConvModule
+    def init_weights(self):
+        for m in self.modules():
+            if isinstance(m, nn.Conv2d):
+                xavier_init(m, distribution="uniform")
+
+    def forward(self, inputs):
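+        # 1x1 lateral convs project each backbone level to out_channels, then the
+        # top-down pass upsamples each deeper level by 2x (bilinear) and adds it to
+        # the next shallower lateral. This lightweight variant returns the fused
+        # laterals directly, without extra 3x3 output convs.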
+        assert len(inputs) == len(self.in_channels)
+
+        # build laterals
+        laterals = [
+            lateral_conv(inputs[i + self.start_level])
+            for i, lateral_conv in enumerate(self.lateral_convs)
+        ]
+
+        # build top-down path
+        used_backbone_levels = len(laterals)
+        for i in range(used_backbone_levels - 1, 0, -1):
+            laterals[i - 1] += F.interpolate(
+                laterals[i], scale_factor=2, mode="bilinear"
+            )
+
+        # build outputs
+        outs = [
+            # self.fpn_convs[i](laterals[i]) for i in range(used_backbone_levels)
+            laterals[i]
+            for i in range(used_backbone_levels)
+        ]
+        return tuple(outs)
+
+
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/ghost_pan.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/ghost_pan.py
new file mode 100644
index 0000000000000000000000000000000000000000..76e043179cb848492ac6d900687e8e1e7bf633f7
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/ghost_pan.py
@@ -0,0 +1,244 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.backbone.ghostnet import GhostBottleneck
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule, DepthwiseConvModule
+
+
+class GhostBlocks(nn.Module):
+    """Stack of GhostBottleneck used in GhostPAN.
+
+    Args:
+        in_channels (int): Number of input channels.
+        out_channels (int): Number of output channels.
+        expand (int): Expand ratio of GhostBottleneck. Default: 1.
+        kernel_size (int): Kernel size of depthwise convolution. Default: 5.
+        num_blocks (int): Number of GhostBottleneck blocks. Default: 1.
+        use_res (bool): Whether to use residual connection. Default: False.
+        activation (str): Name of activation function. Default: LeakyReLU.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        expand=1,
+        kernel_size=5,
+        num_blocks=1,
+        use_res=False,
+        activation="LeakyReLU",
+    ):
+        super(GhostBlocks, self).__init__()
+        self.use_res = use_res
+        if use_res:
+            self.reduce_conv = ConvModule(
+                in_channels,
+                out_channels,
+                kernel_size=1,
+                stride=1,
+                padding=0,
+                activation=activation,
+            )
+        blocks = []
+        for _ in range(num_blocks):
+            blocks.append(
+                GhostBottleneck(
+                    in_channels,
+                    int(out_channels * expand),
+                    out_channels,
+                    dw_kernel_size=kernel_size,
+                    activation=activation,
+                )
+            )
+        self.blocks = nn.Sequential(*blocks)
+
+    def forward(self, x):
+        out = self.blocks(x)
+        if self.use_res:
+            out = out + self.reduce_conv(x)
+        return out
+
+
+class GhostPAN(nn.Module):
+    """Path Aggregation Network with Ghost block.
+
+    Args:
+        in_channels (List[int]): Number of input channels per scale.
+        out_channels (int): Number of output channels (used at each scale).
+        use_depthwise (bool): Whether to use depthwise separable convolutions
+            in the blocks. Default: False.
+        kernel_size (int): Kernel size of depthwise convolution. Default: 5.
+        expand (int): Expand ratio of GhostBottleneck. Default: 1.
+        num_blocks (int): Number of GhostBottleneck blocks. Default: 1.
+        use_res (bool): Whether to use residual connection. Default: False.
+        num_extra_level (int): Number of extra conv layers for more feature levels.
+            Default: 0.
+        upsample_cfg (dict): Config dict for interpolate layer.
+            Default: `dict(scale_factor=2, mode='bilinear')`
+        norm_cfg (dict): Config dict for normalization layer.
+            Default: dict(type='BN')
+        activation (str): Activation layer name.
+            Default: LeakyReLU.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        use_depthwise=False,
+        kernel_size=5,
+        expand=1,
+        num_blocks=1,
+        use_res=False,
+        num_extra_level=0,
+        upsample_cfg=dict(scale_factor=2, mode="bilinear"),
+        norm_cfg=dict(type="BN"),
+        activation="LeakyReLU",
+    ):
+        super(GhostPAN, self).__init__()
+        assert num_extra_level >= 0
+        assert num_blocks >= 1
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+
+        conv = DepthwiseConvModule if use_depthwise else ConvModule
+
+        # build top-down blocks
+        self.upsample = nn.Upsample(**upsample_cfg)
+        self.reduce_layers = nn.ModuleList()
+        for idx in range(len(in_channels)):
+            self.reduce_layers.append(
+                ConvModule(
+                    in_channels[idx],
+                    out_channels,
+                    1,
+                    norm_cfg=norm_cfg,
+                    activation=activation,
+                )
+            )
+        self.top_down_blocks = nn.ModuleList()
+        for idx in range(len(in_channels) - 1, 0, -1):
+            self.top_down_blocks.append(
+                GhostBlocks(
+                    out_channels * 2,
+                    out_channels,
+                    expand,
+                    kernel_size=kernel_size,
+                    num_blocks=num_blocks,
+                    use_res=use_res,
+                    activation=activation,
+                )
+            )
+
+        # build bottom-up blocks
+        self.downsamples = nn.ModuleList()
+        self.bottom_up_blocks = nn.ModuleList()
+        for idx in range(len(in_channels) - 1):
+            self.downsamples.append(
+                conv(
+                    out_channels,
+                    out_channels,
+                    kernel_size,
+                    stride=2,
+                    padding=kernel_size // 2,
+                    norm_cfg=norm_cfg,
+                    activation=activation,
+                )
+            )
+            self.bottom_up_blocks.append(
+                GhostBlocks(
+                    out_channels * 2,
+                    out_channels,
+                    expand,
+                    kernel_size=kernel_size,
+                    num_blocks=num_blocks,
+                    use_res=use_res,
+                    activation=activation,
+                )
+            )
+
+        # extra layers
+        self.extra_lvl_in_conv = nn.ModuleList()
+        self.extra_lvl_out_conv = nn.ModuleList()
+        for i in range(num_extra_level):
+            self.extra_lvl_in_conv.append(
+                conv(
+                    out_channels,
+                    out_channels,
+                    kernel_size,
+                    stride=2,
+                    padding=kernel_size // 2,
+                    norm_cfg=norm_cfg,
+                    activation=activation,
+                )
+            )
+            self.extra_lvl_out_conv.append(
+                conv(
+                    out_channels,
+                    out_channels,
+                    kernel_size,
+                    stride=2,
+                    padding=kernel_size // 2,
+                    norm_cfg=norm_cfg,
+                    activation=activation,
+                )
+            )
+
+    def forward(self, inputs):
+        """
+        Args:
+            inputs (tuple[Tensor]): input features.
+        Returns:
+            tuple[Tensor]: multi level features.
+        """
+        assert len(inputs) == len(self.in_channels)
+        inputs = [
+            reduce(input_x) for input_x, reduce in zip(inputs, self.reduce_layers)
+        ]
+        # top-down path
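+        # Starting from the deepest reduced feature, upsample and concatenate it with
+        # the next shallower feature, then fuse with a GhostBlocks module; results are
+        # inserted at the front so inner_outs stays ordered from shallow to deep.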
+        inner_outs = [inputs[-1]]
+        for idx in range(len(self.in_channels) - 1, 0, -1):
+            feat_heigh = inner_outs[0]
+            feat_low = inputs[idx - 1]
+
+            inner_outs[0] = feat_heigh
+
+            upsample_feat = self.upsample(feat_heigh)
+
+            inner_out = self.top_down_blocks[len(self.in_channels) - 1 - idx](
+                torch.cat([upsample_feat, feat_low], 1)
+            )
+            inner_outs.insert(0, inner_out)
+
+        # bottom-up path
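+        # Downsample the shallower output with a strided conv, concatenate it with the
+        # corresponding top-down feature, and fuse again with GhostBlocks.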
+        outs = [inner_outs[0]]
+        for idx in range(len(self.in_channels) - 1):
+            feat_low = outs[-1]
+            feat_height = inner_outs[idx + 1]
+            downsample_feat = self.downsamples[idx](feat_low)
+            out = self.bottom_up_blocks[idx](
+                torch.cat([downsample_feat, feat_height], 1)
+            )
+            outs.append(out)
+
+        # extra layers
+        for extra_in_layer, extra_out_layer in zip(
+            self.extra_lvl_in_conv, self.extra_lvl_out_conv
+        ):
+            outs.append(extra_in_layer(inputs[-1]) + extra_out_layer(outs[-1]))
+
+        return tuple(outs)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/pan.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/pan.py
new file mode 100644
index 0000000000000000000000000000000000000000..c12482f294b6a3928a9816156069254fa6b6fac4
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/pan.py
@@ -0,0 +1,94 @@
+# Modification 2020 RangiLyu
+# Copyright 2018-2019 Open-MMLab.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch.nn.functional as F
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.fpn.fpn import FPN
+
+
+class PAN(FPN):
+    """Path Aggregation Network for Instance Segmentation.
+
+    This is an implementation of the `PAN in Path Aggregation Network
+    <https://arxiv.org/abs/1803.01534>`_.
+
+    Args:
+        in_channels (List[int]): Number of input channels per scale.
+        out_channels (int): Number of output channels (used at each scale)
+        num_outs (int): Number of output scales.
+        start_level (int): Index of the start input backbone level used to
+            build the feature pyramid. Default: 0.
+        end_level (int): Index of the end input backbone level (exclusive) to
+            build the feature pyramid. Default: -1, which means the last level.
+        conv_cfg (dict): Config dict for convolution layer. Default: None.
+        norm_cfg (dict): Config dict for normalization layer. Default: None.
+        activation (str): Config dict for activation layer in ConvModule.
+            Default: None.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        num_outs,
+        start_level=0,
+        end_level=-1,
+        conv_cfg=None,
+        norm_cfg=None,
+        activation=None,
+    ):
+        super(PAN, self).__init__(
+            in_channels,
+            out_channels,
+            num_outs,
+            start_level,
+            end_level,
+            conv_cfg,
+            norm_cfg,
+            activation,
+        )
+        self.init_weights()
+
+    def forward(self, inputs):
+        """Forward function."""
+        assert len(inputs) == len(self.in_channels)
+
+        # build laterals
+        laterals = [
+            lateral_conv(inputs[i + self.start_level])
+            for i, lateral_conv in enumerate(self.lateral_convs)
+        ]
+
+        # build top-down path
+        used_backbone_levels = len(laterals)
+        for i in range(used_backbone_levels - 1, 0, -1):
+            laterals[i - 1] += F.interpolate(
+                laterals[i], scale_factor=2, mode="bilinear"
+            )
+
+        # build outputs
+        # part 1: from original levels
+        inter_outs = [laterals[i] for i in range(used_backbone_levels)]
+
+        # part 2: add bottom-up path
+        for i in range(0, used_backbone_levels - 1):
+            inter_outs[i + 1] += F.interpolate(
+                inter_outs[i], scale_factor=0.5, mode="bilinear"
+            )
+
+        outs = []
+        outs.append(inter_outs[0])
+        outs.extend([inter_outs[i] for i in range(1, used_backbone_levels)])
+        return tuple(outs)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/tan.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/tan.py
new file mode 100644
index 0000000000000000000000000000000000000000..42efd128b97b3a5ad28f3046daade2afb182bf05
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/fpn/tan.py
@@ -0,0 +1,121 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.init_weights import normal_init
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.transformer import TransformerBlock
+
+
+class TAN(nn.Module):
+    """
+    Transformer Attention Network.
+
+    :param in_channels: Number of input channels per scale.
+    :param out_channels: Number of output channels.
+    :param feature_hw: Size of feature map input to transformer.
+    :param num_heads: Number of attention heads.
+    :param num_encoders: Number of transformer encoder layers.
+    :param mlp_ratio: Hidden layer dimension expand ratio in MLP.
+    :param dropout_ratio: Probability of an element to be zeroed.
+    :param activation: Activation layer type.
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        feature_hw,
+        num_heads,
+        num_encoders,
+        mlp_ratio,
+        dropout_ratio,
+        activation="LeakyReLU",
+    ):
+        super(TAN, self).__init__()
+        assert isinstance(in_channels, list)
+        self.in_channels = in_channels
+        self.out_channels = out_channels
+        self.num_ins = len(in_channels)
+        assert self.num_ins == 3
+
+        self.lateral_convs = nn.ModuleList()
+        for i in range(self.num_ins):
+            l_conv = ConvModule(
+                in_channels[i],
+                out_channels,
+                1,
+                norm_cfg=dict(type="BN"),
+                activation=activation,
+                inplace=False,
+            )
+            self.lateral_convs.append(l_conv)
+        self.transformer = TransformerBlock(
+            out_channels * self.num_ins,
+            out_channels,
+            num_heads,
+            num_encoders,
+            mlp_ratio,
+            dropout_ratio,
+            activation=activation,
+        )
+        self.pos_embed = nn.Parameter(
+            torch.zeros(feature_hw[0] * feature_hw[1], 1, out_channels)
+        )
+
+        self.init_weights()
+
+    def init_weights(self):
+        torch.nn.init.trunc_normal_(self.pos_embed, std=0.02)
+        for m in self.modules():
+            if isinstance(m, nn.Linear):
+                torch.nn.init.trunc_normal_(m.weight, std=0.02)
+                if isinstance(m, nn.Linear) and m.bias is not None:
+                    nn.init.constant_(m.bias, 0)
+            elif isinstance(m, nn.LayerNorm):
+                nn.init.constant_(m.bias, 0)
+                nn.init.constant_(m.weight, 1.0)
+            elif isinstance(m, nn.Conv2d):
+                normal_init(m, 0.01)
+
+    def forward(self, inputs):
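+        # Project all three levels to out_channels, resize them to the middle level's
+        # resolution and concatenate, run the transformer block with the learned
+        # positional embedding, then resize the attended feature back and add it to
+        # each lateral.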
+        assert len(inputs) == len(self.in_channels)
+
+        # build laterals
+        laterals = [
+            lateral_conv(inputs[i]) for i, lateral_conv in enumerate(self.lateral_convs)
+        ]
+
+        # transformer attention
+        mid_shape = laterals[1].shape[2:]
+        mid_lvl = torch.cat(
+            (
+                F.interpolate(laterals[0], size=mid_shape, mode="bilinear"),
+                laterals[1],
+                F.interpolate(laterals[2], size=mid_shape, mode="bilinear"),
+            ),
+            dim=1,
+        )
+        mid_lvl = self.transformer(mid_lvl, self.pos_embed)
+
+        # build outputs
+        outs = [
+            laterals[0] + F.interpolate(mid_lvl, size=laterals[0].shape[2:], mode="bilinear"),
+            laterals[1] + mid_lvl,
+            laterals[2] + F.interpolate(mid_lvl, size=laterals[2].shape[2:], mode="bilinear"),
+        ]
+        return tuple(outs)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..7a756751588f426b4824966040523c32d1d655dc
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/__init__.py
@@ -0,0 +1,21 @@
+import copy
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.gfl_head import GFLHead
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.nanodet_head import NanoDetHead
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.nanodet_plus_head import NanoDetPlusHead
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.simple_conv_head import SimpleConvHead
+
+
+def build_head(cfg):
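+    # Same pattern as build_fpn: "name" selects the head class and the remaining
+    # cfg entries become its constructor keyword arguments.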
+    head_cfg = copy.deepcopy(cfg)
+    name = head_cfg.pop("name")
+    if name == "GFLHead":
+        return GFLHead(**head_cfg)
+    elif name == "NanoDetHead":
+        return NanoDetHead(**head_cfg)
+    elif name == "NanoDetPlusHead":
+        return NanoDetPlusHead(**head_cfg)
+    elif name == "SimpleConvHead":
+        return SimpleConvHead(**head_cfg)
+    else:
+        raise NotImplementedError
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/assign_result.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/assign_result.py
new file mode 100644
index 0000000000000000000000000000000000000000..ca32f0c8f735bfe3c465c9c2b4c50803840b1260
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/assign_result.py
@@ -0,0 +1,228 @@
+# Modification 2020 RangiLyu
+# Copyright 2018-2019 Open-MMLab.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util import util_mixins
+
+
+class AssignResult(util_mixins.NiceRepr):
+    """
+    Stores assignments between predicted and truth boxes.
+
+    Attributes:
+        num_gts (int): the number of truth boxes considered when computing this
+            assignment
+
+        gt_inds (LongTensor): for each predicted box indicates the 1-based
+            index of the assigned truth box. 0 means unassigned and -1 means
+            ignore.
+
+        max_overlaps (FloatTensor): the iou between the predicted box and its
+            assigned truth box.
+
+        labels (None | LongTensor): If specified, for each predicted box
+            indicates the category label of the assigned truth box.
+
+    Example:
+        >>> # An assign result between 4 predicted boxes and 9 true boxes
+        >>> # where only two boxes were assigned.
+        >>> num_gts = 9
+        >>> max_overlaps = torch.FloatTensor([0, .5, .9, 0])
+        >>> gt_inds = torch.LongTensor([-1, 1, 2, 0])
+        >>> labels = torch.LongTensor([0, 3, 4, 0])
+        >>> self = AssignResult(num_gts, gt_inds, max_overlaps, labels)
+        >>> print(str(self))  # xdoctest: +IGNORE_WANT
+        <AssignResult(num_gts=9, gt_inds.shape=(4,), max_overlaps.shape=(4,),
+                      labels.shape=(4,))>
+        >>> # Force addition of gt labels (when adding gt as proposals)
+        >>> new_labels = torch.LongTensor([3, 4, 5])
+        >>> self.add_gt_(new_labels)
+        >>> print(str(self))  # xdoctest: +IGNORE_WANT
+        <AssignResult(num_gts=9, gt_inds.shape=(7,), max_overlaps.shape=(7,),
+                      labels.shape=(7,))>
+    """
+
+    def __init__(self, num_gts, gt_inds, max_overlaps, labels=None):
+        self.num_gts = num_gts
+        self.gt_inds = gt_inds
+        self.max_overlaps = max_overlaps
+        self.labels = labels
+        # Interface for possible user-defined properties
+        self._extra_properties = {}
+
+    @property
+    def num_preds(self):
+        """int: the number of predictions in this assignment"""
+        return len(self.gt_inds)
+
+    def set_extra_property(self, key, value):
+        """Set user-defined new property."""
+        assert key not in self.info
+        self._extra_properties[key] = value
+
+    def get_extra_property(self, key):
+        """Get user-defined property."""
+        return self._extra_properties.get(key, None)
+
+    @property
+    def info(self):
+        """dict: a dictionary of info about the object"""
+        basic_info = {
+            "num_gts": self.num_gts,
+            "num_preds": self.num_preds,
+            "gt_inds": self.gt_inds,
+            "max_overlaps": self.max_overlaps,
+            "labels": self.labels,
+        }
+        basic_info.update(self._extra_properties)
+        return basic_info
+
+    def __nice__(self):
+        """str: a "nice" summary string describing this assign result"""
+        parts = []
+        parts.append(f"num_gts={self.num_gts!r}")
+        if self.gt_inds is None:
+            parts.append(f"gt_inds={self.gt_inds!r}")
+        else:
+            parts.append(f"gt_inds.shape={tuple(self.gt_inds.shape)!r}")
+        if self.max_overlaps is None:
+            parts.append(f"max_overlaps={self.max_overlaps!r}")
+        else:
+            parts.append("max_overlaps.shape=" f"{tuple(self.max_overlaps.shape)!r}")
+        if self.labels is None:
+            parts.append(f"labels={self.labels!r}")
+        else:
+            parts.append(f"labels.shape={tuple(self.labels.shape)!r}")
+        return ", ".join(parts)
+
+    @classmethod
+    def random(cls, **kwargs):
+        """Create random AssignResult for tests or debugging.
+
+        Args:
+            num_preds (int): number of predicted boxes
+            num_gts (int): number of true boxes
+            p_ignore (float): probability of a predicted box being assigned to
+                an ignored truth
+            p_assigned (float): probability of a predicted box being assigned
+            p_use_label (float | bool): with labels or not
+            num_classes (int): number of foreground classes used when labels
+                are generated. Default: 3.
+            rng (None | int | numpy.random.RandomState): seed or state
+
+        Returns:
+            :obj:`AssignResult`: Randomly generated assign results.
+
+        Example:
+            >>> from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head\
+            .assigner.assign_result import AssignResult
+            >>> self = AssignResult.random()
+            >>> print(self.info)
+        """
+        rng = kwargs.get("rng", None)
+        num_gts = kwargs.get("num_gts", None)
+        num_preds = kwargs.get("num_preds", None)
+        p_ignore = kwargs.get("p_ignore", 0.3)
+        p_assigned = kwargs.get("p_assigned", 0.7)
+        p_use_label = kwargs.get("p_use_label", 0.5)
+        num_classes = kwargs.get("p_use_label", 3)
+
+        import numpy as np
+
+        if rng is None:
+            rng = np.random.mtrand._rand
+        elif isinstance(rng, int):
+            rng = np.random.RandomState(rng)
+        else:
+            rng = rng
+        if num_gts is None:
+            num_gts = rng.randint(0, 8)
+        if num_preds is None:
+            num_preds = rng.randint(0, 16)
+
+        if num_gts == 0:
+            max_overlaps = torch.zeros(num_preds, dtype=torch.float32)
+            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+            if p_use_label is True or p_use_label < rng.rand():
+                labels = torch.zeros(num_preds, dtype=torch.int64)
+            else:
+                labels = None
+        else:
+            # Create an overlap for each predicted box
+            max_overlaps = torch.from_numpy(rng.rand(num_preds))
+
+            # Construct gt_inds for each predicted box
+            is_assigned = torch.from_numpy(rng.rand(num_preds) < p_assigned)
+            # maximum number of assignments constraints
+            n_assigned = min(num_preds, min(num_gts, is_assigned.sum()))
+
+            assigned_idxs = np.where(is_assigned)[0]
+            rng.shuffle(assigned_idxs)
+            assigned_idxs = assigned_idxs[0:n_assigned]
+            assigned_idxs.sort()
+
+            is_assigned[:] = 0
+            is_assigned[assigned_idxs] = True
+
+            is_ignore = torch.from_numpy(rng.rand(num_preds) < p_ignore) & is_assigned
+
+            gt_inds = torch.zeros(num_preds, dtype=torch.int64)
+
+            true_idxs = np.arange(num_gts)
+            rng.shuffle(true_idxs)
+            true_idxs = torch.from_numpy(true_idxs)
+            gt_inds[is_assigned] = true_idxs[:n_assigned]
+
+            gt_inds = torch.from_numpy(rng.randint(1, num_gts + 1, size=num_preds))
+            gt_inds[is_ignore] = -1
+            gt_inds[~is_assigned] = 0
+            max_overlaps[~is_assigned] = 0
+
+            if p_use_label is True or p_use_label < rng.rand():
+                if num_classes == 0:
+                    labels = torch.zeros(num_preds, dtype=torch.int64)
+                else:
+                    labels = torch.from_numpy(
+                        # remind that we set FG labels to [0, num_class-1]
+                        # since mmdet v2.0
+                        # BG cat_id: num_class
+                        rng.randint(0, num_classes, size=num_preds)
+                    )
+                    labels[~is_assigned] = 0
+            else:
+                labels = None
+
+        self = cls(num_gts, gt_inds, max_overlaps, labels)
+        return self
+
+    def add_gt_(self, gt_labels):
+        """Add ground truth as assigned results.
+
+        Args:
+            gt_labels (torch.Tensor): Labels of gt boxes
+        """
+        self_inds = torch.arange(
+            1, len(gt_labels) + 1, dtype=torch.long, device=gt_labels.device
+        )
+        self.gt_inds = torch.cat([self_inds, self.gt_inds])
+
+        self.max_overlaps = torch.cat(
+            [self.max_overlaps.new_ones(len(gt_labels)), self.max_overlaps]
+        )
+
+        if self.labels is not None:
+            self.labels = torch.cat([gt_labels, self.labels])
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/atss_assigner.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/atss_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..ab4c8cf86e43b563c2a5ea55ffcbbaf8559f4180
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/atss_assigner.py
@@ -0,0 +1,174 @@
+# Modification 2020 RangiLyu
+# Copyright 2018-2019 Open-MMLab.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.iou_loss import bbox_overlaps
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.assigner.assign_result import AssignResult
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.assigner.base_assigner import BaseAssigner
+
+
+class ATSSAssigner(BaseAssigner):
+    """Assign a corresponding gt bbox or background to each bbox.
+
+    Each proposal will be assigned `0` or a positive integer
+    indicating the ground truth index.
+
+    - 0: negative sample, no assigned gt
+    - positive integer: positive sample, index (1-based) of assigned gt
+
+    Args:
+        topk (int): number of bboxes selected on each level
+    """
+
+    def __init__(self, topk):
+        self.topk = topk
+
+    # https://github.com/sfzhang15/ATSS/blob/master/atss_core/modeling/rpn/atss/loss.py
+
+    def assign(
+        self, bboxes, num_level_bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None
+    ):
+        """Assign gt to bboxes.
+
+        The assignment is done in the following steps:
+
+        1. compute the iou between all bboxes (bboxes of all pyramid levels) and gts
+        2. compute the center distance between all bboxes and gts
+        3. on each pyramid level, for each gt, select the k bboxes whose centers
+           are closest to the gt center, so in total k*l bboxes are selected as
+           candidates for each gt
+        4. get the corresponding iou for these candidates, and compute the
+           mean and std; set mean + std as the iou threshold
+        5. select the candidates whose iou is greater than or equal to
+           the threshold as positive
+        6. limit the positive samples' centers to lie inside the gt
+
+
+        Args:
+            bboxes (Tensor): Bounding boxes to be assigned, shape(n, 4).
+            num_level_bboxes (List): num of bboxes in each level
+            gt_bboxes (Tensor): Groundtruth boxes, shape (k, 4).
+            gt_bboxes_ignore (Tensor, optional): Ground truth bboxes that are
+                labelled as `ignored`, e.g., crowd boxes in COCO.
+            gt_labels (Tensor, optional): Label of gt_bboxes, shape (k, ).
+
+        Returns:
+            :obj:`AssignResult`: The assign result.
+        """
+        INF = 100000000
+        bboxes = bboxes[:, :4]
+        num_gt, num_bboxes = gt_bboxes.size(0), bboxes.size(0)
+
+        # compute iou between all bbox and gt
+        overlaps = bbox_overlaps(bboxes, gt_bboxes)
+
+        # assign 0 by default
+        assigned_gt_inds = overlaps.new_full((num_bboxes,), 0, dtype=torch.long)
+
+        if num_gt == 0 or num_bboxes == 0:
+            # No ground truth or boxes, return empty assignment
+            max_overlaps = overlaps.new_zeros((num_bboxes,))
+            if num_gt == 0:
+                # No truth, assign everything to background
+                assigned_gt_inds[:] = 0
+            if gt_labels is None:
+                assigned_labels = None
+            else:
+                assigned_labels = overlaps.new_full((num_bboxes,), -1, dtype=torch.long)
+            return AssignResult(
+                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
+            )
+
+        # compute center distance between all bbox and gt
+        gt_cx = (gt_bboxes[:, 0] + gt_bboxes[:, 2]) / 2.0
+        gt_cy = (gt_bboxes[:, 1] + gt_bboxes[:, 3]) / 2.0
+        gt_points = torch.stack((gt_cx, gt_cy), dim=1)
+
+        bboxes_cx = (bboxes[:, 0] + bboxes[:, 2]) / 2.0
+        bboxes_cy = (bboxes[:, 1] + bboxes[:, 3]) / 2.0
+        bboxes_points = torch.stack((bboxes_cx, bboxes_cy), dim=1)
+
+        distances = (
+            (bboxes_points[:, None, :] - gt_points[None, :, :]).pow(2).sum(-1).sqrt()
+        )
+
+        # Selecting candidates based on the center distance
+        candidate_idxs = []
+        start_idx = 0
+        for level, bboxes_per_level in enumerate(num_level_bboxes):
+            # on each pyramid level, for each gt,
+            # select k bbox whose center are closest to the gt center
+            end_idx = start_idx + bboxes_per_level
+            distances_per_level = distances[start_idx:end_idx, :]
+            selectable_k = min(self.topk, bboxes_per_level)
+            _, topk_idxs_per_level = distances_per_level.topk(
+                selectable_k, dim=0, largest=False
+            )
+            candidate_idxs.append(topk_idxs_per_level + start_idx)
+            start_idx = end_idx
+        candidate_idxs = torch.cat(candidate_idxs, dim=0)
+
+        # get the corresponding iou for these candidates, and compute the
+        # mean and std; set mean + std as the iou threshold
+        candidate_overlaps = overlaps[candidate_idxs, torch.arange(num_gt)]
+        overlaps_mean_per_gt = candidate_overlaps.mean(0)
+        overlaps_std_per_gt = candidate_overlaps.std(0)
+        overlaps_thr_per_gt = overlaps_mean_per_gt + overlaps_std_per_gt
+
+        is_pos = candidate_overlaps >= overlaps_thr_per_gt[None, :]
+
+        # limit the positive sample's center in gt
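+        # Offset each gt's candidate indices by gt_idx * num_bboxes so they can index
+        # into the flattened (num_gt x num_bboxes) center tensors built below.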
+        for gt_idx in range(num_gt):
+            candidate_idxs[:, gt_idx] += gt_idx * num_bboxes
+        ep_bboxes_cx = (
+            bboxes_cx.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1)
+        )
+        ep_bboxes_cy = (
+            bboxes_cy.view(1, -1).expand(num_gt, num_bboxes).contiguous().view(-1)
+        )
+        candidate_idxs = candidate_idxs.view(-1)
+
+        # calculate the left, top, right, bottom distance between positive
+        # bbox center and gt side
+        l_ = ep_bboxes_cx[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 0]
+        t_ = ep_bboxes_cy[candidate_idxs].view(-1, num_gt) - gt_bboxes[:, 1]
+        r_ = gt_bboxes[:, 2] - ep_bboxes_cx[candidate_idxs].view(-1, num_gt)
+        b_ = gt_bboxes[:, 3] - ep_bboxes_cy[candidate_idxs].view(-1, num_gt)
+        is_in_gts = torch.stack([l_, t_, r_, b_], dim=1).min(dim=1)[0] > 0.01
+        is_pos = is_pos & is_in_gts
+
+        # if an anchor box is assigned to multiple gts,
+        # the one with the highest IoU will be selected.
+        overlaps_inf = torch.full_like(overlaps, -INF).t().contiguous().view(-1)
+        index = candidate_idxs.view(-1)[is_pos.view(-1)]
+        overlaps_inf[index] = overlaps.t().contiguous().view(-1)[index]
+        overlaps_inf = overlaps_inf.view(num_gt, -1).t()
+
+        max_overlaps, argmax_overlaps = overlaps_inf.max(dim=1)
+        assigned_gt_inds[max_overlaps != -INF] = (
+            argmax_overlaps[max_overlaps != -INF] + 1
+        )
+
+        if gt_labels is not None:
+            assigned_labels = assigned_gt_inds.new_full((num_bboxes,), -1)
+            pos_inds = torch.nonzero(assigned_gt_inds > 0, as_tuple=False).squeeze()
+            if pos_inds.numel() > 0:
+                assigned_labels[pos_inds] = gt_labels[assigned_gt_inds[pos_inds] - 1]
+        else:
+            assigned_labels = None
+        return AssignResult(
+            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
+        )
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/base_assigner.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/base_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..8a9094faa577283322e81b11815feca640ec856b
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/base_assigner.py
@@ -0,0 +1,7 @@
+from abc import ABCMeta, abstractmethod
+
+
+class BaseAssigner(metaclass=ABCMeta):
+    @abstractmethod
+    def assign(self, bboxes, gt_bboxes, gt_bboxes_ignore=None, gt_labels=None):
+        pass
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/dsl_assigner.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/dsl_assigner.py
new file mode 100644
index 0000000000000000000000000000000000000000..a75bf1fbc9285f529de9d4d31a6911c9e3bac7b4
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/assigner/dsl_assigner.py
@@ -0,0 +1,154 @@
+import torch
+import torch.nn.functional as F
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.iou_loss import bbox_overlaps
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.assigner.assign_result import AssignResult
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.assigner.base_assigner import BaseAssigner
+
+
+class DynamicSoftLabelAssigner(BaseAssigner):
+    """Computes matching between predictions and ground truth with
+    dynamic soft label assignment.
+
+    Args:
+        topk (int): Select top-k predictions to calculate dynamic k
+            best matches for each gt. Default: 13.
+        iou_factor (float): The scale factor of iou cost. Default 3.0.
+    """
+
+    def __init__(self, topk=13, iou_factor=3.0):
+        self.topk = topk
+        self.iou_factor = iou_factor
+
+    def assign(
+        self,
+        pred_scores,
+        priors,
+        decoded_bboxes,
+        gt_bboxes,
+        gt_labels,
+    ):
+        """Assign gt to priors with dynamic soft label assignment.
+        Args:
+            pred_scores (Tensor): Classification scores of one image,
+                a 2D-Tensor with shape [num_priors, num_classes]
+            priors (Tensor): All priors of one image, a 2D-Tensor with shape
+                [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
+            decoded_bboxes (Tensor): Predicted bboxes, a 2D-Tensor with shape
+                [num_priors, 4] in [tl_x, tl_y, br_x, br_y] format.
+            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
+                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (Tensor): Ground truth labels of one image, a Tensor
+                with shape [num_gts].
+
+        Returns:
+            :obj:`AssignResult`: The assigned result.
+        """
+        INF = 100000000
+        num_gt = gt_bboxes.size(0)
+        num_bboxes = decoded_bboxes.size(0)
+
+        # assign 0 by default
+        assigned_gt_inds = decoded_bboxes.new_full((num_bboxes,), 0, dtype=torch.long)
+
+        prior_center = priors[:, :2]
+        lt_ = prior_center[:, None] - gt_bboxes[:, :2]
+        rb_ = gt_bboxes[:, 2:] - prior_center[:, None]
+
+        deltas = torch.cat([lt_, rb_], dim=-1)
+        is_in_gts = deltas.min(dim=-1).values > 0
+        valid_mask = is_in_gts.sum(dim=1) > 0
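+        # A prior is kept as a candidate only if its center lies strictly inside at
+        # least one ground-truth box.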
+
+        valid_decoded_bbox = decoded_bboxes[valid_mask]
+        valid_pred_scores = pred_scores[valid_mask]
+        num_valid = valid_decoded_bbox.size(0)
+
+        if num_gt == 0 or num_bboxes == 0 or num_valid == 0:
+            # No ground truth or boxes, return empty assignment
+            max_overlaps = decoded_bboxes.new_zeros((num_bboxes,))
+            if num_gt == 0:
+                # No truth, assign everything to background
+                assigned_gt_inds[:] = 0
+            if gt_labels is None:
+                assigned_labels = None
+            else:
+                assigned_labels = decoded_bboxes.new_full(
+                    (num_bboxes,), -1, dtype=torch.long
+                )
+            return AssignResult(
+                num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
+            )
+
+        pairwise_ious = bbox_overlaps(valid_decoded_bbox, gt_bboxes)
+        iou_cost = -torch.log(pairwise_ious + 1e-7)
+
+        gt_onehot_label = (
+            F.one_hot(gt_labels.to(torch.int64), pred_scores.shape[-1])
+            .float()
+            .unsqueeze(0)
+            .repeat(num_valid, 1, 1)
+        )
+        valid_pred_scores = valid_pred_scores.unsqueeze(1).repeat(1, num_gt, 1)
+
+        soft_label = gt_onehot_label * pairwise_ious[..., None]
+        scale_factor = soft_label - valid_pred_scores
+
+        cls_cost = F.binary_cross_entropy(
+            valid_pred_scores, soft_label, reduction="none"
+        ) * scale_factor.abs().pow(2.0)
+
+        cls_cost = cls_cost.sum(dim=-1)
+
+        cost_matrix = cls_cost + iou_cost * self.iou_factor
+
+        matched_pred_ious, matched_gt_inds = self.dynamic_k_matching(
+            cost_matrix, pairwise_ious, num_gt, valid_mask
+        )
+
+        # convert to AssignResult format
+        assigned_gt_inds[valid_mask] = matched_gt_inds + 1
+        assigned_labels = assigned_gt_inds.new_full((num_bboxes,), -1)
+        assigned_labels[valid_mask] = gt_labels[matched_gt_inds].long()
+        max_overlaps = assigned_gt_inds.new_full(
+            (num_bboxes,), -INF, dtype=torch.float32
+        )
+        max_overlaps[valid_mask] = matched_pred_ious
+        return AssignResult(
+            num_gt, assigned_gt_inds, max_overlaps, labels=assigned_labels
+        )
+
+    def dynamic_k_matching(self, cost, pairwise_ious, num_gt, valid_mask):
+        """Use sum of topk pred iou as dynamic k. Refer from OTA and YOLOX.
+
+        Args:
+            cost (Tensor): Cost matrix.
+            pairwise_ious (Tensor): Pairwise iou matrix.
+            num_gt (int): Number of gt.
+            valid_mask (Tensor): Mask for valid bboxes.
+        """
+        matching_matrix = torch.zeros_like(cost)
+        # select candidate topk ious for dynamic-k calculation
+        candidate_topk = min(self.topk, pairwise_ious.size(0))
+        topk_ious, _ = torch.topk(pairwise_ious, candidate_topk, dim=0)
+        # calculate dynamic k for each gt
+        dynamic_ks = torch.clamp(topk_ious.sum(0).int(), min=1)
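+        # Each gt is matched to its k lowest-cost priors, where k is the (clamped)
+        # sum of its top candidate_topk IoUs, following the OTA/YOLOX dynamic-k rule.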
+        for gt_idx in range(num_gt):
+            _, pos_idx = torch.topk(
+                cost[:, gt_idx], k=dynamic_ks[gt_idx].item(), largest=False
+            )
+            matching_matrix[:, gt_idx][pos_idx] = 1.0
+
+        del topk_ious, dynamic_ks, pos_idx
+
+        prior_match_gt_mask = matching_matrix.sum(1) > 1
+        if prior_match_gt_mask.sum() > 0:
+            cost_min, cost_argmin = torch.min(cost[prior_match_gt_mask, :], dim=1)
+            matching_matrix[prior_match_gt_mask, :] *= 0.0
+            matching_matrix[prior_match_gt_mask, cost_argmin] = 1.0
+        # get foreground mask inside box and center prior
+        fg_mask_inboxes = matching_matrix.sum(1) > 0.0
+        valid_mask[valid_mask.clone()] = fg_mask_inboxes
+
+        matched_gt_inds = matching_matrix[fg_mask_inboxes, :].argmax(1)
+        matched_pred_ious = (matching_matrix * pairwise_ious).sum(1)[fg_mask_inboxes]
+        return matched_pred_ious, matched_gt_inds
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/gfl_head.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/gfl_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..e26e083b37699d34be4b3b142f7ebe85caf63433
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/gfl_head.py
@@ -0,0 +1,700 @@
+import math
+
+import numpy as np
+import torch
+import torch.distributed as dist
+import torch.nn as nn
+import torch.nn.functional as F
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util import (
+    bbox2distance,
+    distance2bbox,
+    images_to_levels,
+    multi_apply,
+)
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.transform.warp import warp_boxes
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.gfocal_loss\
+    import DistributionFocalLoss, QualityFocalLoss
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.iou_loss import GIoULoss, bbox_overlaps
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.init_weights import normal_init
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.nms import multiclass_nms
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.scale import Scale
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.assigner.atss_assigner\
+    import ATSSAssigner
+
+
+def reduce_mean(tensor):
+    if not (dist.is_available() and dist.is_initialized()):
+        return tensor
+    # divide by the world size first, then sum across processes, so the returned
+    # tensor holds the mean over all workers (true_divide is out-of-place)
+    tensor = tensor.true_divide(dist.get_world_size())
+    dist.all_reduce(tensor, op=dist.ReduceOp.SUM)
+    return tensor
+
+
+class Integral(nn.Module):
+    """A fixed layer for calculating integral result from distribution.
+    This layer calculates the target location by :math:`sum{P(y_i) * y_i}`,
+    where P(y_i) denotes the softmax vector that represents the discrete
+    distribution and y_i denotes the discrete set, usually {0, 1, 2, ..., reg_max}.
+    Args:
+        reg_max (int): The maximal value of the discrete set. Default: 16. You
+            may want to reset it according to your new dataset or related
+            settings.
+    """
+
+    def __init__(self, reg_max=16):
+        super(Integral, self).__init__()
+        self.reg_max = reg_max
+        self.register_buffer(
+            "project", torch.linspace(0, self.reg_max, self.reg_max + 1)
+        )
+
+    def forward(self, x):
+        """Forward feature from the regression head to get integral result of
+        bounding box location.
+        Args:
+            x (Tensor): Features of the regression head, shape (N, 4*(n+1)),
+                n is self.reg_max.
+        Returns:
+            x (Tensor): Integral result of box locations, i.e., distance
+                offsets from the box center in four directions, shape (N, 4).
+        """
+        shape = x.size()
+        x = F.softmax(x.reshape(*shape[:-1], 4, self.reg_max + 1), dim=-1)
+        x = F.linear(x, self.project.type_as(x)).reshape(*shape[:-1], 4)
+        return x
+
+
+class GFLHead(nn.Module):
+    """Generalized Focal Loss: Learning Qualified and Distributed Bounding
+    Boxes for Dense Object Detection.
+
+    The GFL head structure is similar to that of ATSS; however, GFL uses
+    1) a joint representation of classification and localization quality, and
+    2) a flexible General distribution for bounding box locations,
+    which are supervised by
+    Quality Focal Loss (QFL) and Distribution Focal Loss (DFL), respectively.
+
+    https://arxiv.org/abs/2006.04388
+
+    :param num_classes: Number of categories excluding the background category.
+    :param loss: Config of all loss functions.
+    :param input_channel: Number of channels in the input feature map.
+    :param feat_channels: Number of hidden channels in the cls and reg towers. Default: 256.
+    :param stacked_convs: Number of conv layers in the cls and reg towers. Default: 4.
+    :param octave_base_scale: Scale factor of grid cells.
+    :param strides: Downsample strides of each feature map level. Default: [8, 16, 32].
+    :param conv_cfg: Dictionary to construct and config conv layer. Default: None.
+    :param norm_cfg: Dictionary to construct and config norm layer.
+    :param reg_max: Max value of integral set :math: `{0, ..., reg_max}`
+                    in QFL setting. Default: 16.
+    :param kwargs:
+    """
+
+    def __init__(
+        self,
+        num_classes,
+        loss,
+        input_channel,
+        feat_channels=256,
+        stacked_convs=4,
+        octave_base_scale=4,
+        strides=[8, 16, 32],
+        conv_cfg=None,
+        norm_cfg=dict(type="GN", num_groups=32, requires_grad=True),
+        reg_max=16,
+        **kwargs
+    ):
+        super(GFLHead, self).__init__()
+        self.num_classes = num_classes
+        self.in_channels = input_channel
+        self.feat_channels = feat_channels
+        self.stacked_convs = stacked_convs
+        self.grid_cell_scale = octave_base_scale
+        self.strides = strides
+        self.reg_max = reg_max
+
+        self.loss_cfg = loss
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.use_sigmoid = self.loss_cfg.loss_qfl.use_sigmoid
+        if self.use_sigmoid:
+            self.cls_out_channels = num_classes
+        else:
+            self.cls_out_channels = num_classes + 1
+
+        self.assigner = ATSSAssigner(topk=9)
+        self.distribution_project = Integral(self.reg_max)
+
+        self.loss_qfl = QualityFocalLoss(
+            use_sigmoid=self.use_sigmoid,
+            beta=self.loss_cfg.loss_qfl.beta,
+            loss_weight=self.loss_cfg.loss_qfl.loss_weight,
+        )
+        self.loss_dfl = DistributionFocalLoss(
+            loss_weight=self.loss_cfg.loss_dfl.loss_weight
+        )
+        self.loss_bbox = GIoULoss(loss_weight=self.loss_cfg.loss_bbox.loss_weight)
+        self._init_layers()
+        self.init_weights()
+
+    def _init_layers(self):
+        self.relu = nn.ReLU(inplace=True)
+        self.cls_convs = nn.ModuleList()
+        self.reg_convs = nn.ModuleList()
+        for i in range(self.stacked_convs):
+            chn = self.in_channels if i == 0 else self.feat_channels
+            self.cls_convs.append(
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=self.conv_cfg,
+                    norm_cfg=self.norm_cfg,
+                )
+            )
+            self.reg_convs.append(
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=self.conv_cfg,
+                    norm_cfg=self.norm_cfg,
+                )
+            )
+        self.gfl_cls = nn.Conv2d(
+            self.feat_channels, self.cls_out_channels, 3, padding=1
+        )
+        self.gfl_reg = nn.Conv2d(
+            self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1
+        )
+        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+    def init_weights(self):
+        for m in self.cls_convs:
+            normal_init(m.conv, std=0.01)
+        for m in self.reg_convs:
+            normal_init(m.conv, std=0.01)
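+        # prior probability of 0.01 => bias = -log((1 - 0.01) / 0.01) ~= -4.595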
+        bias_cls = -4.595
+        normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
+        normal_init(self.gfl_reg, std=0.01)
+
+    def forward(self, feats):
+        if torch.onnx.is_in_onnx_export():
+            return self._forward_onnx(feats)
+        outputs = []
+        for x, scale in zip(feats, self.scales):
+            cls_feat = x
+            reg_feat = x
+            for cls_conv in self.cls_convs:
+                cls_feat = cls_conv(cls_feat)
+            for reg_conv in self.reg_convs:
+                reg_feat = reg_conv(reg_feat)
+            cls_score = self.gfl_cls(cls_feat)
+            bbox_pred = scale(self.gfl_reg(reg_feat)).float()
+            output = torch.cat([cls_score, bbox_pred], dim=1)
+            outputs.append(output.flatten(start_dim=2))
+        outputs = torch.cat(outputs, dim=2).permute(0, 2, 1)
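+        # outputs: (batch, num_points, cls_out_channels + 4 * (reg_max + 1))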
+        return outputs
+
+    def loss(self, preds, gt_meta):
+        cls_scores, bbox_preds = preds.split(
+            [self.num_classes, 4 * (self.reg_max + 1)], dim=-1
+        )
+        device = cls_scores.device
+        gt_bboxes = gt_meta["gt_bboxes"]
+        gt_labels = gt_meta["gt_labels"]
+        input_height, input_width = gt_meta["img"].shape[2:]
+        gt_bboxes_ignore = None
+
+        featmap_sizes = [
+            (math.ceil(input_height / stride), math.ceil(input_width / stride))
+            for stride in self.strides
+        ]
+
+        cls_reg_targets = self.target_assign(
+            cls_scores,
+            bbox_preds,
+            featmap_sizes,
+            gt_bboxes,
+            gt_bboxes_ignore,
+            gt_labels,
+            device=device,
+        )
+        if cls_reg_targets is None:
+            return None
+
+        (
+            cls_preds_list,
+            reg_preds_list,
+            grid_cells_list,
+            labels_list,
+            label_weights_list,
+            bbox_targets_list,
+            bbox_weights_list,
+            num_total_pos,
+            num_total_neg,
+        ) = cls_reg_targets
+
+        num_total_samples = reduce_mean(torch.tensor(num_total_pos).to(device)).item()
+        num_total_samples = max(num_total_samples, 1.0)
+
+        losses_qfl, losses_bbox, losses_dfl, avg_factor = multi_apply(
+            self.loss_single,
+            grid_cells_list,
+            cls_preds_list,
+            reg_preds_list,
+            labels_list,
+            label_weights_list,
+            bbox_targets_list,
+            self.strides,
+            num_total_samples=num_total_samples,
+        )
+
+        avg_factor = sum(avg_factor)
+        avg_factor = reduce_mean(avg_factor).item()
+        if avg_factor <= 0:
+            loss_qfl = torch.tensor(0, dtype=torch.float32, requires_grad=True).to(
+                device
+            )
+            loss_bbox = torch.tensor(0, dtype=torch.float32, requires_grad=True).to(
+                device
+            )
+            loss_dfl = torch.tensor(0, dtype=torch.float32, requires_grad=True).to(
+                device
+            )
+        else:
+            losses_bbox = list(map(lambda x: x / avg_factor, losses_bbox))
+            losses_dfl = list(map(lambda x: x / avg_factor, losses_dfl))
+
+            loss_qfl = sum(losses_qfl)
+            loss_bbox = sum(losses_bbox)
+            loss_dfl = sum(losses_dfl)
+
+        loss = loss_qfl + loss_bbox + loss_dfl
+        loss_states = dict(loss_qfl=loss_qfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
+
+        return loss, loss_states
+
+    def loss_single(
+        self,
+        grid_cells,
+        cls_score,
+        bbox_pred,
+        labels,
+        label_weights,
+        bbox_targets,
+        stride,
+        num_total_samples,
+    ):
+        grid_cells = grid_cells.reshape(-1, 4)
+        cls_score = cls_score.reshape(-1, self.cls_out_channels)
+        bbox_pred = bbox_pred.reshape(-1, 4 * (self.reg_max + 1))
+        bbox_targets = bbox_targets.reshape(-1, 4)
+        labels = labels.reshape(-1)
+        label_weights = label_weights.reshape(-1)
+
+        # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+        bg_class_ind = self.num_classes
+        pos_inds = torch.nonzero(
+            (labels >= 0) & (labels < bg_class_ind), as_tuple=False
+        ).squeeze(1)
+
+        score = label_weights.new_zeros(labels.shape)
+
+        if len(pos_inds) > 0:
+            pos_bbox_targets = bbox_targets[pos_inds]
+            pos_bbox_pred = bbox_pred[pos_inds]  # (n, 4 * (reg_max + 1))
+            pos_grid_cells = grid_cells[pos_inds]
+            pos_grid_cell_centers = self.grid_cells_to_center(pos_grid_cells) / stride
+
+            weight_targets = cls_score.detach().sigmoid()
+            weight_targets = weight_targets.max(dim=1)[0][pos_inds]
+            pos_bbox_pred_corners = self.distribution_project(pos_bbox_pred)
+            pos_decode_bbox_pred = distance2bbox(
+                pos_grid_cell_centers, pos_bbox_pred_corners
+            )
+            pos_decode_bbox_targets = pos_bbox_targets / stride
+            score[pos_inds] = bbox_overlaps(
+                pos_decode_bbox_pred.detach(), pos_decode_bbox_targets, is_aligned=True
+            )
+            pred_corners = pos_bbox_pred.reshape(-1, self.reg_max + 1)
+            target_corners = bbox2distance(
+                pos_grid_cell_centers, pos_decode_bbox_targets, self.reg_max
+            ).reshape(-1)
+
+            # regression loss
+            loss_bbox = self.loss_bbox(
+                pos_decode_bbox_pred,
+                pos_decode_bbox_targets,
+                weight=weight_targets,
+                avg_factor=1.0,
+            )
+
+            # dfl loss
+            loss_dfl = self.loss_dfl(
+                pred_corners,
+                target_corners,
+                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+                avg_factor=4.0,
+            )
+        else:
+            loss_bbox = bbox_pred.sum() * 0
+            loss_dfl = bbox_pred.sum() * 0
+            weight_targets = torch.tensor(0).to(cls_score.device)
+
+        # qfl loss
+        loss_qfl = self.loss_qfl(
+            cls_score,
+            (labels, score),
+            weight=label_weights,
+            avg_factor=num_total_samples,
+        )
+
+        return loss_qfl, loss_bbox, loss_dfl, weight_targets.sum()
+
+    def target_assign(
+        self,
+        cls_preds,
+        reg_preds,
+        featmap_sizes,
+        gt_bboxes_list,
+        gt_bboxes_ignore_list,
+        gt_labels_list,
+        device,
+    ):
+        """
+        Assign targets for a batch of images.
+        :param cls_preds: Classification predictions for the whole batch
+        :param reg_preds: Regression predictions for the whole batch
+        :param featmap_sizes: A list of (height, width) sizes of the multi-level feature maps
+        :param gt_bboxes_list: A list of ground truth boxes, one entry per image
+        :param gt_bboxes_ignore_list: A list of ignored boxes, one entry per image
+        :param gt_labels_list: A list of ground truth labels, one entry per image
+        :param device: pytorch device
+        :return: Assign results of all images.
+        """
+        batch_size = cls_preds.shape[0]
+        # get grid cells of one image
+        multi_level_grid_cells = [
+            self.get_grid_cells(
+                featmap_sizes[i],
+                self.grid_cell_scale,
+                stride,
+                dtype=torch.float32,
+                device=device,
+            )
+            for i, stride in enumerate(self.strides)
+        ]
+        mlvl_grid_cells_list = [multi_level_grid_cells for i in range(batch_size)]
+
+        # pixel cell number of multi-level feature maps
+        num_level_cells = [grid_cells.size(0) for grid_cells in mlvl_grid_cells_list[0]]
+        num_level_cells_list = [num_level_cells] * batch_size
+        # concat all level cells and to a single tensor
+        for i in range(batch_size):
+            mlvl_grid_cells_list[i] = torch.cat(mlvl_grid_cells_list[i])
+        # compute targets for each image
+        if gt_bboxes_ignore_list is None:
+            gt_bboxes_ignore_list = [None for _ in range(batch_size)]
+        if gt_labels_list is None:
+            gt_labels_list = [None for _ in range(batch_size)]
+        # target assign on all images, get list of tensors
+        # list length = batch size
+        # tensor first dim = num of all grid cell
+        (
+            all_grid_cells,
+            all_labels,
+            all_label_weights,
+            all_bbox_targets,
+            all_bbox_weights,
+            pos_inds_list,
+            neg_inds_list,
+        ) = multi_apply(
+            self.target_assign_single_img,
+            mlvl_grid_cells_list,
+            num_level_cells_list,
+            gt_bboxes_list,
+            gt_bboxes_ignore_list,
+            gt_labels_list,
+        )
+        # no valid cells
+        if any([labels is None for labels in all_labels]):
+            return None
+        # sampled cells of all images
+        num_total_pos = sum([max(inds.numel(), 1) for inds in pos_inds_list])
+        num_total_neg = sum([max(inds.numel(), 1) for inds in neg_inds_list])
+        # merge list of targets tensors into one batch then split to multi levels
+        mlvl_cls_preds = images_to_levels([c for c in cls_preds], num_level_cells)
+        mlvl_reg_preds = images_to_levels([r for r in reg_preds], num_level_cells)
+        mlvl_grid_cells = images_to_levels(all_grid_cells, num_level_cells)
+        mlvl_labels = images_to_levels(all_labels, num_level_cells)
+        mlvl_label_weights = images_to_levels(all_label_weights, num_level_cells)
+        mlvl_bbox_targets = images_to_levels(all_bbox_targets, num_level_cells)
+        mlvl_bbox_weights = images_to_levels(all_bbox_weights, num_level_cells)
+        return (
+            mlvl_cls_preds,
+            mlvl_reg_preds,
+            mlvl_grid_cells,
+            mlvl_labels,
+            mlvl_label_weights,
+            mlvl_bbox_targets,
+            mlvl_bbox_weights,
+            num_total_pos,
+            num_total_neg,
+        )
+
+    def target_assign_single_img(
+        self, grid_cells, num_level_cells, gt_bboxes, gt_bboxes_ignore, gt_labels
+    ):
+        """
+        Using ATSS Assigner to assign target on one image.
+        :param grid_cells: Grid cell boxes of all pixels on feature map
+        :param num_level_cells: numbers of grid cells on each level's feature map
+        :param gt_bboxes: Ground truth boxes
+        :param gt_bboxes_ignore: Ground truths which are ignored
+        :param gt_labels: Ground truth labels
+        :return: Assign results of a single image
+        """
+        device = grid_cells.device
+        gt_bboxes = torch.from_numpy(gt_bboxes).to(device)
+        gt_labels = torch.from_numpy(gt_labels).to(device)
+
+        assign_result = self.assigner.assign(
+            grid_cells, num_level_cells, gt_bboxes, gt_bboxes_ignore, gt_labels
+        )
+
+        pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds = self.sample(
+            assign_result, gt_bboxes
+        )
+
+        num_cells = grid_cells.shape[0]
+        bbox_targets = torch.zeros_like(grid_cells)
+        bbox_weights = torch.zeros_like(grid_cells)
+        labels = grid_cells.new_full((num_cells,), self.num_classes, dtype=torch.long)
+        label_weights = grid_cells.new_zeros(num_cells, dtype=torch.float)
+
+        if len(pos_inds) > 0:
+            pos_bbox_targets = pos_gt_bboxes
+            bbox_targets[pos_inds, :] = pos_bbox_targets
+            bbox_weights[pos_inds, :] = 1.0
+            if gt_labels is None:
+                # Only rpn gives gt_labels as None
+                # Foreground is the first class
+                labels[pos_inds] = 0
+            else:
+                labels[pos_inds] = gt_labels[pos_assigned_gt_inds]
+
+            label_weights[pos_inds] = 1.0
+        if len(neg_inds) > 0:
+            label_weights[neg_inds] = 1.0
+
+        return (
+            grid_cells,
+            labels,
+            label_weights,
+            bbox_targets,
+            bbox_weights,
+            pos_inds,
+            neg_inds,
+        )
+
+    def sample(self, assign_result, gt_bboxes):
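+        # the assigner encodes negatives as gt_inds == 0 and positives as
+        # (matched gt index + 1), hence the "- 1" when recovering matched gt indices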
+        pos_inds = (
+            torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+            .squeeze(-1)
+            .unique()
+        )
+        neg_inds = (
+            torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+            .squeeze(-1)
+            .unique()
+        )
+        pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
+
+        if gt_bboxes.numel() == 0:
+            # hack for index error case
+            assert pos_assigned_gt_inds.numel() == 0
+            pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
+        else:
+            if len(gt_bboxes.shape) < 2:
+                gt_bboxes = gt_bboxes.view(-1, 4)
+            pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]
+        return pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds
+
+    def post_process(self, preds, meta):
+        cls_scores, bbox_preds = preds.split(
+            [self.num_classes, 4 * (self.reg_max + 1)], dim=-1
+        )
+        result_list = self.get_bboxes(cls_scores, bbox_preds, meta)
+        det_results = {}
+        warp_matrixes = meta["warp_matrix"]
+        img_heights = (
+            meta["img_info"]["height"].cpu().numpy()
+            if isinstance(meta["img_info"]["height"], torch.Tensor)
+            else meta["img_info"]["height"]
+        )
+        img_widths = (
+            meta["img_info"]["width"].cpu().numpy()
+            if isinstance(meta["img_info"]["width"], torch.Tensor)
+            else meta["img_info"]["width"]
+        )
+        img_ids = (
+            meta["img_info"]["id"].cpu().numpy()
+            if isinstance(meta["img_info"]["id"], torch.Tensor)
+            else meta["img_info"]["id"]
+        )
+
+        for result, img_width, img_height, img_id, warp_matrix in zip(
+            result_list, img_widths, img_heights, img_ids, warp_matrixes
+        ):
+            det_result = {}
+            det_bboxes, det_labels = result
+            det_bboxes = det_bboxes.detach().cpu().numpy()
+            det_bboxes[:, :4] = warp_boxes(
+                det_bboxes[:, :4], np.linalg.inv(warp_matrix), img_width, img_height
+            )
+            classes = det_labels.detach().cpu().numpy()
+            for i in range(self.num_classes):
+                inds = classes == i
+                det_result[i] = np.concatenate(
+                    [
+                        det_bboxes[inds, :4].astype(np.float32),
+                        det_bboxes[inds, 4:5].astype(np.float32),
+                    ],
+                    axis=1,
+                ).tolist()
+            det_results[img_id] = det_result
+        return det_results
+
+    def get_bboxes(self, cls_preds, reg_preds, img_metas):
+        """Decode the outputs to bboxes.
+        Args:
+            cls_preds (Tensor): Shape (num_imgs, num_points, num_classes).
+            reg_preds (Tensor): Shape (num_imgs, num_points, 4 * (regmax + 1)).
+            img_metas (dict): Dict of image info.
+
+        Returns:
+            results_list (list[tuple]): List of detection bboxes and labels.
+        """
+        device = cls_preds.device
+        b = cls_preds.shape[0]
+        input_height, input_width = img_metas["img"].shape[2:]
+        input_shape = (input_height, input_width)
+
+        featmap_sizes = [
+            (math.ceil(input_height / stride), math.ceil(input_width / stride))
+            for stride in self.strides
+        ]
+        # get grid cells of one image
+        mlvl_center_priors = []
+        for i, stride in enumerate(self.strides):
+            y, x = self.get_single_level_center_point(
+                featmap_sizes[i], stride, torch.float32, device
+            )
+            strides = x.new_full((x.shape[0],), stride)
+            priors = torch.stack([x, y, strides, strides], dim=-1)
+            mlvl_center_priors.append(priors.unsqueeze(0).repeat(b, 1, 1))
+
+        center_priors = torch.cat(mlvl_center_priors, dim=1)
+        dis_preds = self.distribution_project(reg_preds) * center_priors[..., 2, None]
+        bboxes = distance2bbox(center_priors[..., :2], dis_preds, max_shape=input_shape)
+        scores = cls_preds.sigmoid()
+        result_list = []
+        for i in range(b):
+            # add a dummy background class at the end of all labels
+            # same with mmdetection2.0
+            score, bbox = scores[i], bboxes[i]
+            padding = score.new_zeros(score.shape[0], 1)
+            score = torch.cat([score, padding], dim=1)
+            results = multiclass_nms(
+                bbox,
+                score,
+                score_thr=0.05,
+                nms_cfg=dict(type="nms", iou_threshold=0.6),
+                max_num=100,
+            )
+            result_list.append(results)
+        return result_list
+
+    def get_single_level_center_point(
+        self, featmap_size, stride, dtype, device, flatten=True
+    ):
+        """
+        Generate pixel centers of a single stage feature map.
+        :param featmap_size: height and width of the feature map
+        :param stride: down sample stride of the feature map
+        :param dtype: data type of the tensors
+        :param device: device of the tensors
+        :param flatten: flatten the x and y tensors
+        :return: y and x of the center points
+        """
+        h, w = featmap_size
+        x_range = (torch.arange(w, dtype=dtype, device=device) + 0.5) * stride
+        y_range = (torch.arange(h, dtype=dtype, device=device) + 0.5) * stride
+        y, x = torch.meshgrid(y_range, x_range)
+        if flatten:
+            y = y.flatten()
+            x = x.flatten()
+        return y, x
+
+    def get_grid_cells(self, featmap_size, scale, stride, dtype, device):
+        """
+        Generate grid cells of a feature map for target assignment.
+        :param featmap_size: Size of a single level feature map.
+        :param scale: Grid cell scale.
+        :param stride: Down sample stride of the feature map.
+        :param dtype: Data type of the tensors.
+        :param device: Device of the tensors.
+        :return: Grid_cells xyxy position. Size should be [feat_w * feat_h, 4]
+        """
+        cell_size = stride * scale
+        y, x = self.get_single_level_center_point(
+            featmap_size, stride, dtype, device, flatten=True
+        )
+        grid_cells = torch.stack(
+            [
+                x - 0.5 * cell_size,
+                y - 0.5 * cell_size,
+                x + 0.5 * cell_size,
+                y + 0.5 * cell_size,
+            ],
+            dim=-1,
+        )
+        return grid_cells
+
+    def grid_cells_to_center(self, grid_cells):
+        """
+        Get the center location of each grid cell.
+        :param grid_cells: grid cells of a feature map
+        :return: center points
+        """
+        cells_cx = (grid_cells[:, 2] + grid_cells[:, 0]) / 2
+        cells_cy = (grid_cells[:, 3] + grid_cells[:, 1]) / 2
+        return torch.stack([cells_cx, cells_cy], dim=-1)
+
+    def _forward_onnx(self, feats):
+        """only used for onnx export"""
+        outputs = []
+        for x, scale in zip(feats, self.scales):
+            cls_feat = x
+            reg_feat = x
+            for cls_conv in self.cls_convs:
+                cls_feat = cls_conv(cls_feat)
+            for reg_conv in self.reg_convs:
+                reg_feat = reg_conv(reg_feat)
+            cls_pred = self.gfl_cls(cls_feat)
+            reg_pred = scale(self.gfl_reg(reg_feat))
+            cls_pred = cls_pred.sigmoid()
+            out = torch.cat([cls_pred, reg_pred], dim=1)
+            outputs.append(out.flatten(start_dim=2))
+        return torch.cat(outputs, dim=2).permute(0, 2, 1)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/nanodet_head.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/nanodet_head.py
new file mode 100755
index 0000000000000000000000000000000000000000..01eac4146eb78c5aadec83b2c8137161ec6465e2
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/nanodet_head.py
@@ -0,0 +1,185 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule, DepthwiseConvModule
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.init_weights import normal_init
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.gfl_head import GFLHead
+
+
+class NanoDetHead(GFLHead):
+    """
+    Modified from GFL; uses the same loss functions but much more lightweight convolution heads.
+    """
+
+    def __init__(
+        self,
+        num_classes,
+        loss,
+        input_channel,
+        stacked_convs=2,
+        octave_base_scale=5,
+        conv_type="DWConv",
+        conv_cfg=None,
+        norm_cfg=dict(type="BN"),
+        reg_max=16,
+        share_cls_reg=False,
+        activation="LeakyReLU",
+        feat_channels=256,
+        strides=[8, 16, 32],
+        **kwargs
+    ):
+        self.share_cls_reg = share_cls_reg
+        self.activation = activation
+        self.ConvModule = ConvModule if conv_type == "Conv" else DepthwiseConvModule
+        super(NanoDetHead, self).__init__(
+            num_classes,
+            loss,
+            input_channel,
+            feat_channels,
+            stacked_convs,
+            octave_base_scale,
+            strides,
+            conv_cfg,
+            norm_cfg,
+            reg_max,
+            **kwargs
+        )
+
+    def _init_layers(self):
+        self.cls_convs = nn.ModuleList()
+        self.reg_convs = nn.ModuleList()
+        for _ in self.strides:
+            cls_convs, reg_convs = self._build_not_shared_head()
+            self.cls_convs.append(cls_convs)
+            self.reg_convs.append(reg_convs)
+
+        self.gfl_cls = nn.ModuleList(
+            [
+                nn.Conv2d(
+                    self.feat_channels,
+                    self.cls_out_channels + 4 * (self.reg_max + 1)
+                    if self.share_cls_reg
+                    else self.cls_out_channels,
+                    1,
+                    padding=0,
+                )
+                for _ in self.strides
+            ]
+        )
+        # TODO: skip building gfl_reg when share_cls_reg is True, since it is unused in that case
+        self.gfl_reg = nn.ModuleList(
+            [
+                nn.Conv2d(self.feat_channels, 4 * (self.reg_max + 1), 1, padding=0)
+                for _ in self.strides
+            ]
+        )
+
+    def _build_not_shared_head(self):
+        cls_convs = nn.ModuleList()
+        reg_convs = nn.ModuleList()
+        for i in range(self.stacked_convs):
+            chn = self.in_channels if i == 0 else self.feat_channels
+            cls_convs.append(
+                self.ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    norm_cfg=self.norm_cfg,
+                    bias=self.norm_cfg is None,
+                    activation=self.activation,
+                )
+            )
+            if not self.share_cls_reg:
+                reg_convs.append(
+                    self.ConvModule(
+                        chn,
+                        self.feat_channels,
+                        3,
+                        stride=1,
+                        padding=1,
+                        norm_cfg=self.norm_cfg,
+                        bias=self.norm_cfg is None,
+                        activation=self.activation,
+                    )
+                )
+
+        return cls_convs, reg_convs
+
+    def init_weights(self):
+        for m in self.cls_convs.modules():
+            if isinstance(m, nn.Conv2d):
+                normal_init(m, std=0.01)
+        for m in self.reg_convs.modules():
+            if isinstance(m, nn.Conv2d):
+                normal_init(m, std=0.01)
+        # init cls head with confidence = 0.01
+        bias_cls = -4.595
+        for i in range(len(self.strides)):
+            normal_init(self.gfl_cls[i], std=0.01, bias=bias_cls)
+            normal_init(self.gfl_reg[i], std=0.01)
+        print("Finish initialize NanoDet Head.")
+
+    def forward(self, feats):
+        if torch.onnx.is_in_onnx_export():
+            return self._forward_onnx(feats)
+        outputs = []
+        for x, cls_convs, reg_convs, gfl_cls, gfl_reg in zip(
+            feats, self.cls_convs, self.reg_convs, self.gfl_cls, self.gfl_reg
+        ):
+            cls_feat = x
+            reg_feat = x
+            for cls_conv in cls_convs:
+                cls_feat = cls_conv(cls_feat)
+            for reg_conv in reg_convs:
+                reg_feat = reg_conv(reg_feat)
+            if self.share_cls_reg:
+                output = gfl_cls(cls_feat)
+            else:
+                cls_score = gfl_cls(cls_feat)
+                bbox_pred = gfl_reg(reg_feat)
+                output = torch.cat([cls_score, bbox_pred], dim=1)
+            outputs.append(output.flatten(start_dim=2))
+        outputs = torch.cat(outputs, dim=2).permute(0, 2, 1)
+        return outputs
+
+    def _forward_onnx(self, feats):
+        """only used for onnx export"""
+        outputs = []
+        for x, cls_convs, reg_convs, gfl_cls, gfl_reg in zip(
+            feats, self.cls_convs, self.reg_convs, self.gfl_cls, self.gfl_reg
+        ):
+            cls_feat = x
+            reg_feat = x
+            for cls_conv in cls_convs:
+                cls_feat = cls_conv(cls_feat)
+            for reg_conv in reg_convs:
+                reg_feat = reg_conv(reg_feat)
+            if self.share_cls_reg:
+                output = gfl_cls(cls_feat)
+                cls_pred, reg_pred = output.split(
+                    [self.num_classes, 4 * (self.reg_max + 1)], dim=1
+                )
+            else:
+                cls_pred = gfl_cls(cls_feat)
+                reg_pred = gfl_reg(reg_feat)
+
+            cls_pred = cls_pred.sigmoid()
+            out = torch.cat([cls_pred, reg_pred], dim=1)
+            outputs.append(out.flatten(start_dim=2))
+        return torch.cat(outputs, dim=2).permute(0, 2, 1)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/nanodet_plus_head.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/nanodet_plus_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..5d853d5ecfb20666abb25a24acbf93ff4bf11d36
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/nanodet_plus_head.py
@@ -0,0 +1,510 @@
+import math
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util\
+    import bbox2distance, distance2bbox, multi_apply
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.transform.warp import warp_boxes
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.gfocal_loss \
+    import DistributionFocalLoss, QualityFocalLoss
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.iou_loss import GIoULoss
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv \
+    import ConvModule, DepthwiseConvModule
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.init_weights import normal_init
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.nms import multiclass_nms
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.assigner.dsl_assigner \
+    import DynamicSoftLabelAssigner
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.head.gfl_head import Integral, reduce_mean
+
+
+class NanoDetPlusHead(nn.Module):
+    """Detection head used in NanoDet-Plus.
+
+    Args:
+        num_classes (int): Number of categories excluding the background
+            category.
+        loss (dict): Loss config.
+        input_channel (int): Number of channels of the input feature.
+        feat_channels (int): Number of channels of the feature.
+            Default: 96.
+        stacked_convs (int): Number of conv layers in the stacked convs.
+            Default: 2.
+        kernel_size (int): Size of the convolving kernel. Default: 5.
+        strides (list[int]): Strides of input multi-level feature maps.
+            Default: [8, 16, 32].
+        conv_type (str): Type of the convolution.
+            Default: "DWConv".
+        norm_cfg (dict): Dictionary to construct and config norm layer.
+            Default: dict(type='BN').
+        reg_max (int): The maximal value of the discrete set. Default: 7.
+        activation (str): Type of activation function. Default: "LeakyReLU".
+        assigner_cfg (dict): Config dict of the assigner. Default: dict(topk=13).
+    """
+
+    def __init__(
+        self,
+        num_classes,
+        loss,
+        input_channel,
+        feat_channels=96,
+        stacked_convs=2,
+        kernel_size=5,
+        strides=[8, 16, 32],
+        conv_type="DWConv",
+        norm_cfg=dict(type="BN"),
+        reg_max=7,
+        activation="LeakyReLU",
+        assigner_cfg=dict(topk=13),
+        **kwargs
+    ):
+        super(NanoDetPlusHead, self).__init__()
+        self.num_classes = num_classes
+        self.in_channels = input_channel
+        self.feat_channels = feat_channels
+        self.stacked_convs = stacked_convs
+        self.kernel_size = kernel_size
+        self.strides = strides
+        self.reg_max = reg_max
+        self.activation = activation
+        self.ConvModule = ConvModule if conv_type == "Conv" else DepthwiseConvModule
+
+        self.loss_cfg = loss
+        self.norm_cfg = norm_cfg
+
+        self.assigner = DynamicSoftLabelAssigner(**assigner_cfg)
+        self.distribution_project = Integral(self.reg_max)
+
+        self.loss_qfl = QualityFocalLoss(
+            beta=self.loss_cfg.loss_qfl.beta,
+            loss_weight=self.loss_cfg.loss_qfl.loss_weight,
+        )
+        self.loss_dfl = DistributionFocalLoss(
+            loss_weight=self.loss_cfg.loss_dfl.loss_weight
+        )
+        self.loss_bbox = GIoULoss(loss_weight=self.loss_cfg.loss_bbox.loss_weight)
+        self._init_layers()
+        self.init_weights()
+
+    def _init_layers(self):
+        self.cls_convs = nn.ModuleList()
+        for _ in self.strides:
+            cls_convs = self._build_not_shared_head()
+            self.cls_convs.append(cls_convs)
+
+        self.gfl_cls = nn.ModuleList(
+            [
+                nn.Conv2d(
+                    self.feat_channels,
+                    self.num_classes + 4 * (self.reg_max + 1),
+                    1,
+                    padding=0,
+                )
+                for _ in self.strides
+            ]
+        )
+
+    def _build_not_shared_head(self):
+        cls_convs = nn.ModuleList()
+        for i in range(self.stacked_convs):
+            chn = self.in_channels if i == 0 else self.feat_channels
+            cls_convs.append(
+                self.ConvModule(
+                    chn,
+                    self.feat_channels,
+                    self.kernel_size,
+                    stride=1,
+                    padding=self.kernel_size // 2,
+                    norm_cfg=self.norm_cfg,
+                    bias=self.norm_cfg is None,
+                    activation=self.activation,
+                )
+            )
+        return cls_convs
+
+    def init_weights(self):
+        for m in self.cls_convs.modules():
+            if isinstance(m, nn.Conv2d):
+                normal_init(m, std=0.01)
+        # init cls head with confidence = 0.01
+        bias_cls = -4.595
+        for i in range(len(self.strides)):
+            normal_init(self.gfl_cls[i], std=0.01, bias=bias_cls)
+        print("Finish initialize NanoDet-Plus Head.")
+
+    def forward(self, feats):
+        if torch.onnx.is_in_onnx_export():
+            return self._forward_onnx(feats)
+        outputs = []
+        for feat, cls_convs, gfl_cls in zip(
+            feats,
+            self.cls_convs,
+            self.gfl_cls,
+        ):
+            for conv in cls_convs:
+                feat = conv(feat)
+            output = gfl_cls(feat)
+            outputs.append(output.flatten(start_dim=2))
+        outputs = torch.cat(outputs, dim=2).permute(0, 2, 1)
+        return outputs
+
+    def loss(self, preds, gt_meta, aux_preds=None):
+        """Compute losses.
+        Args:
+            preds (Tensor): Prediction output.
+            gt_meta (dict): Ground truth information.
+            aux_preds (tuple[Tensor], optional): Auxiliary head prediction output.
+
+        Returns:
+            loss (Tensor): Loss tensor.
+            loss_states (dict): State dict of each loss.
+        """
+        gt_bboxes = gt_meta["gt_bboxes"]
+        gt_labels = gt_meta["gt_labels"]
+        device = preds.device
+        batch_size = preds.shape[0]
+        input_height, input_width = gt_meta["img"].shape[2:]
+        featmap_sizes = [
+            (math.ceil(input_height / stride), math.ceil(input_width / stride))
+            for stride in self.strides
+        ]
+        # get grid cells of one image
+        mlvl_center_priors = [
+            self.get_single_level_center_priors(
+                batch_size,
+                featmap_sizes[i],
+                stride,
+                dtype=torch.float32,
+                device=device,
+            )
+            for i, stride in enumerate(self.strides)
+        ]
+        center_priors = torch.cat(mlvl_center_priors, dim=1)
+
+        cls_preds, reg_preds = preds.split(
+            [self.num_classes, 4 * (self.reg_max + 1)], dim=-1
+        )
+        dis_preds = self.distribution_project(reg_preds) * center_priors[..., 2, None]
+        decoded_bboxes = distance2bbox(center_priors[..., :2], dis_preds)
+
+        if aux_preds is not None:
+            # use auxiliary head to assign
+            aux_cls_preds, aux_reg_preds = aux_preds.split(
+                [self.num_classes, 4 * (self.reg_max + 1)], dim=-1
+            )
+            aux_dis_preds = (
+                self.distribution_project(aux_reg_preds) * center_priors[..., 2, None]
+            )
+            aux_decoded_bboxes = distance2bbox(center_priors[..., :2], aux_dis_preds)
+            batch_assign_res = multi_apply(
+                self.target_assign_single_img,
+                aux_cls_preds.detach(),
+                center_priors,
+                aux_decoded_bboxes.detach(),
+                gt_bboxes,
+                gt_labels,
+            )
+        else:
+            # use self prediction to assign
+            batch_assign_res = multi_apply(
+                self.target_assign_single_img,
+                cls_preds.detach(),
+                center_priors,
+                decoded_bboxes.detach(),
+                gt_bboxes,
+                gt_labels,
+            )
+
+        loss, loss_states = self._get_loss_from_assign(
+            cls_preds, reg_preds, decoded_bboxes, batch_assign_res
+        )
+
+        if aux_preds is not None:
+            aux_loss, aux_loss_states = self._get_loss_from_assign(
+                aux_cls_preds, aux_reg_preds, aux_decoded_bboxes, batch_assign_res
+            )
+            loss = loss + aux_loss
+            for k, v in aux_loss_states.items():
+                loss_states["aux_" + k] = v
+        return loss, loss_states
+
+    def _get_loss_from_assign(self, cls_preds, reg_preds, decoded_bboxes, assign):
+        device = cls_preds.device
+        labels, label_scores, bbox_targets, dist_targets, num_pos = assign
+        num_total_samples = max(
+            reduce_mean(torch.tensor(sum(num_pos)).to(device)).item(), 1.0
+        )
+
+        labels = torch.cat(labels, dim=0)
+        label_scores = torch.cat(label_scores, dim=0)
+        bbox_targets = torch.cat(bbox_targets, dim=0)
+        cls_preds = cls_preds.reshape(-1, self.num_classes)
+        reg_preds = reg_preds.reshape(-1, 4 * (self.reg_max + 1))
+        decoded_bboxes = decoded_bboxes.reshape(-1, 4)
+        loss_qfl = self.loss_qfl(
+            cls_preds, (labels, label_scores), avg_factor=num_total_samples
+        )
+
+        pos_inds = torch.nonzero(
+            (labels >= 0) & (labels < self.num_classes), as_tuple=False
+        ).squeeze(1)
+
+        if len(pos_inds) > 0:
+            weight_targets = cls_preds[pos_inds].detach().sigmoid().max(dim=1)[0]
+            bbox_avg_factor = max(reduce_mean(weight_targets.sum()).item(), 1.0)
+
+            loss_bbox = self.loss_bbox(
+                decoded_bboxes[pos_inds],
+                bbox_targets[pos_inds],
+                weight=weight_targets,
+                avg_factor=bbox_avg_factor,
+            )
+
+            dist_targets = torch.cat(dist_targets, dim=0)
+            loss_dfl = self.loss_dfl(
+                reg_preds[pos_inds].reshape(-1, self.reg_max + 1),
+                dist_targets[pos_inds].reshape(-1),
+                weight=weight_targets[:, None].expand(-1, 4).reshape(-1),
+                avg_factor=4.0 * bbox_avg_factor,
+            )
+        else:
+            loss_bbox = reg_preds.sum() * 0
+            loss_dfl = reg_preds.sum() * 0
+
+        loss = loss_qfl + loss_bbox + loss_dfl
+        loss_states = dict(loss_qfl=loss_qfl, loss_bbox=loss_bbox, loss_dfl=loss_dfl)
+        return loss, loss_states
+
+    @torch.no_grad()
+    def target_assign_single_img(
+        self, cls_preds, center_priors, decoded_bboxes, gt_bboxes, gt_labels
+    ):
+        """Compute classification, regression, and objectness targets for
+        priors in a single image.
+        Args:
+            cls_preds (Tensor): Classification predictions of one image,
+                a 2D-Tensor with shape [num_priors, num_classes]
+            center_priors (Tensor): All priors of one image, a 2D-Tensor with
+                shape [num_priors, 4] in [cx, cy, stride_w, stride_h] format.
+            decoded_bboxes (Tensor): Decoded bboxes predictions of one image,
+                a 2D-Tensor with shape [num_priors, 4] in [tl_x, tl_y,
+                br_x, br_y] format.
+            gt_bboxes (Tensor): Ground truth bboxes of one image, a 2D-Tensor
+                with shape [num_gts, 4] in [tl_x, tl_y, br_x, br_y] format.
+            gt_labels (Tensor): Ground truth labels of one image, a Tensor
+                with shape [num_gts].
+        """
+
+        num_priors = center_priors.size(0)
+        device = center_priors.device
+        gt_bboxes = torch.from_numpy(gt_bboxes).to(device)
+        gt_labels = torch.from_numpy(gt_labels).to(device)
+        num_gts = gt_labels.size(0)
+        gt_bboxes = gt_bboxes.to(decoded_bboxes.dtype)
+
+        bbox_targets = torch.zeros_like(center_priors)
+        dist_targets = torch.zeros_like(center_priors)
+        labels = center_priors.new_full(
+            (num_priors,), self.num_classes, dtype=torch.long
+        )
+        label_scores = center_priors.new_zeros(labels.shape, dtype=torch.float)
+        # No target
+        if num_gts == 0:
+            return labels, label_scores, bbox_targets, dist_targets, 0
+
+        assign_result = self.assigner.assign(
+            cls_preds.sigmoid(), center_priors, decoded_bboxes, gt_bboxes, gt_labels
+        )
+        pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds = self.sample(
+            assign_result, gt_bboxes
+        )
+        num_pos_per_img = pos_inds.size(0)
+        pos_ious = assign_result.max_overlaps[pos_inds]
+
+        if len(pos_inds) > 0:
+            bbox_targets[pos_inds, :] = pos_gt_bboxes
+            dist_targets[pos_inds, :] = bbox2distance(center_priors[pos_inds, :2],
+                                                      pos_gt_bboxes) / center_priors[pos_inds, None, 2]
+            dist_targets = dist_targets.clamp(min=0, max=self.reg_max - 0.1)
+            labels[pos_inds] = gt_labels[pos_assigned_gt_inds]
+            label_scores[pos_inds] = pos_ious
+        return (
+            labels,
+            label_scores,
+            bbox_targets,
+            dist_targets,
+            num_pos_per_img,
+        )
+
+    def sample(self, assign_result, gt_bboxes):
+        """Sample positive and negative bboxes."""
+        pos_inds = (
+            torch.nonzero(assign_result.gt_inds > 0, as_tuple=False)
+            .squeeze(-1)
+            .unique()
+        )
+        neg_inds = (
+            torch.nonzero(assign_result.gt_inds == 0, as_tuple=False)
+            .squeeze(-1)
+            .unique()
+        )
+        pos_assigned_gt_inds = assign_result.gt_inds[pos_inds] - 1
+
+        if gt_bboxes.numel() == 0:
+            # hack for index error case
+            assert pos_assigned_gt_inds.numel() == 0
+            pos_gt_bboxes = torch.empty_like(gt_bboxes).view(-1, 4)
+        else:
+            if len(gt_bboxes.shape) < 2:
+                gt_bboxes = gt_bboxes.view(-1, 4)
+            pos_gt_bboxes = gt_bboxes[pos_assigned_gt_inds, :]
+        return pos_inds, neg_inds, pos_gt_bboxes, pos_assigned_gt_inds
+
+    def post_process(self, preds, meta):
+        """Prediction results post processing. Decode bboxes and rescale
+        to original image size.
+        Args:
+            preds (Tensor): Prediction output.
+            meta (dict): Meta info.
+        """
+        cls_scores, bbox_preds = preds.split(
+            [self.num_classes, 4 * (self.reg_max + 1)], dim=-1
+        )
+        result_list = self.get_bboxes(cls_scores, bbox_preds, meta)
+        det_results = {}
+        warp_matrixes = meta["warp_matrix"]
+        img_heights = (
+            meta["img_info"]["height"].cpu().numpy()
+            if isinstance(meta["img_info"]["height"], torch.Tensor)
+            else meta["img_info"]["height"]
+        )
+        img_widths = (
+            meta["img_info"]["width"].cpu().numpy()
+            if isinstance(meta["img_info"]["width"], torch.Tensor)
+            else meta["img_info"]["width"]
+        )
+        img_ids = (
+            meta["img_info"]["id"].cpu().numpy()
+            if isinstance(meta["img_info"]["id"], torch.Tensor)
+            else meta["img_info"]["id"]
+        )
+
+        for result, img_width, img_height, img_id, warp_matrix in zip(
+            result_list, img_widths, img_heights, img_ids, warp_matrixes
+        ):
+            det_result = {}
+            det_bboxes, det_labels = result
+            det_bboxes = det_bboxes.detach().cpu().numpy()
+            det_bboxes[:, :4] = warp_boxes(
+                det_bboxes[:, :4], np.linalg.inv(warp_matrix), img_width, img_height
+            )
+            classes = det_labels.detach().cpu().numpy()
+            for i in range(self.num_classes):
+                inds = classes == i
+                det_result[i] = np.concatenate(
+                    [
+                        det_bboxes[inds, :4].astype(np.float32),
+                        det_bboxes[inds, 4:5].astype(np.float32),
+                    ],
+                    axis=1,
+                ).tolist()
+            det_results[img_id] = det_result
+        return det_results
+
+    def get_bboxes(self, cls_preds, reg_preds, img_metas):
+        """Decode the outputs to bboxes.
+        Args:
+            cls_preds (Tensor): Shape (num_imgs, num_points, num_classes).
+            reg_preds (Tensor): Shape (num_imgs, num_points, 4 * (regmax + 1)).
+            img_metas (dict): Dict of image info.
+
+        Returns:
+            results_list (list[tuple]): List of detection bboxes and labels.
+        """
+        device = cls_preds.device
+        b = cls_preds.shape[0]
+        input_height, input_width = img_metas["img"].shape[2:]
+        input_shape = (input_height, input_width)
+
+        featmap_sizes = [
+            (math.ceil(input_height / stride), math.ceil(input_width / stride))
+            for stride in self.strides
+        ]
+        # get grid cells of one image
+        mlvl_center_priors = [
+            self.get_single_level_center_priors(
+                b,
+                featmap_sizes[i],
+                stride,
+                dtype=torch.float32,
+                device=device,
+            )
+            for i, stride in enumerate(self.strides)
+        ]
+        center_priors = torch.cat(mlvl_center_priors, dim=1)
+        dis_preds = self.distribution_project(reg_preds) * center_priors[..., 2, None]
+        bboxes = distance2bbox(center_priors[..., :2], dis_preds, max_shape=input_shape)
+        scores = cls_preds.sigmoid()
+        result_list = []
+        for i in range(b):
+            # add a dummy background class at the end of all labels
+            # same with mmdetection2.0
+            score, bbox = scores[i], bboxes[i]
+            padding = score.new_zeros(score.shape[0], 1)
+            score = torch.cat([score, padding], dim=1)
+            results = multiclass_nms(
+                bbox,
+                score,
+                score_thr=0.05,
+                nms_cfg=dict(type="nms", iou_threshold=0.6),
+                max_num=100,
+            )
+            result_list.append(results)
+        return result_list
+
+    def get_single_level_center_priors(
+        self, batch_size, featmap_size, stride, dtype, device
+    ):
+        """Generate centers of a single stage feature map.
+        Args:
+            batch_size (int): Number of images in one batch.
+            featmap_size (tuple[int]): height and width of the feature map
+            stride (int): down sample stride of the feature map
+            dtype (obj:`torch.dtype`): data type of the tensors
+            device (obj:`torch.device`): device of the tensors
+        Returns:
+            priors (Tensor): center priors of a single level feature map.
+        """
+        h, w = featmap_size
+        x_range = (torch.arange(w, dtype=dtype, device=device)) * stride
+        y_range = (torch.arange(h, dtype=dtype, device=device)) * stride
+        y, x = torch.meshgrid(y_range, x_range)
+        y = y.flatten()
+        x = x.flatten()
+        strides = x.new_full((x.shape[0],), stride)
+        priors = torch.stack([x, y, strides, strides], dim=-1)
+        return priors.unsqueeze(0).repeat(batch_size, 1, 1)
+
+    def _forward_onnx(self, feats):
+        """only used for onnx export"""
+        outputs = []
+        for feat, cls_convs, gfl_cls in zip(
+            feats,
+            self.cls_convs,
+            self.gfl_cls,
+        ):
+            for conv in cls_convs:
+                feat = conv(feat)
+            output = gfl_cls(feat)
+            cls_pred, reg_pred = output.split(
+                [self.num_classes, 4 * (self.reg_max + 1)], dim=1
+            )
+            cls_pred = cls_pred.sigmoid()
+            out = torch.cat([cls_pred, reg_pred], dim=1)
+            outputs.append(out.flatten(start_dim=2))
+        return torch.cat(outputs, dim=2).permute(0, 2, 1)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/simple_conv_head.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/simple_conv_head.py
new file mode 100644
index 0000000000000000000000000000000000000000..b3d4d95ff7c08fc0b9656dafd33bcf7b2e1de237
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/head/simple_conv_head.py
@@ -0,0 +1,100 @@
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.init_weights import normal_init
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.scale import Scale
+
+
+class SimpleConvHead(nn.Module):
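+    # A plain GFL-style head built from regular ConvModules (GroupNorm by default)
+    # that emits the same (num_classes + 4 * (reg_max + 1)) per-location layout as
+    # GFLHead; in NanoDet-Plus configs it is typically used as the auxiliary head
+    # that guides label assignment (an assumption based on usage, not stated here).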
+    def __init__(
+        self,
+        num_classes,
+        input_channel,
+        feat_channels=256,
+        stacked_convs=4,
+        strides=[8, 16, 32],
+        conv_cfg=None,
+        norm_cfg=dict(type="GN", num_groups=32, requires_grad=True),
+        activation="LeakyReLU",
+        reg_max=16,
+        **kwargs
+    ):
+        super(SimpleConvHead, self).__init__()
+        self.num_classes = num_classes
+        self.in_channels = input_channel
+        self.feat_channels = feat_channels
+        self.stacked_convs = stacked_convs
+        self.strides = strides
+        self.reg_max = reg_max
+
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.activation = activation
+        self.cls_out_channels = num_classes
+
+        self._init_layers()
+        self.init_weights()
+
+    def _init_layers(self):
+        self.relu = nn.ReLU(inplace=True)
+        self.cls_convs = nn.ModuleList()
+        self.reg_convs = nn.ModuleList()
+        for i in range(self.stacked_convs):
+            chn = self.in_channels if i == 0 else self.feat_channels
+            self.cls_convs.append(
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=self.conv_cfg,
+                    norm_cfg=self.norm_cfg,
+                    activation=self.activation,
+                )
+            )
+            self.reg_convs.append(
+                ConvModule(
+                    chn,
+                    self.feat_channels,
+                    3,
+                    stride=1,
+                    padding=1,
+                    conv_cfg=self.conv_cfg,
+                    norm_cfg=self.norm_cfg,
+                    activation=self.activation,
+                )
+            )
+        self.gfl_cls = nn.Conv2d(
+            self.feat_channels, self.cls_out_channels, 3, padding=1
+        )
+        self.gfl_reg = nn.Conv2d(
+            self.feat_channels, 4 * (self.reg_max + 1), 3, padding=1
+        )
+        self.scales = nn.ModuleList([Scale(1.0) for _ in self.strides])
+
+    def init_weights(self):
+        for m in self.cls_convs:
+            normal_init(m.conv, std=0.01)
+        for m in self.reg_convs:
+            normal_init(m.conv, std=0.01)
+        bias_cls = -4.595
+        normal_init(self.gfl_cls, std=0.01, bias=bias_cls)
+        normal_init(self.gfl_reg, std=0.01)
+
+    def forward(self, feats):
+        outputs = []
+        for x, scale in zip(feats, self.scales):
+            cls_feat = x
+            reg_feat = x
+            for cls_conv in self.cls_convs:
+                cls_feat = cls_conv(cls_feat)
+            for reg_conv in self.reg_convs:
+                reg_feat = reg_conv(reg_feat)
+            cls_score = self.gfl_cls(cls_feat)
+            bbox_pred = scale(self.gfl_reg(reg_feat)).float()
+            output = torch.cat([cls_score, bbox_pred], dim=1)
+            outputs.append(output.flatten(start_dim=2))
+        outputs = torch.cat(outputs, dim=2).permute(0, 2, 1)
+        return outputs
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/gfocal_loss.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/gfocal_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..af0b4251c2570e1486494476d9bca560d854047a
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/gfocal_loss.py
@@ -0,0 +1,178 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.utils import weighted_loss
+
+
+@weighted_loss
+def quality_focal_loss(pred, target, beta=2.0):
+    r"""Quality Focal Loss (QFL) is from `Generalized Focal Loss: Learning
+    Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+
+    Args:
+        pred (torch.Tensor): Predicted joint representation of classification
+            and quality (IoU) estimation with shape (N, C), C is the number of
+            classes.
+        target (tuple([torch.Tensor])): Target category label with shape (N,)
+            and target quality label with shape (N,).
+        beta (float): The beta parameter for calculating the modulating factor.
+            Defaults to 2.0.
+
+    Returns:
+        torch.Tensor: Loss tensor with shape (N,).
+    """
+    assert (
+        len(target) == 2
+    ), """target for QFL must be a tuple of two elements,
+        including category label and quality label, respectively"""
+    # label denotes the category id, score denotes the quality score
+    label, score = target
+
+    # negatives are supervised by 0 quality score
+    pred_sigmoid = pred.sigmoid()
+    scale_factor = pred_sigmoid
+    zerolabel = scale_factor.new_zeros(pred.shape)
+    loss = F.binary_cross_entropy_with_logits(
+        pred, zerolabel, reduction="none"
+    ) * scale_factor.pow(beta)
+
+    # FG cat_id: [0, num_classes -1], BG cat_id: num_classes
+    bg_class_ind = pred.size(1)
+    pos = torch.nonzero((label >= 0) & (label < bg_class_ind), as_tuple=False).squeeze(
+        1
+    )
+    pos_label = label[pos].long()
+    # positives are supervised by bbox quality (IoU) score
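+    # the modulating factor is |quality score - sigmoid(pred)| ** beta, as in QFL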
+    scale_factor = score[pos] - pred_sigmoid[pos, pos_label]
+    loss[pos, pos_label] = F.binary_cross_entropy_with_logits(
+        pred[pos, pos_label], score[pos], reduction="none"
+    ) * scale_factor.abs().pow(beta)
+
+    loss = loss.sum(dim=1, keepdim=False)
+    return loss
+
+
+@weighted_loss
+def distribution_focal_loss(pred, label):
+    r"""Distribution Focal Loss (DFL) is from `Generalized Focal Loss: Learning
+    Qualified and Distributed Bounding Boxes for Dense Object Detection
+    <https://arxiv.org/abs/2006.04388>`_.
+
+    Args:
+        pred (torch.Tensor): Predicted general distribution of bounding boxes
+            (before softmax) with shape (N, n+1), n is the max value of the
+            integral set `{0, ..., n}` in paper.
+        label (torch.Tensor): Target distance label for bounding boxes with
+            shape (N,).
+
+    Returns:
+        torch.Tensor: Loss tensor with shape (N,).
+    """
+    dis_left = label.long()
+    dis_right = dis_left + 1
+    weight_left = dis_right.float() - label
+    weight_right = label - dis_left.float()
+    loss = F.cross_entropy(pred, dis_left, reduction="none") * weight_left + \
+        F.cross_entropy(pred, dis_right, reduction="none") * weight_right
+    return loss
+
+
+class QualityFocalLoss(nn.Module):
+    r"""Quality Focal Loss (QFL) is a variant of `Generalized Focal Loss:
+    Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+
+    Args:
+        use_sigmoid (bool): Whether sigmoid operation is conducted in QFL.
+            Defaults to True.
+        beta (float): The beta parameter for calculating the modulating factor.
+            Defaults to 2.0.
+        reduction (str): Options are "none", "mean" and "sum".
+        loss_weight (float): Loss weight of current loss.
+    """
+
+    def __init__(self, use_sigmoid=True, beta=2.0, reduction="mean", loss_weight=1.0):
+        super(QualityFocalLoss, self).__init__()
+        assert use_sigmoid is True, "Only sigmoid in QFL supported now."
+        self.use_sigmoid = use_sigmoid
+        self.beta = beta
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(
+        self, pred, target, weight=None, avg_factor=None, reduction_override=None
+    ):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): Predicted joint representation of
+                classification and quality (IoU) estimation with shape (N, C),
+                C is the number of classes.
+            target (tuple([torch.Tensor])): Target category label with shape
+                (N,) and target quality label with shape (N,).
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
+        """
+        assert reduction_override in (None, "none", "mean", "sum")
+        reduction = reduction_override if reduction_override else self.reduction
+        if self.use_sigmoid:
+            loss_cls = self.loss_weight * quality_focal_loss(
+                pred,
+                target,
+                weight,
+                beta=self.beta,
+                reduction=reduction,
+                avg_factor=avg_factor,
+            )
+        else:
+            raise NotImplementedError
+        return loss_cls
+
+
+class DistributionFocalLoss(nn.Module):
+    r"""Distribution Focal Loss (DFL) is a variant of `Generalized Focal Loss:
+    Learning Qualified and Distributed Bounding Boxes for Dense Object
+    Detection <https://arxiv.org/abs/2006.04388>`_.
+
+    Args:
+        reduction (str): Options are `'none'`, `'mean'` and `'sum'`.
+        loss_weight (float): Loss weight of current loss.
+    """
+
+    def __init__(self, reduction="mean", loss_weight=1.0):
+        super(DistributionFocalLoss, self).__init__()
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(
+        self, pred, target, weight=None, avg_factor=None, reduction_override=None
+    ):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): Predicted general distribution of bounding
+                boxes (before softmax) with shape (N, n+1), n is the max value
+                of the integral set `{0, ..., n}` in paper.
+            target (torch.Tensor): Target distance label for bounding boxes
+                with shape (N,).
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None.
+        """
+        assert reduction_override in (None, "none", "mean", "sum")
+        reduction = reduction_override if reduction_override else self.reduction
+        loss_cls = self.loss_weight * distribution_focal_loss(
+            pred, target, weight, reduction=reduction, avg_factor=avg_factor
+        )
+        return loss_cls
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/iou_loss.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/iou_loss.py
new file mode 100644
index 0000000000000000000000000000000000000000..7ee9d324a3485674122b3b1ee84091d7d1bce0b6
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/iou_loss.py
@@ -0,0 +1,544 @@
+# Modification 2020 RangiLyu
+# Copyright 2018-2019 Open-MMLab.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import math
+
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.loss.utils import weighted_loss
+
+
+def bbox_overlaps(bboxes1, bboxes2, mode="iou", is_aligned=False, eps=1e-6):
+    """Calculate overlap between two set of bboxes.
+
+    If ``is_aligned`` is ``False``, then calculate the overlaps between each
+    bbox of bboxes1 and bboxes2, otherwise the overlaps between each aligned
+    pair of bboxes1 and bboxes2.
+
+    Args:
+        bboxes1 (Tensor): shape (B, m, 4) in <x1, y1, x2, y2> format or empty.
+        bboxes2 (Tensor): shape (B, n, 4) in <x1, y1, x2, y2> format or empty.
+            B indicates the batch dim, in shape (B1, B2, ..., Bn).
+            If ``is_aligned`` is ``True``, then m and n must be equal.
+        mode (str): "iou" (intersection over union) or "iof" (intersection over
+            foreground).
+        is_aligned (bool, optional): If True, then m and n must be equal.
+            Default False.
+        eps (float, optional): A value added to the denominator for numerical
+            stability. Default 1e-6.
+
+    Returns:
+        Tensor: shape (m, n) if ``is_aligned`` is False else shape (m,)
+
+    Example:
+        >>> bboxes1 = torch.FloatTensor([
+        >>>     [0, 0, 10, 10],
+        >>>     [10, 10, 20, 20],
+        >>>     [32, 32, 38, 42],
+        >>> ])
+        >>> bboxes2 = torch.FloatTensor([
+        >>>     [0, 0, 10, 20],
+        >>>     [0, 10, 10, 19],
+        >>>     [10, 10, 20, 20],
+        >>> ])
+        >>> bbox_overlaps(bboxes1, bboxes2)
+        tensor([[0.5000, 0.0000, 0.0000],
+                [0.0000, 0.0000, 1.0000],
+                [0.0000, 0.0000, 0.0000]])
+        >>> bbox_overlaps(bboxes1, bboxes2, mode='giou', eps=1e-7)
+        tensor([[0.5000, 0.0000, -0.5000],
+                [-0.2500, -0.0500, 1.0000],
+                [-0.8371, -0.8766, -0.8214]])
+
+    Example:
+        >>> empty = torch.FloatTensor([])
+        >>> nonempty = torch.FloatTensor([
+        >>>     [0, 0, 10, 9],
+        >>> ])
+        >>> assert tuple(bbox_overlaps(empty, nonempty).shape) == (0, 1)
+        >>> assert tuple(bbox_overlaps(nonempty, empty).shape) == (1, 0)
+        >>> assert tuple(bbox_overlaps(empty, empty).shape) == (0, 0)
+    """
+
+    assert mode in ["iou", "iof", "giou"], f"Unsupported mode {mode}"
+    # Either the boxes are empty or the length of boxes' last dimension is 4
+    assert bboxes1.size(-1) == 4 or bboxes1.size(0) == 0
+    assert bboxes2.size(-1) == 4 or bboxes2.size(0) == 0
+
+    # Batch dim must be the same
+    # Batch dim: (B1, B2, ... Bn)
+    assert bboxes1.shape[:-2] == bboxes2.shape[:-2]
+    batch_shape = bboxes1.shape[:-2]
+
+    rows = bboxes1.size(-2)
+    cols = bboxes2.size(-2)
+    if is_aligned:
+        assert rows == cols
+
+    if rows * cols == 0:
+        if is_aligned:
+            return bboxes1.new(batch_shape + (rows,))
+        else:
+            return bboxes1.new(batch_shape + (rows, cols))
+
+    area1 = (bboxes1[..., 2] - bboxes1[..., 0]) * (bboxes1[..., 3] - bboxes1[..., 1])
+    area2 = (bboxes2[..., 2] - bboxes2[..., 0]) * (bboxes2[..., 3] - bboxes2[..., 1])
+
+    if is_aligned:
+        lt = torch.max(bboxes1[..., :2], bboxes2[..., :2])  # [B, rows, 2]
+        rb = torch.min(bboxes1[..., 2:], bboxes2[..., 2:])  # [B, rows, 2]
+
+        wh = (rb - lt).clamp(min=0)  # [B, rows, 2]
+        overlap = wh[..., 0] * wh[..., 1]
+
+        if mode in ["iou", "giou"]:
+            union = area1 + area2 - overlap
+        else:
+            union = area1
+        if mode == "giou":
+            enclosed_lt = torch.min(bboxes1[..., :2], bboxes2[..., :2])
+            enclosed_rb = torch.max(bboxes1[..., 2:], bboxes2[..., 2:])
+    else:
+        lt = torch.max(
+            bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]
+        )  # [B, rows, cols, 2]
+        rb = torch.min(
+            bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]
+        )  # [B, rows, cols, 2]
+
+        wh = (rb - lt).clamp(min=0)  # [B, rows, cols, 2]
+        overlap = wh[..., 0] * wh[..., 1]
+
+        if mode in ["iou", "giou"]:
+            union = area1[..., None] + area2[..., None, :] - overlap
+        else:
+            union = area1[..., None]
+        if mode == "giou":
+            enclosed_lt = torch.min(
+                bboxes1[..., :, None, :2], bboxes2[..., None, :, :2]
+            )
+            enclosed_rb = torch.max(
+                bboxes1[..., :, None, 2:], bboxes2[..., None, :, 2:]
+            )
+
+    eps = union.new_tensor([eps])
+    union = torch.max(union, eps)
+    ious = overlap / union
+    if mode in ["iou", "iof"]:
+        return ious
+    # calculate gious
+    enclose_wh = (enclosed_rb - enclosed_lt).clamp(min=0)
+    enclose_area = enclose_wh[..., 0] * enclose_wh[..., 1]
+    enclose_area = torch.max(enclose_area, eps)
+    gious = ious - (enclose_area - union) / enclose_area
+    return gious
+
+
+@weighted_loss
+def iou_loss(pred, target, eps=1e-6):
+    """IoU loss.
+
+    Computing the IoU loss between a set of predicted bboxes and target bboxes.
+    The loss is calculated as negative log of IoU.
+
+    Args:
+        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+            shape (n, 4).
+        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
+        eps (float): Eps to avoid log(0).
+
+    Return:
+        torch.Tensor: Loss tensor.
+    """
+    ious = bbox_overlaps(pred, target, is_aligned=True).clamp(min=eps)
+    loss = -ious.log()
+    return loss
+
+
+@weighted_loss
+def bounded_iou_loss(pred, target, beta=0.2, eps=1e-3):
+    """BIoULoss.
+
+    This is an implementation of the paper
+    `Improving Object Localization with Fitness NMS and Bounded IoU Loss
+    <https://arxiv.org/abs/1711.00164>`_.
+
+    Args:
+        pred (torch.Tensor): Predicted bboxes.
+        target (torch.Tensor): Target bboxes.
+        beta (float): beta parameter in smoothl1.
+        eps (float): eps to avoid NaN.
+    """
+    pred_ctrx = (pred[:, 0] + pred[:, 2]) * 0.5
+    pred_ctry = (pred[:, 1] + pred[:, 3]) * 0.5
+    pred_w = pred[:, 2] - pred[:, 0]
+    pred_h = pred[:, 3] - pred[:, 1]
+    with torch.no_grad():
+        target_ctrx = (target[:, 0] + target[:, 2]) * 0.5
+        target_ctry = (target[:, 1] + target[:, 3]) * 0.5
+        target_w = target[:, 2] - target[:, 0]
+        target_h = target[:, 3] - target[:, 1]
+
+    dx = target_ctrx - pred_ctrx
+    dy = target_ctry - pred_ctry
+
+    loss_dx = 1 - torch.max(
+        (target_w - 2 * dx.abs()) / (target_w + 2 * dx.abs() + eps),
+        torch.zeros_like(dx),
+    )
+    loss_dy = 1 - torch.max(
+        (target_h - 2 * dy.abs()) / (target_h + 2 * dy.abs() + eps),
+        torch.zeros_like(dy),
+    )
+    loss_dw = 1 - torch.min(target_w / (pred_w + eps), pred_w / (target_w + eps))
+    loss_dh = 1 - torch.min(target_h / (pred_h + eps), pred_h / (target_h + eps))
+    loss_comb = torch.stack([loss_dx, loss_dy, loss_dw, loss_dh], dim=-1).view(
+        loss_dx.size(0), -1
+    )
+
+    loss = torch.where(
+        loss_comb < beta, 0.5 * loss_comb * loss_comb / beta, loss_comb - 0.5 * beta
+    ).sum(dim=-1)
+    return loss
+
+
+@weighted_loss
+def giou_loss(pred, target, eps=1e-7):
+    r"""`Generalized Intersection over Union: A Metric and A Loss for Bounding
+    Box Regression <https://arxiv.org/abs/1902.09630>`_.
+
+    Args:
+        pred (torch.Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+            shape (n, 4).
+        target (torch.Tensor): Corresponding gt bboxes, shape (n, 4).
+        eps (float): Eps for numerical stability.
+
+    Return:
+        Tensor: Loss tensor.
+    """
+    gious = bbox_overlaps(pred, target, mode="giou", is_aligned=True, eps=eps)
+    loss = 1 - gious
+    return loss
+
+
+@weighted_loss
+def diou_loss(pred, target, eps=1e-7):
+    r"""`Implementation of Distance-IoU Loss: Faster and Better
+    Learning for Bounding Box Regression, https://arxiv.org/abs/1911.08287`_.
+
+    Code is modified from https://github.com/Zzh-tju/DIoU.
+
+    Args:
+        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+            shape (n, 4).
+        target (Tensor): Corresponding gt bboxes, shape (n, 4).
+        eps (float): Eps to avoid division by zero.
+    Return:
+        Tensor: Loss tensor.
+    """
+    # overlap
+    lt = torch.max(pred[:, :2], target[:, :2])
+    rb = torch.min(pred[:, 2:], target[:, 2:])
+    wh = (rb - lt).clamp(min=0)
+    overlap = wh[:, 0] * wh[:, 1]
+
+    # union
+    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
+    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
+    union = ap + ag - overlap + eps
+
+    # IoU
+    ious = overlap / union
+
+    # enclose area
+    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
+    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
+    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
+
+    cw = enclose_wh[:, 0]
+    ch = enclose_wh[:, 1]
+
+    c2 = cw ** 2 + ch ** 2 + eps
+
+    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
+    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
+    b2_x1, b2_y1 = target[:, 0], target[:, 1]
+    b2_x2, b2_y2 = target[:, 2], target[:, 3]
+
+    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4
+    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
+    rho2 = left + right
+
+    # DIoU
+    dious = ious - rho2 / c2
+    loss = 1 - dious
+    return loss
+
+
+@weighted_loss
+def ciou_loss(pred, target, eps=1e-7):
+    r"""`Implementation of paper `Enhancing Geometric Factors into
+    Model Learning and Inference for Object Detection and Instance
+    Segmentation <https://arxiv.org/abs/2005.03572>`_.
+
+    Code is modified from https://github.com/Zzh-tju/CIoU.
+
+    Args:
+        pred (Tensor): Predicted bboxes of format (x1, y1, x2, y2),
+            shape (n, 4).
+        target (Tensor): Corresponding gt bboxes, shape (n, 4).
+        eps (float): Eps to avoid division by zero.
+    Return:
+        Tensor: Loss tensor.
+    """
+    # overlap
+    lt = torch.max(pred[:, :2], target[:, :2])
+    rb = torch.min(pred[:, 2:], target[:, 2:])
+    wh = (rb - lt).clamp(min=0)
+    overlap = wh[:, 0] * wh[:, 1]
+
+    # union
+    ap = (pred[:, 2] - pred[:, 0]) * (pred[:, 3] - pred[:, 1])
+    ag = (target[:, 2] - target[:, 0]) * (target[:, 3] - target[:, 1])
+    union = ap + ag - overlap + eps
+
+    # IoU
+    ious = overlap / union
+
+    # enclose area
+    enclose_x1y1 = torch.min(pred[:, :2], target[:, :2])
+    enclose_x2y2 = torch.max(pred[:, 2:], target[:, 2:])
+    enclose_wh = (enclose_x2y2 - enclose_x1y1).clamp(min=0)
+
+    cw = enclose_wh[:, 0]
+    ch = enclose_wh[:, 1]
+
+    c2 = cw ** 2 + ch ** 2 + eps
+
+    b1_x1, b1_y1 = pred[:, 0], pred[:, 1]
+    b1_x2, b1_y2 = pred[:, 2], pred[:, 3]
+    b2_x1, b2_y1 = target[:, 0], target[:, 1]
+    b2_x2, b2_y2 = target[:, 2], target[:, 3]
+
+    w1, h1 = b1_x2 - b1_x1, b1_y2 - b1_y1 + eps
+    w2, h2 = b2_x2 - b2_x1, b2_y2 - b2_y1 + eps
+
+    left = ((b2_x1 + b2_x2) - (b1_x1 + b1_x2)) ** 2 / 4
+    right = ((b2_y1 + b2_y2) - (b1_y1 + b1_y2)) ** 2 / 4
+    rho2 = left + right
+
+    factor = 4 / math.pi ** 2
+    v = factor * torch.pow(torch.atan(w2 / h2) - torch.atan(w1 / h1), 2)
+
+    # CIoU
+    cious = ious - (rho2 / c2 + v ** 2 / (1 - ious + v))
+    loss = 1 - cious
+    return loss
+
+
+class IoULoss(nn.Module):
+    """IoULoss.
+
+    Computing the IoU loss between a set of predicted bboxes and target bboxes.
+
+    Args:
+        eps (float): Eps to avoid log(0).
+        reduction (str): Options are "none", "mean" and "sum".
+        loss_weight (float): Weight of loss.
+    """
+
+    def __init__(self, eps=1e-6, reduction="mean", loss_weight=1.0):
+        super(IoULoss, self).__init__()
+        self.eps = eps
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(
+        self,
+        pred,
+        target,
+        weight=None,
+        avg_factor=None,
+        reduction_override=None,
+        **kwargs,
+    ):
+        """Forward function.
+
+        Args:
+            pred (torch.Tensor): The prediction.
+            target (torch.Tensor): The learning target of the prediction.
+            weight (torch.Tensor, optional): The weight of loss for each
+                prediction. Defaults to None.
+            avg_factor (int, optional): Average factor that is used to average
+                the loss. Defaults to None.
+            reduction_override (str, optional): The reduction method used to
+                override the original reduction method of the loss.
+                Defaults to None. Options are "none", "mean" and "sum".
+        """
+        assert reduction_override in (None, "none", "mean", "sum")
+        reduction = reduction_override if reduction_override else self.reduction
+        if (weight is not None) and (not torch.any(weight > 0)) and (reduction != "none"):
+            if pred.dim() == weight.dim() + 1:
+                weight = weight.unsqueeze(1)
+            return (pred * weight).sum()  # 0
+        loss = self.loss_weight * iou_loss(
+            pred,
+            target,
+            weight,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs,
+        )
+        return loss
+
+
+class BoundedIoULoss(nn.Module):
+    def __init__(self, beta=0.2, eps=1e-3, reduction="mean", loss_weight=1.0):
+        super(BoundedIoULoss, self).__init__()
+        self.beta = beta
+        self.eps = eps
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(
+        self,
+        pred,
+        target,
+        weight=None,
+        avg_factor=None,
+        reduction_override=None,
+        **kwargs,
+    ):
+        if weight is not None and not torch.any(weight > 0):
+            if pred.dim() == weight.dim() + 1:
+                weight = weight.unsqueeze(1)
+            return (pred * weight).sum()  # 0
+        assert reduction_override in (None, "none", "mean", "sum")
+        reduction = reduction_override if reduction_override else self.reduction
+        loss = self.loss_weight * bounded_iou_loss(
+            pred,
+            target,
+            weight,
+            beta=self.beta,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs,
+        )
+        return loss
+
+
+class GIoULoss(nn.Module):
+    def __init__(self, eps=1e-6, reduction="mean", loss_weight=1.0):
+        super(GIoULoss, self).__init__()
+        self.eps = eps
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(
+        self,
+        pred,
+        target,
+        weight=None,
+        avg_factor=None,
+        reduction_override=None,
+        **kwargs,
+    ):
+        if weight is not None and not torch.any(weight > 0):
+            if pred.dim() == weight.dim() + 1:
+                weight = weight.unsqueeze(1)
+            return (pred * weight).sum()  # 0
+        assert reduction_override in (None, "none", "mean", "sum")
+        reduction = reduction_override if reduction_override else self.reduction
+        loss = self.loss_weight * giou_loss(
+            pred,
+            target,
+            weight,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs,
+        )
+        return loss
+
+
+class DIoULoss(nn.Module):
+    def __init__(self, eps=1e-6, reduction="mean", loss_weight=1.0):
+        super(DIoULoss, self).__init__()
+        self.eps = eps
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(
+        self,
+        pred,
+        target,
+        weight=None,
+        avg_factor=None,
+        reduction_override=None,
+        **kwargs,
+    ):
+        if weight is not None and not torch.any(weight > 0):
+            if pred.dim() == weight.dim() + 1:
+                weight = weight.unsqueeze(1)
+            return (pred * weight).sum()  # 0
+        assert reduction_override in (None, "none", "mean", "sum")
+        reduction = reduction_override if reduction_override else self.reduction
+        loss = self.loss_weight * diou_loss(
+            pred,
+            target,
+            weight,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs,
+        )
+        return loss
+
+
+class CIoULoss(nn.Module):
+    def __init__(self, eps=1e-6, reduction="mean", loss_weight=1.0):
+        super(CIoULoss, self).__init__()
+        self.eps = eps
+        self.reduction = reduction
+        self.loss_weight = loss_weight
+
+    def forward(
+        self,
+        pred,
+        target,
+        weight=None,
+        avg_factor=None,
+        reduction_override=None,
+        **kwargs,
+    ):
+        if weight is not None and not torch.any(weight > 0):
+            if pred.dim() == weight.dim() + 1:
+                weight = weight.unsqueeze(1)
+            return (pred * weight).sum()  # 0
+        assert reduction_override in (None, "none", "mean", "sum")
+        reduction = reduction_override if reduction_override else self.reduction
+        loss = self.loss_weight * ciou_loss(
+            pred,
+            target,
+            weight,
+            eps=self.eps,
+            reduction=reduction,
+            avg_factor=avg_factor,
+            **kwargs,
+        )
+        return loss
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/utils.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/utils.py
new file mode 100644
index 0000000000000000000000000000000000000000..f8bae7d5f795825c110224ae65d7489f9915cdd2
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/loss/utils.py
@@ -0,0 +1,93 @@
+import functools
+
+import torch.nn.functional as F
+
+
+def reduce_loss(loss, reduction):
+    """Reduce loss as specified.
+
+    Args:
+        loss (Tensor): Elementwise loss tensor.
+        reduction (str): Options are "none", "mean" and "sum".
+
+    Return:
+        Tensor: Reduced loss tensor.
+    """
+    reduction_enum = F._Reduction.get_enum(reduction)
+    # none: 0, elementwise_mean:1, sum: 2
+    if reduction_enum == 0:
+        return loss
+    elif reduction_enum == 1:
+        return loss.mean()
+    elif reduction_enum == 2:
+        return loss.sum()
+
+
+def weight_reduce_loss(loss, weight=None, reduction="mean", avg_factor=None):
+    """Apply element-wise weight and reduce loss.
+
+    Args:
+        loss (Tensor): Element-wise loss.
+        weight (Tensor): Element-wise weights.
+        reduction (str): Same as built-in losses of PyTorch.
+        avg_factor (float): Average factor when computing the mean of losses.
+
+    Returns:
+        Tensor: Processed loss values.
+    """
+    # if weight is specified, apply element-wise weight
+    if weight is not None:
+        loss = loss * weight
+
+    # if avg_factor is not specified, just reduce the loss
+    if avg_factor is None:
+        loss = reduce_loss(loss, reduction)
+    else:
+        # if reduction is mean, then average the loss by avg_factor
+        if reduction == "mean":
+            loss = loss.sum() / avg_factor
+        # if reduction is 'none', then do nothing, otherwise raise an error
+        elif reduction != "none":
+            raise ValueError('avg_factor cannot be used with reduction="sum"')
+    return loss
+
+
+def weighted_loss(loss_func):
+    """Create a weighted version of a given loss function.
+
+    To use this decorator, the loss function must have a signature like
+    `loss_func(pred, target, **kwargs)`. The function only needs to compute
+    element-wise loss without any reduction. This decorator adds weight
+    and reduction arguments, so the decorated function will have a signature
+    like `loss_func(pred, target, weight=None, reduction='mean',
+    avg_factor=None, **kwargs)`.
+
+    :Example:
+
+    >>> import torch
+    >>> @weighted_loss
+    >>> def l1_loss(pred, target):
+    >>>     return (pred - target).abs()
+
+    >>> pred = torch.Tensor([0, 2, 3])
+    >>> target = torch.Tensor([1, 1, 1])
+    >>> weight = torch.Tensor([1, 0, 1])
+
+    >>> l1_loss(pred, target)
+    tensor(1.3333)
+    >>> l1_loss(pred, target, weight)
+    tensor(1.)
+    >>> l1_loss(pred, target, reduction='none')
+    tensor([1., 1., 2.])
+    >>> l1_loss(pred, target, weight, avg_factor=2)
+    tensor(1.5000)
+    """
+
+    @functools.wraps(loss_func)
+    def wrapper(pred, target, weight=None, reduction="mean", avg_factor=None, **kwargs):
+        # get element-wise loss
+        loss = loss_func(pred, target, **kwargs)
+        loss = weight_reduce_loss(loss, weight, reduction, avg_factor)
+        return loss
+
+    return wrapper
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/activation.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/activation.py
new file mode 100644
index 0000000000000000000000000000000000000000..8047fc81ce9590309aa358d9b2d445f981458656
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/activation.py
@@ -0,0 +1,41 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch.nn as nn
+
+activations = {
+    "ReLU": nn.ReLU,
+    "LeakyReLU": nn.LeakyReLU,
+    "ReLU6": nn.ReLU6,
+    "SELU": nn.SELU,
+    "ELU": nn.ELU,
+    "GELU": nn.GELU,
+    "PReLU": nn.PReLU,
+    "SiLU": nn.SiLU,
+    "HardSwish": nn.Hardswish,
+    "Hardswish": nn.Hardswish,
+    None: nn.Identity,
+}
+
+
+def act_layers(name):
+    assert name in activations.keys()
+    if name == "LeakyReLU":
+        return nn.LeakyReLU(negative_slope=0.1, inplace=True)
+    elif name == "GELU":
+        return nn.GELU()
+    elif name == "PReLU":
+        return nn.PReLU()
+    else:
+        return activations[name](inplace=True)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/conv.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/conv.py
new file mode 100644
index 0000000000000000000000000000000000000000..693e6fd0feec5b386ffa64d912d6dda14b31ccec
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/conv.py
@@ -0,0 +1,393 @@
+"""
+ConvModule refers from MMDetection
+RepVGGConvModule refers from RepVGG: Making VGG-style ConvNets Great Again
+"""
+import warnings
+
+import numpy as np
+import torch
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.activation import act_layers
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.init_weights\
+    import constant_init, kaiming_init
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.norm import build_norm_layer
+
+
+class ConvModule(nn.Module):
+    """A conv block that contains conv/norm/activation layers.
+
+    Args:
+        in_channels (int): Same as nn.Conv2d.
+        out_channels (int): Same as nn.Conv2d.
+        kernel_size (int or tuple[int]): Same as nn.Conv2d.
+        stride (int or tuple[int]): Same as nn.Conv2d.
+        padding (int or tuple[int]): Same as nn.Conv2d.
+        dilation (int or tuple[int]): Same as nn.Conv2d.
+        groups (int): Same as nn.Conv2d.
+        bias (bool or str): If specified as `auto`, it will be decided by the
+            norm_cfg. Bias will be set as True if norm_cfg is None, otherwise
+            False.
+        conv_cfg (dict): Config dict for convolution layer.
+        norm_cfg (dict): Config dict for normalization layer.
+        activation (str): activation layer, "ReLU" by default.
+        inplace (bool): Whether to use inplace mode for activation.
+        order (tuple[str]): The order of conv/norm/activation layers. It is a
+            sequence of "conv", "norm" and "act". Examples are
+            ("conv", "norm", "act") and ("act", "conv", "norm").
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        kernel_size,
+        stride=1,
+        padding=0,
+        dilation=1,
+        groups=1,
+        bias="auto",
+        conv_cfg=None,
+        norm_cfg=None,
+        activation="ReLU",
+        inplace=True,
+        order=("conv", "norm", "act"),
+    ):
+        super(ConvModule, self).__init__()
+        assert conv_cfg is None or isinstance(conv_cfg, dict)
+        assert norm_cfg is None or isinstance(norm_cfg, dict)
+        assert activation is None or isinstance(activation, str)
+        self.conv_cfg = conv_cfg
+        self.norm_cfg = norm_cfg
+        self.activation = activation
+        self.inplace = inplace
+        self.order = order
+        assert isinstance(self.order, tuple) and len(self.order) == 3
+        assert set(order) == {"conv", "norm", "act"}
+
+        self.with_norm = norm_cfg is not None
+        # if the conv layer is before a norm layer, bias is unnecessary.
+        if bias == "auto":
+            bias = False if self.with_norm else True
+        self.with_bias = bias
+
+        if self.with_norm and self.with_bias:
+            warnings.warn("ConvModule has norm and bias at the same time")
+
+        # build convolution layer
+        self.conv = nn.Conv2d(
+            in_channels,
+            out_channels,
+            kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+            groups=groups,
+            bias=bias,
+        )
+        # export the attributes of self.conv to a higher level for convenience
+        self.in_channels = self.conv.in_channels
+        self.out_channels = self.conv.out_channels
+        self.kernel_size = self.conv.kernel_size
+        self.stride = self.conv.stride
+        self.padding = self.conv.padding
+        self.dilation = self.conv.dilation
+        self.transposed = self.conv.transposed
+        self.output_padding = self.conv.output_padding
+        self.groups = self.conv.groups
+
+        # build normalization layers
+        if self.with_norm:
+            # norm layer is after conv layer
+            if order.index("norm") > order.index("conv"):
+                norm_channels = out_channels
+            else:
+                norm_channels = in_channels
+            self.norm_name, norm = build_norm_layer(norm_cfg, norm_channels)
+            self.add_module(self.norm_name, norm)
+        else:
+            self.norm_name = None
+
+        # build activation layer
+        if self.activation:
+            self.act = act_layers(self.activation)
+
+        # Use msra init by default
+        self.init_weights()
+
+    @property
+    def norm(self):
+        if self.norm_name:
+            return getattr(self, self.norm_name)
+        else:
+            return None
+
+    def init_weights(self):
+        if self.activation == "LeakyReLU":
+            nonlinearity = "leaky_relu"
+        else:
+            nonlinearity = "relu"
+        kaiming_init(self.conv, nonlinearity=nonlinearity)
+        if self.with_norm:
+            constant_init(self.norm, 1, bias=0)
+
+    def forward(self, x, norm=True):
+        for layer in self.order:
+            if layer == "conv":
+                x = self.conv(x)
+            elif layer == "norm" and norm and self.with_norm:
+                x = self.norm(x)
+            elif layer == "act" and self.activation:
+                x = self.act(x)
+        return x
+
+
+class DepthwiseConvModule(nn.Module):
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        kernel_size,
+        stride=1,
+        padding=0,
+        dilation=1,
+        bias="auto",
+        norm_cfg=dict(type="BN"),
+        activation="ReLU",
+        inplace=True,
+        order=("depthwise", "dwnorm", "act", "pointwise", "pwnorm", "act"),
+    ):
+        super(DepthwiseConvModule, self).__init__()
+        assert activation is None or isinstance(activation, str)
+        self.activation = activation
+        self.inplace = inplace
+        self.order = order
+        assert isinstance(self.order, tuple) and len(self.order) == 6
+        assert set(order) == {
+            "depthwise",
+            "dwnorm",
+            "act",
+            "pointwise",
+            "pwnorm",
+            "act",
+        }
+
+        self.with_norm = norm_cfg is not None
+        # if the conv layer is before a norm layer, bias is unnecessary.
+        if bias == "auto":
+            bias = False if self.with_norm else True
+        self.with_bias = bias
+
+        if self.with_norm and self.with_bias:
+            warnings.warn("ConvModule has norm and bias at the same time")
+
+        # build convolution layer
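+        # depthwise separable convolution: a per-channel spatial conv
+        # (groups=in_channels) followed by a 1x1 pointwise conv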
+        self.depthwise = nn.Conv2d(
+            in_channels,
+            in_channels,
+            kernel_size,
+            stride=stride,
+            padding=padding,
+            dilation=dilation,
+            groups=in_channels,
+            bias=bias,
+        )
+        self.pointwise = nn.Conv2d(
+            in_channels, out_channels, kernel_size=1, stride=1, padding=0, bias=bias
+        )
+
+        # export the attributes of self.conv to a higher level for convenience
+        self.in_channels = self.depthwise.in_channels
+        self.out_channels = self.pointwise.out_channels
+        self.kernel_size = self.depthwise.kernel_size
+        self.stride = self.depthwise.stride
+        self.padding = self.depthwise.padding
+        self.dilation = self.depthwise.dilation
+        self.transposed = self.depthwise.transposed
+        self.output_padding = self.depthwise.output_padding
+
+        # build normalization layers
+        if self.with_norm:
+            # norm layer is after conv layer
+            _, self.dwnorm = build_norm_layer(norm_cfg, in_channels)
+            _, self.pwnorm = build_norm_layer(norm_cfg, out_channels)
+
+        # build activation layer
+        if self.activation:
+            self.act = act_layers(self.activation)
+
+        # Use msra init by default
+        self.init_weights()
+
+    def init_weights(self):
+        if self.activation == "LeakyReLU":
+            nonlinearity = "leaky_relu"
+        else:
+            nonlinearity = "relu"
+        kaiming_init(self.depthwise, nonlinearity=nonlinearity)
+        kaiming_init(self.pointwise, nonlinearity=nonlinearity)
+        if self.with_norm:
+            constant_init(self.dwnorm, 1, bias=0)
+            constant_init(self.pwnorm, 1, bias=0)
+
+    def forward(self, x, norm=True):
+        for layer_name in self.order:
+            if layer_name != "act":
+                layer = self.__getattr__(layer_name)
+                x = layer(x)
+            elif layer_name == "act" and self.activation:
+                x = self.act(x)
+        return x
+
+
+class RepVGGConvModule(nn.Module):
+    """
+    RepVGG Conv Block from paper RepVGG: Making VGG-style ConvNets Great Again
+    https://arxiv.org/abs/2101.03697
+    https://github.com/DingXiaoH/RepVGG
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        kernel_size=3,
+        stride=1,
+        padding=1,
+        dilation=1,
+        groups=1,
+        activation="ReLU",
+        padding_mode="zeros",
+        deploy=False,
+        **kwargs
+    ):
+        super(RepVGGConvModule, self).__init__()
+        assert activation is None or isinstance(activation, str)
+        self.activation = activation
+
+        self.deploy = deploy
+        self.groups = groups
+        self.in_channels = in_channels
+
+        assert kernel_size == 3
+        assert padding == 1
+
+        padding_11 = padding - kernel_size // 2
+
+        # build activation layer
+        if self.activation:
+            self.act = act_layers(self.activation)
+
+        if deploy:
+            self.rbr_reparam = nn.Conv2d(
+                in_channels=in_channels,
+                out_channels=out_channels,
+                kernel_size=kernel_size,
+                stride=stride,
+                padding=padding,
+                dilation=dilation,
+                groups=groups,
+                bias=True,
+                padding_mode=padding_mode,
+            )
+
+        else:
+            self.rbr_identity = (
+                nn.BatchNorm2d(num_features=in_channels)
+                if out_channels == in_channels and stride == 1
+                else None
+            )
+
+            self.rbr_dense = nn.Sequential(
+                nn.Conv2d(
+                    in_channels=in_channels,
+                    out_channels=out_channels,
+                    kernel_size=kernel_size,
+                    stride=stride,
+                    padding=padding,
+                    groups=groups,
+                    bias=False,
+                ),
+                nn.BatchNorm2d(num_features=out_channels),
+            )
+
+            self.rbr_1x1 = nn.Sequential(
+                nn.Conv2d(
+                    in_channels=in_channels,
+                    out_channels=out_channels,
+                    kernel_size=1,
+                    stride=stride,
+                    padding=padding_11,
+                    groups=groups,
+                    bias=False,
+                ),
+                nn.BatchNorm2d(num_features=out_channels),
+            )
+            print("RepVGG Block, identity = ", self.rbr_identity)
+
+    def forward(self, inputs):
+        if hasattr(self, "rbr_reparam"):
+            return self.act(self.rbr_reparam(inputs))
+
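+        # training-time structure: the 3x3 conv+BN, 1x1 conv+BN and (optional)
+        # identity BN branches are summed before the activation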
+        if self.rbr_identity is None:
+            id_out = 0
+        else:
+            id_out = self.rbr_identity(inputs)
+
+        return self.act(self.rbr_dense(inputs) + self.rbr_1x1(inputs) + id_out)
+
+    #   This function derives the equivalent kernel and bias in a DIFFERENTIABLE way.
+    #   You can get the equivalent kernel and bias at any time and do whatever you want,
+    #   for example, apply some penalties or constraints during training, just as you
+    #   would for other models. May be useful for quantization or pruning.
+    def get_equivalent_kernel_bias(self):
+        kernel3x3, bias3x3 = self._fuse_bn_tensor(self.rbr_dense)
+        kernel1x1, bias1x1 = self._fuse_bn_tensor(self.rbr_1x1)
+        kernelid, biasid = self._fuse_bn_tensor(self.rbr_identity)
+        return (
+            kernel3x3 + self._pad_1x1_to_3x3_tensor(kernel1x1) + kernelid,
+            bias3x3 + bias1x1 + biasid,
+        )
+
+    def _pad_1x1_to_3x3_tensor(self, kernel1x1):
+        if kernel1x1 is None:
+            return 0
+        else:
+            return nn.functional.pad(kernel1x1, [1, 1, 1, 1])
+
+    def _fuse_bn_tensor(self, branch):
+        if branch is None:
+            return 0, 0
+        if isinstance(branch, nn.Sequential):
+            kernel = branch[0].weight
+            running_mean = branch[1].running_mean
+            running_var = branch[1].running_var
+            gamma = branch[1].weight
+            beta = branch[1].bias
+            eps = branch[1].eps
+        else:
+            assert isinstance(branch, nn.BatchNorm2d)
+            if not hasattr(self, "id_tensor"):
+                input_dim = self.in_channels // self.groups
+                kernel_value = np.zeros(
+                    (self.in_channels, input_dim, 3, 3), dtype=np.float32
+                )
+                for i in range(self.in_channels):
+                    kernel_value[i, i % input_dim, 1, 1] = 1
+                self.id_tensor = torch.from_numpy(kernel_value).to(branch.weight.device)
+            kernel = self.id_tensor
+            running_mean = branch.running_mean
+            running_var = branch.running_var
+            gamma = branch.weight
+            beta = branch.bias
+            eps = branch.eps
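+        # fold BN into the conv kernel: W' = W * gamma / std, b' = beta - mean * gamma / std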
+        std = (running_var + eps).sqrt()
+        t = (gamma / std).reshape(-1, 1, 1, 1)
+        return kernel * t, beta - running_mean * gamma / std
+
+    def repvgg_convert(self):
+        kernel, bias = self.get_equivalent_kernel_bias()
+        return (
+            kernel.detach().cpu().numpy(),
+            bias.detach().cpu().numpy(),
+        )
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/init_weights.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/init_weights.py
new file mode 100644
index 0000000000000000000000000000000000000000..27da85c922af1606c286f0adab3a24fa88344e92
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/init_weights.py
@@ -0,0 +1,43 @@
+# Modification 2020 RangiLyu
+# Copyright 2018-2019 Open-MMLab.
+
+import torch.nn as nn
+
+
+def kaiming_init(
+    module, a=0, mode="fan_out", nonlinearity="relu", bias=0, distribution="normal"
+):
+    assert distribution in ["uniform", "normal"]
+    if distribution == "uniform":
+        nn.init.kaiming_uniform_(
+            module.weight, a=a, mode=mode, nonlinearity=nonlinearity
+        )
+    else:
+        nn.init.kaiming_normal_(
+            module.weight, a=a, mode=mode, nonlinearity=nonlinearity
+        )
+    if hasattr(module, "bias") and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
+
+
+def xavier_init(module, gain=1, bias=0, distribution="normal"):
+    assert distribution in ["uniform", "normal"]
+    if distribution == "uniform":
+        nn.init.xavier_uniform_(module.weight, gain=gain)
+    else:
+        nn.init.xavier_normal_(module.weight, gain=gain)
+    if hasattr(module, "bias") and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
+
+
+def normal_init(module, mean=0, std=1, bias=0):
+    nn.init.normal_(module.weight, mean, std)
+    if hasattr(module, "bias") and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
+
+
+def constant_init(module, val, bias=0):
+    if hasattr(module, "weight") and module.weight is not None:
+        nn.init.constant_(module.weight, val)
+    if hasattr(module, "bias") and module.bias is not None:
+        nn.init.constant_(module.bias, bias)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/nms.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/nms.py
new file mode 100644
index 0000000000000000000000000000000000000000..e5fa3e216c123d422b67efdba421bd762c68603b
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/nms.py
@@ -0,0 +1,122 @@
+import torch
+from torchvision.ops import nms
+
+
+def multiclass_nms(
+    multi_bboxes, multi_scores, score_thr, nms_cfg, max_num=-1, score_factors=None
+):
+    """NMS for multi-class bboxes.
+
+    Args:
+        multi_bboxes (Tensor): shape (n, #class*4) or (n, 4)
+        multi_scores (Tensor): shape (n, #class), where the last column
+            contains scores of the background class, but this will be ignored.
+        score_thr (float): bbox threshold, bboxes with scores lower than it
+            will not be considered.
+        nms_cfg (dict): NMS config specifying the NMS type and IoU threshold.
+        max_num (int): if there are more than max_num bboxes after NMS,
+            only top max_num will be kept.
+        score_factors (Tensor): The factors multiplied to scores before
+            applying NMS
+
+    Returns:
+        tuple: (bboxes, labels), tensors of shape (k, 5) and (k,). Labels \
+            are 0-based.
+    """
+    num_classes = multi_scores.size(1) - 1
+    # exclude background category
+    if multi_bboxes.shape[1] > 4:
+        bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4)
+    else:
+        bboxes = multi_bboxes[:, None].expand(multi_scores.size(0), num_classes, 4)
+    scores = multi_scores[:, :-1]
+
+    # filter out boxes with low scores
+    valid_mask = scores > score_thr
+
+    # We use masked_select for ONNX exporting purposes:
+    # it is equivalent to bboxes = bboxes[valid_mask],
+    # but we have to use this more verbose form for the export to work.
+    bboxes = torch.masked_select(
+        bboxes, torch.stack((valid_mask, valid_mask, valid_mask, valid_mask), -1)
+    ).view(-1, 4)
+    if score_factors is not None:
+        scores = scores * score_factors[:, None]
+    scores = torch.masked_select(scores, valid_mask)
+    labels = valid_mask.nonzero(as_tuple=False)[:, 1]
+
+    if bboxes.numel() == 0:
+        bboxes = multi_bboxes.new_zeros((0, 5))
+        labels = multi_bboxes.new_zeros((0,), dtype=torch.long)
+
+        if torch.onnx.is_in_onnx_export():
+            raise RuntimeError(
+                "[ONNX Error] Can not record NMS "
+                "as it has not been executed this time"
+            )
+        return bboxes, labels
+
+    dets, keep = batched_nms(bboxes, scores, labels, nms_cfg)
+
+    if max_num > 0:
+        dets = dets[:max_num]
+        keep = keep[:max_num]
+
+    return dets, labels[keep]
+
+
+def batched_nms(boxes, scores, idxs, nms_cfg, class_agnostic=False):
+    """Performs non-maximum suppression in a batched fashion.
+    Modified from https://github.com/pytorch/vision/blob
+    /505cd6957711af790211896d32b40291bea1bc21/torchvision/ops/boxes.py#L39.
+    In order to perform NMS independently per class, we add an offset to all
+    the boxes. The offset is dependent only on the class idx, and is large
+    enough so that boxes from different classes do not overlap.
+    Arguments:
+        boxes (torch.Tensor): boxes in shape (N, 4).
+        scores (torch.Tensor): scores in shape (N, ).
+        idxs (torch.Tensor): each index value corresponds to a bbox cluster,
+            and NMS will not be applied between elements of different idxs,
+            shape (N, ).
+        nms_cfg (dict): specify nms type and other parameters like iou_thr.
+            Possible keys include the following.
+            - iou_thr (float): IoU threshold used for NMS.
+            - split_thr (float): threshold number of boxes. In some cases the
+                number of boxes is large (e.g., 200k). To avoid OOM during
+                training, the users could set `split_thr` to a small value.
+                If the number of boxes is greater than the threshold, it will
+                perform NMS on each group of boxes separately and sequentially.
+                Defaults to 10000.
+        class_agnostic (bool): if true, nms is class agnostic,
+            i.e. IoU thresholding happens over all boxes,
+            regardless of the predicted class.
+    Returns:
+        tuple: kept dets and indices.
+    """
+    nms_cfg_ = nms_cfg.copy()
+    class_agnostic = nms_cfg_.pop("class_agnostic", class_agnostic)
+    if class_agnostic:
+        boxes_for_nms = boxes
+    else:
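+        # shift the boxes of each class by a class-dependent offset larger than any
+        # coordinate, so per-class groups can never overlap in a single NMS call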
+        max_coordinate = boxes.max()
+        offsets = idxs.to(boxes) * (max_coordinate + 1)
+        boxes_for_nms = boxes + offsets[:, None]
+    nms_cfg_.pop("type", "nms")
+    split_thr = nms_cfg_.pop("split_thr", 10000)
+    if len(boxes_for_nms) < split_thr:
+        keep = nms(boxes_for_nms, scores, **nms_cfg_)
+        boxes = boxes[keep]
+        scores = scores[keep]
+    else:
+        total_mask = scores.new_zeros(scores.size(), dtype=torch.bool)
+        for id in torch.unique(idxs):
+            mask = (idxs == id).nonzero(as_tuple=False).view(-1)
+            keep = nms(boxes_for_nms[mask], scores[mask], **nms_cfg_)
+            total_mask[mask[keep]] = True
+
+        keep = total_mask.nonzero(as_tuple=False).view(-1)
+        keep = keep[scores[keep].argsort(descending=True)]
+        boxes = boxes[keep]
+        scores = scores[keep]
+
+    return torch.cat([boxes, scores[:, None]], -1), keep
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/norm.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/norm.py
new file mode 100644
index 0000000000000000000000000000000000000000..b9dd8f43e083412334fd4796bb5b82a7ba8c94e7
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/norm.py
@@ -0,0 +1,55 @@
+import torch.nn as nn
+
+norm_cfg = {
+    # format: layer_type: (abbreviation, module)
+    "BN": ("bn", nn.BatchNorm2d),
+    "SyncBN": ("bn", nn.SyncBatchNorm),
+    "GN": ("gn", nn.GroupNorm),
+    # and potentially 'SN'
+}
+
+
+def build_norm_layer(cfg, num_features, postfix=""):
+    """Build normalization layer
+
+    Args:
+        cfg (dict): cfg should contain:
+            type (str): identify norm layer type.
+            layer args: args needed to instantiate a norm layer.
+            requires_grad (bool): [optional] whether the parameters require gradient updates
+        num_features (int): number of channels from input.
+        postfix (int, str): appended into norm abbreviation to
+            create named layer.
+
+    Returns:
+        name (str): abbreviation + postfix
+        layer (nn.Module): created norm layer
+    """
+    assert isinstance(cfg, dict) and "type" in cfg
+    cfg_ = cfg.copy()
+
+    layer_type = cfg_.pop("type")
+    if layer_type not in norm_cfg:
+        raise KeyError("Unrecognized norm type {}".format(layer_type))
+    else:
+        abbr, norm_layer = norm_cfg[layer_type]
+        if norm_layer is None:
+            raise NotImplementedError
+
+    assert isinstance(postfix, (int, str))
+    name = abbr + str(postfix)
+
+    requires_grad = cfg_.pop("requires_grad", True)
+    cfg_.setdefault("eps", 1e-5)
+    if layer_type != "GN":
+        layer = norm_layer(num_features, **cfg_)
+        if layer_type == "SyncBN" and hasattr(layer, "_specify_ddp_gpu_num"):
+            layer._specify_ddp_gpu_num(1)
+    else:
+        assert "num_groups" in cfg_
+        layer = norm_layer(num_channels=num_features, **cfg_)
+
+    for param in layer.parameters():
+        param.requires_grad = requires_grad
+
+    return name, layer
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/scale.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/scale.py
new file mode 100644
index 0000000000000000000000000000000000000000..2461af8a6fb23d911d7aac4e81bdfee36a31cadb
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/scale.py
@@ -0,0 +1,15 @@
+import torch
+import torch.nn as nn
+
+
+class Scale(nn.Module):
+    """
+    A learnable scale parameter
+    """
+
+    def __init__(self, scale=1.0):
+        super(Scale, self).__init__()
+        self.scale = nn.Parameter(torch.tensor(scale, dtype=torch.float))
+
+    def forward(self, x):
+        return x * self.scale
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/transformer.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/transformer.py
new file mode 100644
index 0000000000000000000000000000000000000000..24e2de458b6fa85a518dcdf71c6a1c746cc2893c
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/module/transformer.py
@@ -0,0 +1,138 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import torch.nn as nn
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.activation import act_layers
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.module.conv import ConvModule
+
+
+class MLP(nn.Module):
+    def __init__(
+        self, in_dim, hidden_dim=None, out_dim=None, drop=0.0, activation="GELU"
+    ):
+        super(MLP, self).__init__()
+        out_dim = out_dim or in_dim
+        hidden_dim = hidden_dim or in_dim
+        self.fc1 = nn.Linear(in_dim, hidden_dim)
+        self.act = act_layers(activation)
+        self.fc2 = nn.Linear(hidden_dim, out_dim)
+        self.drop = nn.Dropout(drop)
+
+    def forward(self, x):
+        x = self.fc1(x)
+        x = self.act(x)
+        x = self.drop(x)
+        x = self.fc2(x)
+        x = self.drop(x)
+        return x
+
+
+class TransformerEncoder(nn.Module):
+    """
+    Encoder layer of transformer
+    :param dim: feature dimension
+    :param num_heads: number of attention heads
+    :param mlp_ratio: hidden layer dimension expand ratio in MLP
+    :param dropout_ratio: probability of an element to be zeroed
+    :param activation: activation layer type
+    :param kv_bias: add bias on key and values
+    """
+
+    def __init__(
+        self,
+        dim,
+        num_heads,
+        mlp_ratio,
+        dropout_ratio=0.0,
+        activation="GELU",
+        kv_bias=False,
+    ):
+        super(TransformerEncoder, self).__init__()
+        self.norm1 = nn.LayerNorm(dim)
+
+        # embed_dim must be divisible by num_heads
+        assert dim // num_heads * num_heads == dim
+        self.attn = nn.MultiheadAttention(
+            embed_dim=dim,
+            num_heads=num_heads,
+            dropout=dropout_ratio,
+            add_bias_kv=kv_bias,
+        )
+        self.norm2 = nn.LayerNorm(dim)
+        self.mlp = MLP(
+            in_dim=dim,
+            hidden_dim=int(dim * mlp_ratio),
+            drop=dropout_ratio,
+            activation=activation,
+        )
+
+    def forward(self, x):
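+        # pre-norm residual blocks: self-attention followed by the MLP,
+        # each applied to a LayerNorm-ed input and added back to x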
+        _x = self.norm1(x)
+        x = x + self.attn(_x, _x, _x)[0]
+        x = x + self.mlp(self.norm2(x))
+        return x
+
+
+class TransformerBlock(nn.Module):
+    """
+    Block of transformer encoder layers, used in vision tasks.
+    :param in_channels: input channels
+    :param out_channels: output channels
+    :param num_heads: number of attention heads
+    :param num_encoders: number of transformer encoder layers
+    :param mlp_ratio: hidden layer dimension expand ratio in MLP
+    :param dropout_ratio: probability of an element to be zeroed
+    :param activation: activation layer type
+    :param kv_bias: add bias on key and values
+    """
+
+    def __init__(
+        self,
+        in_channels,
+        out_channels,
+        num_heads,
+        num_encoders=1,
+        mlp_ratio=1,
+        dropout_ratio=0.0,
+        kv_bias=False,
+        activation="GELU",
+    ):
+        super(TransformerBlock, self).__init__()
+
+        # out_channels must be divisible by num_heads
+        assert out_channels // num_heads * num_heads == out_channels
+
+        self.conv = (
+            nn.Identity()
+            if in_channels == out_channels
+            else ConvModule(in_channels, out_channels, 1)
+        )
+        self.linear = nn.Linear(out_channels, out_channels)
+        encoders = [
+            TransformerEncoder(
+                out_channels, num_heads, mlp_ratio, dropout_ratio, activation, kv_bias
+            )
+            for _ in range(num_encoders)
+        ]
+        self.encoders = nn.Sequential(*encoders)
+
+    def forward(self, x, pos_embed):
+        b, _, h, w = x.shape
+        x = self.conv(x)
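+        # flatten the spatial dimensions: (B, C, H, W) -> (H*W, B, C),
+        # the sequence-first layout expected by nn.MultiheadAttention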
+        x = x.flatten(2).permute(2, 0, 1)
+        x = x + pos_embed
+        x = self.encoders(x)
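+        # restore the (B, C, H, W) feature-map layout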
+        x = x.permute(1, 2, 0).reshape(b, -1, h, w)
+        return x
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/weight_averager/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/weight_averager/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..170b589cc99c9f6721815c9fe2ce7570951c4012
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/weight_averager/__init__.py
@@ -0,0 +1,26 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.weight_averager.ema import ExpMovingAverager
+
+
+def build_weight_averager(cfg, device="cpu"):
+    cfg = copy.deepcopy(cfg)
+    name = cfg.pop("name")
+    if name == "ExpMovingAverager":
+        return ExpMovingAverager(**cfg, device=device)
+    else:
+        raise NotImplementedError(f"{name} is not implemented")
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/weight_averager/ema.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/weight_averager/ema.py
new file mode 100644
index 0000000000000000000000000000000000000000..0906c7c6d71e2db7e0baae19d50e34bd2f315e51
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/model/weight_averager/ema.py
@@ -0,0 +1,80 @@
+# Copyright 2021 RangiLyu. All rights reserved.
+# =====================================================================
+# Modified from: https://github.com/facebookresearch/d2go
+# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved
+# Licensed under the Apache License, Version 2.0 (the "License")
+import itertools
+import math
+from typing import Any, Dict, Optional
+
+import torch
+import torch.nn as nn
+
+
+class ExpMovingAverager(object):
+    """Exponential Moving Average.
+
+    Args:
+        decay (float): EMA decay factor, should be in [0, 1]. A decay of 0 corresponds
+            to always using the latest value (no EMA) and a decay of 1 corresponds to
+            not updating weights after initialization. Defaults to 0.9998.
+        device (str): If not None, move EMA state to device.
+    """
+
+    def __init__(self, decay: float = 0.9998, device: Optional[str] = None):
+        if decay < 0 or decay > 1.0:
+            raise ValueError(f"Decay should be in [0, 1], {decay} was given.")
+        self.decay = decay
+        self.state = {}
+        self.device = device
+
+    def load_from(self, model: nn.Module) -> None:
+        """Load state from the model."""
+        self.state.clear()
+        for name, val in self._get_model_state_iterator(model):
+            val = val.detach().clone()
+            self.state[name] = val.to(self.device) if self.device else val
+
+    def has_inited(self) -> bool:
+        return len(self.state) > 0
+
+    def apply_to(self, model: nn.Module) -> None:
+        """Apply EMA state to the model."""
+        with torch.no_grad():
+            for name, val in self._get_model_state_iterator(model):
+                assert (
+                    name in self.state
+                ), f"Name {name} not exist, available names are {self.state.keys()}"
+                val.copy_(self.state[name])
+
+    def state_dict(self) -> Dict[str, Any]:
+        return self.state
+
+    def load_state_dict(self, state_dict: Dict[str, Any]) -> None:
+        self.state.clear()
+        for name, val in state_dict.items():
+            self.state[name] = val.to(self.device) if self.device else val
+
+    def to(self, device: torch.device) -> None:
+        """moves EMA state to device."""
+        for name, val in self.state.items():
+            self.state[name] = val.to(device)
+
+    def _get_model_state_iterator(self, model: nn.Module):
+        param_iter = model.named_parameters()
+        # pyre-fixme[16]: `nn.Module` has no attribute `named_buffers`.
+        buffer_iter = model.named_buffers()
+        return itertools.chain(param_iter, buffer_iter)
+
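+    # The value returned below is the interpolation weight applied to the
+    # current model weights in update(): it starts close to 1, so the EMA
+    # follows the raw weights early in training, and decays towards (1 - self.decay).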
+    def calculate_decay(self, iteration: int) -> float:
+        decay = self.decay * math.exp(-(1 + iteration) / 2000) + (1 - self.decay)
+        return decay
+
+    def update(self, model: nn.Module, iteration: int) -> None:
+        decay = self.calculate_decay(iteration)
+        with torch.no_grad():
+            for name, val in self._get_model_state_iterator(model):
+                ema_val = self.state[name]
+                if self.device:
+                    val = val.to(self.device)
+                ema_val.copy_(ema_val * (1 - decay) + val * decay)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/trainer/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/trainer/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..bc1f10f77da725672def88f4f7c4c47cb60ca21f
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/trainer/__init__.py
@@ -0,0 +1,16 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.trainer.task import TrainingTask
+
+__all__ = ["TrainingTask"]
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/trainer/task.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/trainer/task.py
new file mode 100644
index 0000000000000000000000000000000000000000..d2939d22e1f1513e57ff097159c188d45086776a
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/trainer/task.py
@@ -0,0 +1,362 @@
+# Modifications Copyright 2021 - present, OpenDR European Project
+#
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import copy
+import os
+import warnings
+from typing import Any, Dict, List
+
+import torch
+import torch.distributed as dist
+from pytorch_lightning import LightningModule
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.batch_process import stack_batch_img
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util\
+    import convert_avg_params, gather_results, mkdir
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.check_point import save_model_state
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.weight_averager import build_weight_averager
+
+
+class TrainingTask(LightningModule):
+    """
+    PyTorch Lightning module for a general training task,
+    including training, evaluation and testing.
+    Args:
+        cfg: Training configuration.
+        model: Model to be trained.
+        evaluator: Evaluator for measuring the model performance.
+    """
+
+    def __init__(self, cfg, model, evaluator=None):
+        super(TrainingTask, self).__init__()
+        self.cfg = cfg
+        self.model = model
+        self.evaluator = evaluator
+        self.save_flag = -10
+        self.log_style = "NanoDet"
+        self.weight_averager = None
+        if "weight_averager" in self.cfg.model:
+            self.weight_averager = build_weight_averager(
+                self.cfg.model.weight_averager, device=self.device
+            )
+            self.avg_model = copy.deepcopy(self.model)
+
+    def _preprocess_batch_input(self, batch):
+        batch_imgs = batch["img"]
+        if isinstance(batch_imgs, list):
+            batch_imgs = [img.to(self.device) for img in batch_imgs]
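+            # stack the (possibly differently sized) images into one batch
+            # tensor, padded so the spatial dimensions are divisible by 32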
+            batch_img_tensor = stack_batch_img(batch_imgs, divisible=32)
+            batch["img"] = batch_img_tensor
+        return batch
+
+    def forward(self, x):
+        x = self.model(x)
+        return x
+
+    @torch.no_grad()
+    def predict(self, batch, batch_idx=None, dataloader_idx=None):
+        batch = self._preprocess_batch_input(batch)
+        preds = self.forward(batch["img"])
+        results = self.model.head.post_process(preds, batch)
+        return results
+
+    def save_current_model(self, path, logger):
+        save_model_state(path=path, model=self.model, weight_averager=self.weight_averager, logger=logger)
+
+    def training_step(self, batch, batch_idx):
+        batch = self._preprocess_batch_input(batch)
+        preds, loss, loss_states = self.model.forward_train(batch)
+
+        # log train losses
+        if self.global_step % self.cfg.log.interval == 0:
+            lr = self.optimizers().param_groups[0]["lr"]
+            log_msg = "Train|Epoch{}/{}|Iter{}({})| lr:{:.2e}| ".format(
+                self.current_epoch + 1,
+                self.cfg.schedule.total_epochs,
+                self.global_step,
+                batch_idx,
+                lr,
+            )
+            self.scalar_summary("Train_loss/lr", "Train", lr, self.global_step)
+            for loss_name in loss_states:
+                log_msg += "{}:{:.4f}| ".format(
+                    loss_name, loss_states[loss_name].mean().item()
+                )
+                self.scalar_summary(
+                    "Train_loss/" + loss_name,
+                    "Train",
+                    loss_states[loss_name].mean().item(),
+                    self.global_step,
+                )
+            if self.logger:
+                self.logger.info(log_msg)
+
+        return loss
+
+    def training_epoch_end(self, outputs: List[Any]) -> None:
+        # save a checkpoint every cfg.schedule.val_intervals epochs
+        if self.current_epoch % self.cfg.schedule.val_intervals == 0:
+            checkpoint_save_path = os.path.join(self.cfg.save_dir, "checkpoints")
+            mkdir(checkpoint_save_path)
+            print("===" * 10)
+            print("checkpoint_save_path: {} \n epoch: {}".format(checkpoint_save_path, self.current_epoch))
+            print("===" * 10)
+            self.trainer.save_checkpoint(
+                os.path.join(checkpoint_save_path, "model_iter_{}.ckpt".format(self.current_epoch))
+            )
+
+        self.lr_scheduler.step()
+
+    def validation_step(self, batch, batch_idx):
+        batch = self._preprocess_batch_input(batch)
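+        # run validation with the averaged (EMA) weights when available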
+        if self.weight_averager is not None:
+            preds, loss, loss_states = self.avg_model.forward_train(batch)
+        else:
+            preds, loss, loss_states = self.model.forward_train(batch)
+
+        if batch_idx % self.cfg.log.interval == 0:
+            lr = self.optimizers().param_groups[0]["lr"]
+            log_msg = "Val|Epoch{}/{}|Iter{}({})| lr:{:.2e}| ".format(
+                self.current_epoch + 1,
+                self.cfg.schedule.total_epochs,
+                self.global_step,
+                batch_idx,
+                lr,
+            )
+            for loss_name in loss_states:
+                log_msg += "{}:{:.4f}| ".format(
+                    loss_name, loss_states[loss_name].mean().item()
+                )
+            if self.logger:
+                self.logger.info(log_msg)
+
+        dets = self.model.head.post_process(preds, batch)
+        return dets
+
+    def validation_epoch_end(self, validation_step_outputs):
+        """
+        Called at the end of the validation epoch with the outputs of all
+        validation steps. Evaluates the results and saves the best model.
+        Args:
+            validation_step_outputs: A list of validation step outputs.
+
+        """
+        results = {}
+        for res in validation_step_outputs:
+            results.update(res)
+        all_results = (
+            gather_results(results, self.device)
+            if dist.is_available() and dist.is_initialized()
+            else results
+        )
+        if all_results:
+            eval_results = self.evaluator.evaluate(
+                all_results, self.cfg.save_dir)
+            metric = eval_results[self.cfg.evaluator.save_key]
+            # save best model
+            if metric > self.save_flag:
+                self.save_flag = metric
+                best_save_path = os.path.join(self.cfg.save_dir, "model_best")
+                mkdir(best_save_path)
+                self.trainer.save_checkpoint(
+                    os.path.join(best_save_path, "model_best.ckpt")
+                )
+                self.save_current_model(os.path.join(best_save_path, "nanodet_model_best.pth"), logger=self.logger)
+                txt_path = os.path.join(best_save_path, "eval_results.txt")
+                with open(txt_path, "a") as f:
+                    f.write("Epoch:{}\n".format(self.current_epoch + 1))
+                    for k, v in eval_results.items():
+                        f.write("{}: {}\n".format(k, v))
+            else:
+                warnings.warn(
+                    "Warning! Save_key is not in eval results! Only save model last!"
+                )
+            if self.logger:
+                self.logger.log_metrics(eval_results, self.current_epoch + 1)
+        else:
+            # self.logger.info("Skip val on rank {}".format(self.local_rank))
+            if self.logger:
+                self.logger.info("Skip val ")
+
+    def test_step(self, batch, batch_idx):
+        dets = self.predict(batch, batch_idx)
+        return dets
+
+    def test_epoch_end(self, test_step_outputs):
+        results = {}
+        for res in test_step_outputs:
+            results.update(res)
+        all_results = (
+            gather_results(results, self.device)
+            if dist.is_available() and dist.is_initialized()
+            else results
+        )
+        if all_results:
+            if self.cfg.test_mode == "val":
+                eval_results = self.evaluator.evaluate(
+                    all_results, self.cfg.save_dir)
+                txt_path = os.path.join(self.cfg.save_dir, "eval_results.txt")
+                with open(txt_path, "a") as f:
+                    for k, v in eval_results.items():
+                        f.write("{}: {}\n".format(k, v))
+
+        else:
+            if self.logger:
+                self.logger.info("Skip test on rank {}".format(self.local_rank))
+
+    def configure_optimizers(self):
+        """
+        Prepare optimizer and learning-rate scheduler
+        to use in optimization.
+
+        Returns:
+            optimizer
+        """
+
+        optimizer_cfg = copy.deepcopy(self.cfg.schedule.optimizer)
+        name = optimizer_cfg.pop("name")
+        build_optimizer = getattr(torch.optim, name)
+        optimizer = build_optimizer(params=self.parameters(), **optimizer_cfg)
+
+        schedule_cfg = copy.deepcopy(self.cfg.schedule.lr_schedule)
+        name = schedule_cfg.pop("name")
+        build_scheduler = getattr(torch.optim.lr_scheduler, name)
+        self.lr_scheduler = build_scheduler(optimizer=optimizer, **schedule_cfg)
+
+        return optimizer
+
+    def optimizer_step(
+        self,
+        epoch=None,
+        batch_idx=None,
+        optimizer=None,
+        optimizer_idx=None,
+        optimizer_closure=None,
+        on_tpu=None,
+        using_native_amp=None,
+        using_lbfgs=None,
+    ):
+        """
+        Performs a single optimization step (parameter update).
+        Args:
+            epoch: Current epoch
+            batch_idx: Index of current batch
+            optimizer: A PyTorch optimizer
+            optimizer_idx: If you used multiple optimizers this indexes into that list.
+            optimizer_closure: closure for all optimizers
+            on_tpu: true if TPU backward is required
+            using_native_amp: True if using native amp
+            using_lbfgs: True if the matching optimizer is lbfgs
+        """
+        # warm up lr
+        if self.trainer.global_step <= self.cfg.schedule.warmup.steps:
+            if self.cfg.schedule.warmup.name == "constant":
+                warmup_lr = (
+                    self.cfg.schedule.optimizer.lr * self.cfg.schedule.warmup.ratio
+                )
+            elif self.cfg.schedule.warmup.name == "linear":
+                k = (1 - self.trainer.global_step / self.cfg.schedule.warmup.steps) * (
+                    1 - self.cfg.schedule.warmup.ratio
+                )
+                warmup_lr = self.cfg.schedule.optimizer.lr * (1 - k)
+            elif self.cfg.schedule.warmup.name == "exp":
+                k = self.cfg.schedule.warmup.ratio ** (
+                    1 - self.trainer.global_step / self.cfg.schedule.warmup.steps
+                )
+                warmup_lr = self.cfg.schedule.optimizer.lr * k
+            else:
+                raise Exception("Unsupported warm up type!")
+            for pg in optimizer.param_groups:
+                pg["lr"] = warmup_lr
+
+        # update params
+        optimizer.step(closure=optimizer_closure)
+        optimizer.zero_grad()
+
+    def get_progress_bar_dict(self):
+        # don't show the version number
+        items = super().get_progress_bar_dict()
+        items.pop("v_num", None)
+        items.pop("loss", None)
+        return items
+
+    def scalar_summary(self, tag, phase, value, step):
+        """
+        Write Tensorboard scalar summary log.
+        Args:
+            tag: Name for the tag
+            phase: 'Train' or 'Val'
+            value: Value to record
+            step: Step value to record
+
+        """
+        # if self.local_rank < 1:
+        if self.logger:
+            self.logger.experiment.add_scalars(tag, {phase: value}, step)
+
+    def info(self, string):
+        if self.logger:
+            self.logger.info(string)
+
+    # ------------Hooks-----------------
+    def on_train_start(self) -> None:
+        if self.current_epoch > 0:
+            self.lr_scheduler.last_epoch = self.current_epoch - 1
+
+    def on_pretrain_routine_end(self) -> None:
+        if "weight_averager" in self.cfg.model:
+            if self.logger:
+                self.logger.info("Weight Averaging is enabled")
+            if self.weight_averager and self.weight_averager.has_inited():
+                self.weight_averager.to(self.weight_averager.device)
+                return
+            self.weight_averager = build_weight_averager(
+                self.cfg.model.weight_averager, device=self.device
+            )
+            self.weight_averager.load_from(self.model)
+
+    def on_epoch_start(self):
+        self.model.set_epoch(self.current_epoch)
+
+    def on_train_batch_end(self, outputs, batch, batch_idx, dataloader_idx) -> None:
+        if self.weight_averager:
+            self.weight_averager.update(self.model, self.global_step)
+
+    def on_validation_epoch_start(self):
+        if self.weight_averager:
+            self.weight_averager.apply_to(self.avg_model)
+
+    def on_test_epoch_start(self) -> None:
+        if self.weight_averager:
+            self.on_load_checkpoint({"state_dict": self.state_dict()})
+            self.weight_averager.apply_to(self.model)
+
+    def on_load_checkpoint(self, checkpointed_state: Dict[str, Any]) -> None:
+        if self.weight_averager:
+            avg_params = convert_avg_params(checkpointed_state)
+            if len(avg_params) != len(self.model.state_dict()):
+                if self.logger:
+                    self.logger.info(
+                        "Weight averaging is enabled but average state does not"
+                        "match the model"
+                    )
+            else:
+                self.weight_averager = build_weight_averager(
+                    self.cfg.model.weight_averager, device=self.device
+                )
+                self.weight_averager.load_state_dict(avg_params)
+                if self.logger:
+                    self.logger.info("Loaded average state from checkpoint.")
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/__init__.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..10c0ee2b8888241ae03558a5a8f28c715741ba33
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/__init__.py
@@ -0,0 +1,41 @@
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.box_transform \
+    import bbox2distance, distance2bbox
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.check_point import (
+    convert_avg_params,
+    load_model_weight,
+    save_model,
+)
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.config import cfg, load_config
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.logger \
+    import AverageMeter, Logger, MovingAverage, NanoDetLightningLogger
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.misc \
+    import images_to_levels, multi_apply, unmap
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.path import collect_files, mkdir
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.rank_filter import rank_filter
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.scatter_gather \
+    import gather_results, scatter_kwargs
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.util_mixins import NiceRepr
+
+
+__all__ = [
+    "distance2bbox",
+    "bbox2distance",
+    "load_model_weight",
+    "save_model",
+    "cfg",
+    "load_config",
+    "AverageMeter",
+    "Logger",
+    "MovingAverage",
+    "images_to_levels",
+    "multi_apply",
+    "unmap",
+    "mkdir",
+    "rank_filter",
+    "gather_results",
+    "scatter_kwargs",
+    "NiceRepr",
+    "collect_files",
+    "NanoDetLightningLogger",
+    "convert_avg_params",
+]
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/box_transform.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/box_transform.py
new file mode 100644
index 0000000000000000000000000000000000000000..4b82a8c19f50d351e042ab2f8f6fd6199aa74534
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/box_transform.py
@@ -0,0 +1,49 @@
+import torch
+
+
+def distance2bbox(points, distance, max_shape=None):
+    """Decode distance prediction to bounding box.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        distance (Tensor): Distance from the given point to 4
+            boundaries (left, top, right, bottom).
+        max_shape (tuple): Shape of the image.
+
+    Returns:
+        Tensor: Decoded bboxes.
+    """
+    x1 = points[..., 0] - distance[..., 0]
+    y1 = points[..., 1] - distance[..., 1]
+    x2 = points[..., 0] + distance[..., 2]
+    y2 = points[..., 1] + distance[..., 3]
+    if max_shape is not None:
+        x1 = x1.clamp(min=0, max=max_shape[1])
+        y1 = y1.clamp(min=0, max=max_shape[0])
+        x2 = x2.clamp(min=0, max=max_shape[1])
+        y2 = y2.clamp(min=0, max=max_shape[0])
+    return torch.stack([x1, y1, x2, y2], -1)
+
+
+def bbox2distance(points, bbox, max_dis=None, eps=0.1):
+    """Decode bounding box based on distances.
+
+    Args:
+        points (Tensor): Shape (n, 2), [x, y].
+        bbox (Tensor): Shape (n, 4), "xyxy" format
+        max_dis (float): Upper bound of the distance.
+        eps (float): a small value to ensure target < max_dis, instead <=
+
+    Returns:
+        Tensor: Decoded distances.
+    """
+    left = points[:, 0] - bbox[:, 0]
+    top = points[:, 1] - bbox[:, 1]
+    right = bbox[:, 2] - points[:, 0]
+    bottom = bbox[:, 3] - points[:, 1]
+    if max_dis is not None:
+        left = left.clamp(min=0, max=max_dis - eps)
+        top = top.clamp(min=0, max=max_dis - eps)
+        right = right.clamp(min=0, max=max_dis - eps)
+        bottom = bottom.clamp(min=0, max=max_dis - eps)
+    return torch.stack([left, top, right, bottom], -1)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/check_point.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/check_point.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ac516167a25b5255f9c25832c6e87b586436c17
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/check_point.py
@@ -0,0 +1,100 @@
+# Modifications Copyright 2021 - present, OpenDR European Project
+#
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import Any, Dict
+import torch
+
+
+def load_model_weight(model, checkpoint, logger=None):
+    state_dict = checkpoint["state_dict"].copy()
+    for k in checkpoint["state_dict"]:
+        # convert average model weights
+        if k.startswith("avg_model."):
+            v = state_dict.pop(k)
+            state_dict[k[4:]] = v
+    # strip prefix of state_dict
+    if list(state_dict.keys())[0].startswith("module."):
+        state_dict = {k[7:]: v for k, v in state_dict.items()}
+    if list(state_dict.keys())[0].startswith("model."):
+        state_dict = {k[6:]: v for k, v in state_dict.items()}
+
+    model_state_dict = (
+        model.module.state_dict() if hasattr(model, "module") else model.state_dict()
+    )
+
+    # check loaded parameters and created model parameters
+    for k in state_dict:
+        if k in model_state_dict:
+            if state_dict[k].shape != model_state_dict[k].shape:
+                if logger:
+                    logger.log(
+                        "Skip loading parameter {}, required shape{}, "
+                        "loaded shape{}.".format(
+                            k, model_state_dict[k].shape, state_dict[k].shape
+                        )
+                    )
+                state_dict[k] = model_state_dict[k]
+        else:
+            if logger:
+                logger.log("Drop parameter {}.".format(k))
+    for k in model_state_dict:
+        if not (k in state_dict):
+            if logger:
+                logger.log("No param {}.".format(k))
+            state_dict[k] = model_state_dict[k]
+    model.load_state_dict(state_dict, strict=False)
+    return model
+
+
+# @rank_zero_only
+# @rank_filter
+def save_model(model, path, epoch, iter, optimizer=None):
+    model_state_dict = (
+        model.module.state_dict() if hasattr(model, "module") else model.state_dict()
+    )
+    data = {"epoch": epoch, "state_dict": model_state_dict, "iter": iter}
+    if optimizer is not None:
+        data["optimizer"] = optimizer.state_dict()
+
+    torch.save(data, path)
+
+
+# @rank_zero_only
+# @rank_filter
+def save_model_state(path, model, weight_averager=None, logger=None):
+    if logger:
+        logger.info("Saving model to {}".format(path))
+    state_dict = (
+        weight_averager.state_dict()
+        if weight_averager
+        else model.state_dict()
+    )
+    torch.save({"state_dict": state_dict}, path)
+
+
+def convert_avg_params(checkpoint: Dict[str, Any]) -> Dict[str, Any]:
+    """Converts average state dict to the format that can be loaded to a model.
+    Args:
+        checkpoint: model.
+    Returns:
+        Converted average state dict.
+    """
+    state_dict = checkpoint["state_dict"]
+    avg_weights = {}
+    for k, v in state_dict.items():
+        if "avg_model" in k:
+            avg_weights[k[10:]] = v
+    return avg_weights
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/config.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/config.py
new file mode 100644
index 0000000000000000000000000000000000000000..1b8b3e055c1facc80b324641c2c08ab2a1c6fc84
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/config.py
@@ -0,0 +1,39 @@
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.yacs import CfgNode
+
+cfg = CfgNode(new_allowed=True)
+cfg.save_dir = "./"
+# common params for NETWORK
+cfg.model = CfgNode(new_allowed=True)
+cfg.model.arch = CfgNode(new_allowed=True)
+cfg.model.arch.backbone = CfgNode(new_allowed=True)
+cfg.model.arch.fpn = CfgNode(new_allowed=True)
+cfg.model.arch.head = CfgNode(new_allowed=True)
+
+# DATASET related params
+cfg.data = CfgNode(new_allowed=True)
+cfg.data.train = CfgNode(new_allowed=True)
+cfg.data.val = CfgNode(new_allowed=True)
+cfg.device = CfgNode(new_allowed=True)
+# train
+cfg.schedule = CfgNode(new_allowed=True)
+
+# logger
+cfg.log = CfgNode()
+cfg.log.interval = 50
+
+# testing
+cfg.test = CfgNode()
+# size of images for each device
+
+
+def load_config(cfg, args_cfg):
+    cfg.defrost()
+    cfg.merge_from_file(args_cfg)
+    cfg.freeze()
+
+
+if __name__ == "__main__":
+    import sys
+
+    with open(sys.argv[1], "w") as f:
+        print(cfg, file=f)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/logger.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/logger.py
new file mode 100644
index 0000000000000000000000000000000000000000..b883d8f3363035f4c69b7044575d2282e8e6c8ee
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/logger.py
@@ -0,0 +1,216 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import logging
+import os
+import time
+
+import numpy as np
+from pytorch_lightning.loggers import LightningLoggerBase
+from pytorch_lightning.loggers.base import rank_zero_experiment
+from pytorch_lightning.utilities import rank_zero_only
+from pytorch_lightning.utilities.cloud_io import get_filesystem
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.path import mkdir
+
+
+class Logger:
+    def __init__(self, local_rank, save_dir="./", use_tensorboard=True):
+        # mkdir(local_rank, save_dir)
+        mkdir(save_dir)
+        self.rank = local_rank
+        fmt = ("[%(name)s] [%(asctime)s] %(levelname)s: %(message)s")
+        logging.basicConfig(
+            level=logging.INFO,
+            filename=os.path.join(save_dir, "logs.txt"),
+            filemode="w",
+        )
+        self.log_dir = os.path.join(save_dir, "logs")
+        console = logging.StreamHandler()
+        console.setLevel(logging.INFO)
+        formatter = logging.Formatter(fmt, datefmt="%m-%d %H:%M:%S")
+        console.setFormatter(formatter)
+        logging.getLogger().addHandler(console)
+        if use_tensorboard:
+            try:
+                from torch.utils.tensorboard import SummaryWriter
+            except ImportError:
+                raise ImportError(
+                    'Please run "pip install future tensorboard" to install '
+                    "the dependencies to use torch.utils.tensorboard "
+                    "(applicable to PyTorch 1.1 or higher)"
+                ) from None
+            if self.rank < 1:
+                logging.info(
+                    "Using Tensorboard, logs will be saved in {}".format(self.log_dir)
+                )
+                self.writer = SummaryWriter(log_dir=self.log_dir)
+
+    def log(self, string):
+        if self.rank < 1:
+            logging.info(string)
+
+    def scalar_summary(self, tag, phase, value, step):
+        if self.rank < 1:
+            self.writer.add_scalars(tag, {phase: value}, step)
+
+
+class MovingAverage(object):
+    def __init__(self, val, window_size=50):
+        self.window_size = window_size
+        self.reset()
+        self.push(val)
+
+    def reset(self):
+        self.queue = []
+
+    def push(self, val):
+        self.queue.append(val)
+        if len(self.queue) > self.window_size:
+            self.queue.pop(0)
+
+    def avg(self):
+        return np.mean(self.queue)
+
+
+class AverageMeter(object):
+    """Computes and stores the average and current value"""
+
+    def __init__(self, val):
+        self.reset()
+        self.update(val)
+
+    def reset(self):
+        self.val = 0
+        self.avg = 0
+        self.sum = 0
+        self.count = 0
+
+    def update(self, val, n=1):
+        self.val = val
+        self.sum += val * n
+        self.count += n
+        if self.count > 0:
+            self.avg = self.sum / self.count
+
+
+class NanoDetLightningLogger(LightningLoggerBase):
+    def __init__(self, save_dir="./", **kwargs):
+        super().__init__()
+        self._name = "NanoDet"
+        self._version = time.strftime("%Y-%m-%d-%H-%M-%S", time.localtime())
+        self.log_dir = os.path.join(save_dir, f"logs-{self._version}")
+
+        self._fs = get_filesystem(save_dir)
+        self._fs.makedirs(self.log_dir, exist_ok=True)
+        self._init_logger()
+
+        self._experiment = None
+        self._kwargs = kwargs
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    @rank_zero_experiment
+    def experiment(self):
+        r"""
+        Actual tensorboard object. To use TensorBoard features in your
+        :class:`~pytorch_lightning.core.lightning.LightningModule` do the following.
+
+        Example::
+
+            self.logger.experiment.some_tensorboard_function()
+
+        """
+        if self._experiment is not None:
+            return self._experiment
+
+        assert rank_zero_only.rank == 0, "tried to init log dirs in non global_rank=0"
+
+        try:
+            from torch.utils.tensorboard import SummaryWriter
+        except ImportError:
+            raise ImportError(
+                'Please run "pip install future tensorboard" to install '
+                "the dependencies to use torch.utils.tensorboard "
+                "(applicable to PyTorch 1.1 or higher)"
+            ) from None
+
+        self._experiment = SummaryWriter(log_dir=self.log_dir, **self._kwargs)
+        return self._experiment
+
+    @property
+    def version(self):
+        return self._version
+
+    @rank_zero_only
+    def _init_logger(self):
+        self.logger = logging.getLogger(name=self.name)
+        self.logger.setLevel(logging.INFO)
+
+        # create file handler
+        fh = logging.FileHandler(os.path.join(self.log_dir, "logs.txt"))
+        fh.setLevel(logging.INFO)
+        # set file formatter
+        f_fmt = "[%(name)s][%(asctime)s]%(levelname)s: %(message)s"
+        file_formatter = logging.Formatter(f_fmt, datefmt="%m-%d %H:%M:%S")
+        fh.setFormatter(file_formatter)
+
+        # create console handler
+        ch = logging.StreamHandler()
+        ch.setLevel(logging.INFO)
+        # set console formatter
+
+        c_fmt = ("[%(name)s] [%(asctime)s] %(levelname)s: %(message)s")
+        console_formatter = logging.Formatter(c_fmt, datefmt="%m-%d %H:%M:%S")
+        ch.setFormatter(console_formatter)
+
+        # add the handlers to the logger
+        self.logger.addHandler(fh)
+        self.logger.addHandler(ch)
+
+    @rank_zero_only
+    def info(self, string):
+        self.logger.info(string)
+
+    @rank_zero_only
+    def log(self, string):
+        self.logger.info(string)
+
+    @rank_zero_only
+    def dump_cfg(self, cfg_node):
+        with open(os.path.join(self.log_dir, "train_cfg.yml"), "w") as f:
+            cfg_node.dump(stream=f)
+
+    @rank_zero_only
+    def log_hyperparams(self, params):
+        self.logger.info(f"hyperparams: {params}")
+
+    @rank_zero_only
+    def log_metrics(self, metrics, step):
+        self.logger.info(f"Val_metrics: {metrics}")
+        for k, v in metrics.items():
+            self.experiment.add_scalars("Val_metrics/" + k, {"Val": v}, step)
+
+    @rank_zero_only
+    def save(self):
+        super().save()
+
+    @rank_zero_only
+    def finalize(self, status):
+        self.experiment.flush()
+        self.experiment.close()
+        self.save()
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/misc.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/misc.py
new file mode 100644
index 0000000000000000000000000000000000000000..961b77bc28f1831cd74b1088bfa7fb4511bde883
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/misc.py
@@ -0,0 +1,52 @@
+# Modification 2020 RangiLyu
+# Copyright 2018-2019 Open-MMLab.
+
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+
+#     http://www.apache.org/licenses/LICENSE-2.0
+
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from functools import partial
+
+import torch
+
+
+def multi_apply(func, *args, **kwargs):
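+    # apply func element-wise over the zipped argument lists and transpose
+    # the per-call results into per-output lists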
+    pfunc = partial(func, **kwargs) if kwargs else func
+    map_results = map(pfunc, *args)
+    return tuple(map(list, zip(*map_results)))
+
+
+def images_to_levels(target, num_level_anchors):
+    """Convert targets by image to targets by feature level.
+
+    [target_img0, target_img1] -> [target_level0, target_level1, ...]
+    """
+    target = torch.stack(target, 0)
+    level_targets = []
+    start = 0
+    for n in num_level_anchors:
+        end = start + n
+        level_targets.append(target[:, start:end].squeeze(0))
+        start = end
+    return level_targets
+
+
+def unmap(data, count, inds, fill=0):
+    """Unmap a subset of item (data) back to the original set of items (of
+    size count)"""
+    if data.dim() == 1:
+        ret = data.new_full((count,), fill)
+        ret[inds.type(torch.bool)] = data
+    else:
+        new_size = (count,) + data.size()[1:]
+        ret = data.new_full(new_size, fill)
+        ret[inds.type(torch.bool), :] = data
+    return ret
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/path.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/path.py
new file mode 100644
index 0000000000000000000000000000000000000000..b0887d41a60706c9fb37916de83c50c670352d83
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/path.py
@@ -0,0 +1,34 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+
+# from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.rank_filter import rank_filter
+
+
+# @rank_filter
+def mkdir(path):
+    if not os.path.exists(path):
+        os.makedirs(path)
+
+
+def collect_files(path, exts):
+    file_paths = []
+    for maindir, subdir, filename_list in os.walk(path):
+        for filename in filename_list:
+            file_path = os.path.join(maindir, filename)
+            ext = os.path.splitext(file_path)[1]
+            if ext in exts:
+                file_paths.append(file_path)
+    return file_paths
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/rank_filter.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/rank_filter.py
new file mode 100644
index 0000000000000000000000000000000000000000..2316b2f983b30372f6068f28feba0415fddebc61
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/rank_filter.py
@@ -0,0 +1,23 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+
+def rank_filter(func):
+    def func_filter(local_rank=-1, *args, **kwargs):
+        if local_rank < 1:
+            return func(*args, **kwargs)
+        else:
+            pass
+
+    return func_filter
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/scatter_gather.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/scatter_gather.py
new file mode 100644
index 0000000000000000000000000000000000000000..8e28560d69dc6d8b08ead52fd1cd14c8d7659e5d
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/scatter_gather.py
@@ -0,0 +1,97 @@
+# Copyright 2021 RangiLyu.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import pickle
+
+import torch
+import torch.distributed as dist
+from torch.autograd import Variable
+from torch.nn.parallel._functions import Scatter
+
+
+def list_scatter(input, target_gpus, chunk_sizes):
+    ret = []
+    for size in chunk_sizes:
+        ret.append(input[:size])
+        del input[:size]
+    return tuple(ret)
+
+
+def scatter(inputs, target_gpus, dim=0, chunk_sizes=None):
+    """
+    Slices variables into approximately equal chunks and
+    distributes them across given GPUs. Duplicates
+    references to objects that are not variables. Does not
+    support Tensors.
+    """
+
+    def scatter_map(obj):
+        if isinstance(obj, Variable):
+            return Scatter.apply(target_gpus, chunk_sizes, dim, obj)
+        assert not torch.is_tensor(obj), "Tensors not supported in scatter."
+        if isinstance(obj, list):
+            return list_scatter(obj, target_gpus, chunk_sizes)
+        if isinstance(obj, tuple):
+            return list(zip(*map(scatter_map, obj)))
+        if isinstance(obj, dict):
+            return list(map(type(obj), zip(*map(scatter_map, obj.items()))))
+        return [obj for _ in target_gpus]
+
+    return scatter_map(inputs)
+
+
+def scatter_kwargs(inputs, kwargs, target_gpus, dim=0, chunk_sizes=None):
+    r"""Scatter with support for kwargs dictionary"""
+    inputs = scatter(inputs, target_gpus, dim, chunk_sizes) if inputs else []
+    kwargs = scatter(kwargs, target_gpus, dim, chunk_sizes) if kwargs else []
+    if len(inputs) < len(kwargs):
+        inputs.extend([() for _ in range(len(kwargs) - len(inputs))])
+    elif len(kwargs) < len(inputs):
+        kwargs.extend([{} for _ in range(len(inputs) - len(kwargs))])
+    inputs = tuple(inputs)
+    kwargs = tuple(kwargs)
+    return inputs, kwargs
+
+
+def gather_results(result_part, device):
+    rank = -1
+    world_size = 1
+    if dist.is_available() and dist.is_initialized():
+        rank = dist.get_rank()
+        world_size = dist.get_world_size()
+
+    # dump result part to tensor with pickle
+    part_tensor = torch.tensor(
+        bytearray(pickle.dumps(result_part)), dtype=torch.uint8, device=device
+    )
+
+    # gather all result part tensor shape
+    shape_tensor = torch.tensor(part_tensor.shape, device=device)
+    shape_list = [shape_tensor.clone() for _ in range(world_size)]
+    dist.all_gather(shape_list, shape_tensor)
+
+    # padding result part tensor to max length
+    shape_max = torch.tensor(shape_list).max()
+    part_send = torch.zeros(shape_max, dtype=torch.uint8, device=device)
+    part_send[: shape_tensor[0]] = part_tensor
+    part_recv_list = [part_tensor.new_zeros(shape_max) for _ in range(world_size)]
+
+    # gather all result dict
+    dist.all_gather(part_recv_list, part_send)
+
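+    # only rank 0 (or a non-distributed run, rank == -1) merges and returns
+    # the results; other ranks implicitly return None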
+    if rank < 1:
+        all_res = {}
+        for recv, shape in zip(part_recv_list, shape_list):
+            all_res.update(pickle.loads(recv[: shape[0]].cpu().numpy().tobytes()))
+        return all_res
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/util_mixins.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/util_mixins.py
new file mode 100644
index 0000000000000000000000000000000000000000..278aa037f8ee4dc8db22fc8ef50eb324530e6630
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/util_mixins.py
@@ -0,0 +1,105 @@
+"""This module defines the :class:`NiceRepr` mixin class, which defines a
+``__repr__`` and ``__str__`` method that only depend on a custom ``__nice__``
+method, which you must define. This means you only have to overload one
+function instead of two.  Furthermore, if the object defines a ``__len__``
+method, then the ``__nice__`` method defaults to something sensible, otherwise
+it is treated as abstract and raises ``NotImplementedError``.
+
+To use simply have your object inherit from :class:`NiceRepr`
+(multi-inheritance should be ok).
+
+This code was copied from the ubelt library: https://github.com/Erotemic/ubelt
+
+Example:
+    >>> # Objects that define __nice__ have a default __str__ and __repr__
+    >>> class Student(NiceRepr):
+    ...    def __init__(self, name):
+    ...        self.name = name
+    ...    def __nice__(self):
+    ...        return self.name
+    >>> s1 = Student('Alice')
+    >>> s2 = Student('Bob')
+    >>> print(f's1 = {s1}')
+    >>> print(f's2 = {s2}')
+    s1 = <Student(Alice)>
+    s2 = <Student(Bob)>
+
+Example:
+    >>> # Objects that define __len__ have a default __nice__
+    >>> class Group(NiceRepr):
+    ...    def __init__(self, data):
+    ...        self.data = data
+    ...    def __len__(self):
+    ...        return len(self.data)
+    >>> g = Group([1, 2, 3])
+    >>> print(f'g = {g}')
+    g = <Group(3)>
+"""
+import warnings
+
+
+class NiceRepr(object):
+    """Inherit from this class and define ``__nice__`` to "nicely" print your
+    objects.
+
+    Defines ``__str__`` and ``__repr__`` in terms of ``__nice__`` function
+    Classes that inherit from :class:`NiceRepr` should redefine ``__nice__``.
+    If the inheriting class has a ``__len__``, method then the default
+    ``__nice__`` method will return its length.
+
+    Example:
+        >>> class Foo(NiceRepr):
+        ...    def __nice__(self):
+        ...        return 'info'
+        >>> foo = Foo()
+        >>> assert str(foo) == '<Foo(info)>'
+        >>> assert repr(foo).startswith('<Foo(info) at ')
+
+    Example:
+        >>> class Bar(NiceRepr):
+        ...    pass
+        >>> bar = Bar()
+        >>> import pytest
+        >>> with pytest.warns(None) as record:
+        >>>     assert 'object at' in str(bar)
+        >>>     assert 'object at' in repr(bar)
+
+    Example:
+        >>> class Baz(NiceRepr):
+        ...    def __len__(self):
+        ...        return 5
+        >>> baz = Baz()
+        >>> assert str(baz) == '<Baz(5)>'
+    """
+
+    def __nice__(self):
+        """str: a "nice" summary string describing this module"""
+        if hasattr(self, "__len__"):
+            # It is a common pattern for objects to use __len__ in __nice__
+            # As a convenience we define a default __nice__ for these objects
+            return str(len(self))
+        else:
+            # In all other cases force the subclass to overload __nice__
+            raise NotImplementedError(
+                f"Define the __nice__ method for {self.__class__!r}"
+            )
+
+    def __repr__(self):
+        """str: the string of the module"""
+        try:
+            nice = self.__nice__()
+            classname = self.__class__.__name__
+            return f"<{classname}({nice}) at {hex(id(self))}>"
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
+
+    def __str__(self):
+        """str: the string of the module"""
+        try:
+            classname = self.__class__.__name__
+            nice = self.__nice__()
+            return f"<{classname}({nice})>"
+        except NotImplementedError as ex:
+            warnings.warn(str(ex), category=RuntimeWarning)
+            return object.__repr__(self)
diff --git a/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/yacs.py b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/yacs.py
new file mode 100644
index 0000000000000000000000000000000000000000..c38294b663f15013377d88d99451706206abeeda
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/algorithm/nanodet/util/yacs.py
@@ -0,0 +1,510 @@
+# Copyright (c) 2018-present, Facebook, Inc.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+##############################################################################
+"""YACS -- Yet Another Configuration System is designed to be a simple
+configuration management system for academic and industrial research
+projects.
+
+See README.md for usage and examples.
+"""
+
+import copy
+import io
+import logging
+import os
+from ast import literal_eval
+
+import yaml
+import importlib.util
+
+# Filename extensions for loading configs from files
+_YAML_EXTS = {"", ".yaml", ".yml"}
+_PY_EXTS = {".py"}
+
+_FILE_TYPES = (io.IOBase,)
+
+# CfgNodes can only contain a limited set of valid types
+_VALID_TYPES = {tuple, list, str, int, float, bool, type(None)}
+
+
+logger = logging.getLogger(__name__)
+
+
+class CfgNode(dict):
+    """
+    CfgNode represents an internal node in the configuration tree. It's a simple
+    dict-like container that allows for attribute-based access to keys.
+    """
+
+    IMMUTABLE = "__immutable__"
+    DEPRECATED_KEYS = "__deprecated_keys__"
+    RENAMED_KEYS = "__renamed_keys__"
+    NEW_ALLOWED = "__new_allowed__"
+
+    def __init__(self, init_dict=None, key_list=None, new_allowed=False):
+        """
+        Args:
+            init_dict (dict): the possibly-nested dictionary to initialize the
+                CfgNode.
+            key_list (list[str]): a list of names which index this CfgNode from
+                the root.
+                Currently only used for logging purposes.
+            new_allowed (bool): whether adding new key is allowed when merging with
+                other configs.
+        """
+        # Recursively convert nested dictionaries in init_dict into CfgNodes
+        init_dict = {} if init_dict is None else init_dict
+        key_list = [] if key_list is None else key_list
+        init_dict = self._create_config_tree_from_dict(init_dict, key_list)
+        super(CfgNode, self).__init__(init_dict)
+        # Manage if the CfgNode is frozen or not
+        self.__dict__[CfgNode.IMMUTABLE] = False
+        # Deprecated options
+        # If an option is removed from the code and you don't want to break existing
+        # yaml configs, you can add the full config key as a string to the set below.
+        self.__dict__[CfgNode.DEPRECATED_KEYS] = set()
+        # Renamed options
+        # If you rename a config option, record the mapping from the old name to the
+        # new name in the dictionary below. Optionally, if the type also changed, you
+        # can make the value a tuple that specifies first the renamed key and then
+        # instructions for how to edit the config file.
+        self.__dict__[CfgNode.RENAMED_KEYS] = {
+            # 'EXAMPLE.OLD.KEY': 'EXAMPLE.NEW.KEY',  # Dummy example to follow
+            # 'EXAMPLE.OLD.KEY': (                   # A more complex example to follow
+            #     'EXAMPLE.NEW.KEY',
+            #     "Also convert to a tuple, e.g., 'foo' -> ('foo',) or "
+            #     + "'foo:bar' -> ('foo', 'bar')"
+            # ),
+        }
+
+        # Allow new attributes after initialisation
+        self.__dict__[CfgNode.NEW_ALLOWED] = new_allowed
+
+    @classmethod
+    def _create_config_tree_from_dict(cls, dic, key_list):
+        """
+        Create a configuration tree using the given dict.
+        Any dict-like objects inside dict will be treated as a new CfgNode.
+
+        Args:
+            dic (dict):
+            key_list (list[str]): a list of names which index this CfgNode from
+                the root. Currently only used for logging purposes.
+        """
+        dic = copy.deepcopy(dic)
+        for k, v in dic.items():
+            if isinstance(v, dict):
+                # Convert dict to CfgNode
+                dic[k] = cls(v, key_list=key_list + [k])
+            else:
+                # Check for valid leaf type or nested CfgNode
+                _assert_with_logging(
+                    _valid_type(v, allow_cfg_node=False),
+                    "Key {} with value {} is not a valid type; valid types: {}".format(
+                        ".".join(key_list + [k]), type(v), _VALID_TYPES
+                    ),
+                )
+        return dic
+
+    def __getattr__(self, name):
+        if name in self:
+            return self[name]
+        else:
+            raise AttributeError(name)
+
+    def __setattr__(self, name, value):
+        if self.is_frozen():
+            raise AttributeError(
+                "Attempted to set {} to {}, but CfgNode is immutable".format(
+                    name, value
+                )
+            )
+
+        _assert_with_logging(
+            name not in self.__dict__,
+            "Invalid attempt to modify internal CfgNode state: {}".format(name),
+        )
+        _assert_with_logging(
+            _valid_type(value, allow_cfg_node=True),
+            "Invalid type {} for key {}; valid types = {}".format(
+                type(value), name, _VALID_TYPES
+            ),
+        )
+
+        self[name] = value
+
+    def __str__(self):
+        def _indent(s_, num_spaces):
+            s = s_.split("\n")
+            if len(s) == 1:
+                return s_
+            first = s.pop(0)
+            s = [(num_spaces * " ") + line for line in s]
+            s = "\n".join(s)
+            s = first + "\n" + s
+            return s
+
+        r = ""
+        s = []
+        for k, v in sorted(self.items()):
+            separator = "\n" if isinstance(v, CfgNode) else " "
+            attr_str = "{}:{}{}".format(str(k), separator, str(v))
+            attr_str = _indent(attr_str, 2)
+            s.append(attr_str)
+        r += "\n".join(s)
+        return r
+
+    def __repr__(self):
+        return "{}({})".format(self.__class__.__name__, super(CfgNode, self).__repr__())
+
+    def dump(self, **kwargs):
+        """Dump to a string."""
+
+        def convert_to_dict(cfg_node, key_list):
+            if not isinstance(cfg_node, CfgNode):
+                _assert_with_logging(
+                    _valid_type(cfg_node),
+                    "Key {} with value {} is not a valid type; valid types: {}".format(
+                        ".".join(key_list), type(cfg_node), _VALID_TYPES
+                    ),
+                )
+                return cfg_node
+            else:
+                cfg_dict = dict(cfg_node)
+                for k, v in cfg_dict.items():
+                    cfg_dict[k] = convert_to_dict(v, key_list + [k])
+                return cfg_dict
+
+        self_as_dict = convert_to_dict(self, [])
+        return yaml.safe_dump(self_as_dict, **kwargs)
+
+    def merge_from_file(self, cfg_filename):
+        """Load a yaml config file and merge it this CfgNode."""
+        with open(cfg_filename, "r", encoding="utf-8") as f:
+            cfg = self.load_cfg(f)
+        self.merge_from_other_cfg(cfg)
+
+    def merge_from_other_cfg(self, cfg_other):
+        """Merge `cfg_other` into this CfgNode."""
+        _merge_a_into_b(cfg_other, self, self, [])
+
+    def merge_from_list(self, cfg_list):
+        """Merge config (keys, values) in a list (e.g., from command line) into
+        this CfgNode. For example, `cfg_list = ['FOO.BAR', 0.5]`.
+        """
+        _assert_with_logging(
+            len(cfg_list) % 2 == 0,
+            "Override list has odd length: {}; it must be a list of pairs".format(
+                cfg_list
+            ),
+        )
+        root = self
+        for full_key, v in zip(cfg_list[0::2], cfg_list[1::2]):
+            if root.key_is_deprecated(full_key):
+                continue
+            if root.key_is_renamed(full_key):
+                root.raise_key_rename_error(full_key)
+            key_list = full_key.split(".")
+            d = self
+            for subkey in key_list[:-1]:
+                _assert_with_logging(
+                    subkey in d, "Non-existent key: {}".format(full_key)
+                )
+                d = d[subkey]
+            subkey = key_list[-1]
+            _assert_with_logging(subkey in d, "Non-existent key: {}".format(full_key))
+            value = self._decode_cfg_value(v)
+            value = _check_and_coerce_cfg_value_type(value, d[subkey], subkey, full_key)
+            d[subkey] = value
+
+    def freeze(self):
+        """Make this CfgNode and all of its children immutable."""
+        self._immutable(True)
+
+    def defrost(self):
+        """Make this CfgNode and all of its children mutable."""
+        self._immutable(False)
+
+    def is_frozen(self):
+        """Return mutability."""
+        return self.__dict__[CfgNode.IMMUTABLE]
+
+    def _immutable(self, is_immutable):
+        """Set immutability to is_immutable and recursively apply the setting
+        to all nested CfgNodes.
+        """
+        self.__dict__[CfgNode.IMMUTABLE] = is_immutable
+        # Recursively set immutable state
+        for v in self.__dict__.values():
+            if isinstance(v, CfgNode):
+                v._immutable(is_immutable)
+        for v in self.values():
+            if isinstance(v, CfgNode):
+                v._immutable(is_immutable)
+
+    def clone(self):
+        """Recursively copy this CfgNode."""
+        return copy.deepcopy(self)
+
+    def register_deprecated_key(self, key):
+        """Register key (e.g. `FOO.BAR`) a deprecated option. When merging deprecated
+        keys a warning is generated and the key is ignored.
+        """
+        _assert_with_logging(
+            key not in self.__dict__[CfgNode.DEPRECATED_KEYS],
+            "key {} is already registered as a deprecated key".format(key),
+        )
+        self.__dict__[CfgNode.DEPRECATED_KEYS].add(key)
+
+    def register_renamed_key(self, old_name, new_name, message=None):
+        """Register a key as having been renamed from `old_name` to `new_name`.
+        When merging a renamed key, an exception is thrown alerting the user to
+        the fact that the key has been renamed.
+        """
+        _assert_with_logging(
+            old_name not in self.__dict__[CfgNode.RENAMED_KEYS],
+            "key {} is already registered as a renamed cfg key".format(old_name),
+        )
+        value = new_name
+        if message:
+            value = (new_name, message)
+        self.__dict__[CfgNode.RENAMED_KEYS][old_name] = value
+
+    def key_is_deprecated(self, full_key):
+        """Test if a key is deprecated."""
+        if full_key in self.__dict__[CfgNode.DEPRECATED_KEYS]:
+            logger.warning("Deprecated config key (ignoring): {}".format(full_key))
+            return True
+        return False
+
+    def key_is_renamed(self, full_key):
+        """Test if a key is renamed."""
+        return full_key in self.__dict__[CfgNode.RENAMED_KEYS]
+
+    def raise_key_rename_error(self, full_key):
+        new_key = self.__dict__[CfgNode.RENAMED_KEYS][full_key]
+        if isinstance(new_key, tuple):
+            msg = " Note: " + new_key[1]
+            new_key = new_key[0]
+        else:
+            msg = ""
+        raise KeyError(
+            "Key {} was renamed to {}; please update your config.{}".format(
+                full_key, new_key, msg
+            )
+        )
+
+    def is_new_allowed(self):
+        return self.__dict__[CfgNode.NEW_ALLOWED]
+
+    @classmethod
+    def load_cfg(cls, cfg_file_obj_or_str):
+        """
+        Load a cfg.
+        Args:
+            cfg_file_obj_or_str (str or file):
+                Supports loading from:
+                - A file object backed by a YAML file
+                - A file object backed by a Python source file that exports an attribute
+                  "cfg" that is either a dict or a CfgNode
+                - A string that can be parsed as valid YAML
+        """
+        _assert_with_logging(
+            isinstance(cfg_file_obj_or_str, _FILE_TYPES + (str,)),
+            "Expected first argument to be of type {} or {}, but it was {}".format(
+                _FILE_TYPES, str, type(cfg_file_obj_or_str)
+            ),
+        )
+        if isinstance(cfg_file_obj_or_str, str):
+            return cls._load_cfg_from_yaml_str(cfg_file_obj_or_str)
+        elif isinstance(cfg_file_obj_or_str, _FILE_TYPES):
+            return cls._load_cfg_from_file(cfg_file_obj_or_str)
+        else:
+            raise NotImplementedError("Impossible to reach here (unless there's a bug)")
+
+    @classmethod
+    def _load_cfg_from_file(cls, file_obj):
+        """Load a config from a YAML file or a Python source file."""
+        _, file_extension = os.path.splitext(file_obj.name)
+        if file_extension in _YAML_EXTS:
+            return cls._load_cfg_from_yaml_str(file_obj.read())
+        elif file_extension in _PY_EXTS:
+            return cls._load_cfg_py_source(file_obj.name)
+        else:
+            raise Exception(
+                "Attempt to load from an unsupported file type {}; "
+                "only {} are supported".format(file_obj, _YAML_EXTS.union(_PY_EXTS))
+            )
+
+    @classmethod
+    def _load_cfg_from_yaml_str(cls, str_obj):
+        """Load a config from a YAML string encoding."""
+        cfg_as_dict = yaml.safe_load(str_obj)
+        return cls(cfg_as_dict)
+
+    @classmethod
+    def _load_cfg_py_source(cls, filename):
+        """Load a config from a Python source file."""
+        module = _load_module_from_file("yacs.config.override", filename)
+        _assert_with_logging(
+            hasattr(module, "cfg"),
+            "Python module from file {} must have 'cfg' attr".format(filename),
+        )
+        VALID_ATTR_TYPES = {dict, CfgNode}
+        _assert_with_logging(
+            type(module.cfg) in VALID_ATTR_TYPES,
+            "Imported module 'cfg' attr must be in {} but is {} instead".format(
+                VALID_ATTR_TYPES, type(module.cfg)
+            ),
+        )
+        return cls(module.cfg)
+
+    @classmethod
+    def _decode_cfg_value(cls, value):
+        """
+        Decodes a raw config value (e.g., from a yaml config file or command
+        line argument) into a Python object.
+
+        If the value is a dict, it will be interpreted as a new CfgNode.
+        If the value is a str, it will be evaluated as a Python literal.
+        Otherwise it is returned as-is.
+        """
+        # Configs parsed from raw yaml will contain dictionary keys that need to be
+        # converted to CfgNode objects
+        if isinstance(value, dict):
+            return cls(value)
+        # All remaining processing is only applied to strings
+        if not isinstance(value, str):
+            return value
+        # Try to interpret `value` as a:
+        #   string, number, tuple, list, dict, boolean, or None
+        try:
+            value = literal_eval(value)
+        # The following two excepts allow v to pass through when it represents a
+        # string.
+        #
+        # Longer explanation:
+        # The type of v is always a string (before calling literal_eval), but
+        # sometimes it *represents* a string and other times a data structure, like
+        # a list. In the case that v represents a string, what we got back from the
+        # yaml parser is 'foo' *without quotes* (so, not '"foo"'). literal_eval is
+        # ok with '"foo"', but will raise a ValueError if given 'foo'. In other
+        # cases, like paths (v = 'foo/bar' and not v = '"foo/bar"'), literal_eval
+        # will raise a SyntaxError.
+        except ValueError:
+            pass
+        except SyntaxError:
+            pass
+        return value
+
+
+load_cfg = (
+    CfgNode.load_cfg
+)  # keep this function in global scope for backward compatibility
+
+
+def _valid_type(value, allow_cfg_node=False):
+    return (type(value) in _VALID_TYPES) or (
+        allow_cfg_node and isinstance(value, CfgNode)
+    )
+
+
+def _merge_a_into_b(a, b, root, key_list):
+    """Merge config dictionary a into config dictionary b, clobbering the
+    options in b whenever they are also specified in a.
+    """
+    _assert_with_logging(
+        isinstance(a, CfgNode),
+        "`a` (cur type {}) must be an instance of {}".format(type(a), CfgNode),
+    )
+    _assert_with_logging(
+        isinstance(b, CfgNode),
+        "`b` (cur type {}) must be an instance of {}".format(type(b), CfgNode),
+    )
+
+    for k, v_ in a.items():
+        full_key = ".".join(key_list + [k])
+
+        v = copy.deepcopy(v_)
+        v = b._decode_cfg_value(v)
+
+        if k in b:
+            v = _check_and_coerce_cfg_value_type(v, b[k], k, full_key)
+            # Recursively merge dicts
+            if isinstance(v, CfgNode):
+                _merge_a_into_b(v, b[k], root, key_list + [k])
+            else:
+                b[k] = v
+        elif b.is_new_allowed():
+            b[k] = v
+        else:
+            if root.key_is_deprecated(full_key):
+                continue
+            elif root.key_is_renamed(full_key):
+                root.raise_key_rename_error(full_key)
+            else:
+                raise KeyError("Non-existent config key: {}".format(full_key))
+
+
+def _check_and_coerce_cfg_value_type(replacement, original, key, full_key):
+    """Checks that `replacement`, which is intended to replace `original` is of
+    the right type. The type is correct if it matches exactly or is one of a few
+    cases in which the type can be easily coerced.
+    """
+    original_type = type(original)
+    replacement_type = type(replacement)
+
+    # The types must match (with some exceptions)
+    if replacement_type == original_type:
+        return replacement
+
+    # Cast replacement from from_type to to_type if the replacement and original
+    # types match from_type and to_type
+    def conditional_cast(from_type, to_type):
+        if replacement_type == from_type and original_type == to_type:
+            return True, to_type(replacement)
+        else:
+            return False, None
+
+    # Conditionally casts
+    # list <-> tuple
+    casts = [(tuple, list), (list, tuple)]
+
+    for (from_type, to_type) in casts:
+        converted, converted_value = conditional_cast(from_type, to_type)
+        if converted:
+            return converted_value
+
+    raise ValueError(
+        "Type mismatch ({} vs. {}) with values ({} vs. {}) for config "
+        "key: {}".format(
+            original_type, replacement_type, original, replacement, full_key
+        )
+    )
+
+
+def _assert_with_logging(cond, msg):
+    if not cond:
+        logger.debug(msg)
+    assert cond, msg
+
+
+def _load_module_from_file(name, filename):
+    spec = importlib.util.spec_from_file_location(name, filename)
+    module = importlib.util.module_from_spec(spec)
+    spec.loader.exec_module(module)
+    return module
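+
+
+# A minimal usage sketch of the CfgNode API defined above, guarded so that importing this
+# module has no side effects. It shows attribute-style access, command-line style overrides
+# via merge_from_list (string values are decoded back into Python literals), and freezing
+# the config to make it immutable.
+if __name__ == "__main__":
+    _C = CfgNode()
+    _C.MODEL = CfgNode()
+    _C.MODEL.NAME = "nanodet"
+    _C.TRAIN = CfgNode()
+    _C.TRAIN.LR = 0.01
+    _C.TRAIN.BATCH_SIZE = 64
+
+    # Override values as if they came from the command line
+    _C.merge_from_list(["TRAIN.LR", "0.001", "TRAIN.BATCH_SIZE", "32"])
+
+    _C.freeze()
+    print(_C.dump())        # YAML representation of the config tree
+    print(_C.is_frozen())   # True; attribute assignment would now raise an AttributeError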
diff --git a/src/opendr/perception/object_detection_2d/nanodet/dependencies.ini b/src/opendr/perception/object_detection_2d/nanodet/dependencies.ini
new file mode 100644
index 0000000000000000000000000000000000000000..a3105f449670aa6daae01834f187592a4eb5c593
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/dependencies.ini
@@ -0,0 +1,21 @@
+[runtime]
+# 'python' key expects a value using the Python requirements file format
+#  https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format 
+python=torch>=1.7
+       pytorch-lightning==1.2.3
+       omegaconf>=2.0.1
+       torchvision
+       opencv-python
+       pycocotools
+       Cython
+       matplotlib
+       numpy
+       onnx
+       onnx-simplifier
+       pyaml
+       tabulate
+       tensorboard
+       torchmetrics
+       tqdm
+
+opendr=opendr-toolkit-engine
diff --git a/src/opendr/perception/object_detection_2d/nanodet/nanodet_learner.py b/src/opendr/perception/object_detection_2d/nanodet/nanodet_learner.py
new file mode 100644
index 0000000000000000000000000000000000000000..be505ee6e35c95103993f1f4838b13dd471c91a0
--- /dev/null
+++ b/src/opendr/perception/object_detection_2d/nanodet/nanodet_learner.py
@@ -0,0 +1,525 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import os
+import datetime
+import json
+from pathlib import Path
+
+import pytorch_lightning as pl
+import torch
+from pytorch_lightning.callbacks import ProgressBar
+
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util.check_point import save_model_state
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.model.arch import build_model
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.collate import naive_collate
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.data.dataset import build_dataset
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.trainer.task import TrainingTask
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.evaluator import build_evaluator
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.inferencer.utilities import Predictor
+from opendr.perception.object_detection_2d.nanodet.algorithm.nanodet.util import (
+    NanoDetLightningLogger,
+    Logger,
+    cfg,
+    load_config,
+    load_model_weight,
+    mkdir,
+)
+
+from opendr.engine.data import Image
+from opendr.engine.target import BoundingBox, BoundingBoxList
+from opendr.engine.constants import OPENDR_SERVER_URL
+
+from opendr.engine.learners import Learner
+from urllib.request import urlretrieve
+
+_MODEL_NAMES = {"EfficientNet_Lite0_320", "EfficientNet_Lite1_416", "EfficientNet_Lite2_512",
+                "RepVGG_A0_416", "t", "g", "m", "m_416", "m_0.5x", "m_1.5x", "m_1.5x_416",
+                "plus_m_320", "plus_m_1.5x_320", "plus_m_416", "plus_m_1.5x_416", "custom"}
+
+
+class NanodetLearner(Learner):
+    def __init__(self, model_to_use="plus_m_1.5x_416", iters=None, lr=None, batch_size=None, checkpoint_after_iter=None,
+                 checkpoint_load_iter=None, temp_path='', device='cuda', weight_decay=None, warmup_steps=None,
+                 warmup_ratio=None, lr_schedule_T_max=None, lr_schedule_eta_min=None, grad_clip=None):
+
+        """Initialise the Nanodet Learner"""
+
+        self.cfg = self._load_hparam(model_to_use)
+        self.lr_schedule_T_max = lr_schedule_T_max
+        self.lr_schedule_eta_min = lr_schedule_eta_min
+        self.warmup_steps = warmup_steps
+        self.warmup_ratio = warmup_ratio
+        self.grad_clip = grad_clip
+
+        self.overwrite_config(lr=lr, weight_decay=weight_decay, iters=iters, batch_size=batch_size,
+                              checkpoint_after_iter=checkpoint_after_iter, checkpoint_load_iter=checkpoint_load_iter,
+                              temp_path=temp_path)
+
+        self.lr = float(self.cfg.schedule.optimizer.lr)
+        self.weight_decay = float(self.cfg.schedule.optimizer.weight_decay)
+        self.iters = int(self.cfg.schedule.total_epochs)
+        self.batch_size = int(self.cfg.device.batchsize_per_gpu)
+        self.temp_path = self.cfg.save_dir
+        self.checkpoint_after_iter = int(self.cfg.schedule.val_intervals)
+        self.checkpoint_load_iter = int(self.cfg.schedule.resume)
+        self.device = device
+        self.classes = self.cfg.class_names
+
+        super(NanodetLearner, self).__init__(lr=self.lr, iters=self.iters, batch_size=self.batch_size,
+                                             checkpoint_after_iter=self.checkpoint_after_iter,
+                                             checkpoint_load_iter=self.checkpoint_load_iter,
+                                             temp_path=self.temp_path, device=self.device)
+
+        self.model = build_model(self.cfg.model)
+        self.logger = None
+        self.task = None
+
+    def _load_hparam(self, model: str):
+        """ Load hyperparameters for nanodet models and training configuration
+
+        :parameter model: The name of the model of which we want to load the config file
+        :type model: str
+        :return: config with hyperparameters
+        :rtype: dict
+        """
+        assert (
+                model in _MODEL_NAMES
+        ), f"Invalid model selected. Choose one of {_MODEL_NAMES}."
+        full_path = list()
+        path = Path(__file__).parent / "algorithm" / "config"
+        wanted_file = "nanodet_{}.yml".format(model)
+        for root, _, files in os.walk(path):
+            if wanted_file in files:
+                full_path.append(os.path.join(root, wanted_file))
+        assert (len(full_path) == 1), f"You must have exactly one nanodet_{model}.yml file in your config folder"
+        load_config(cfg, full_path[0])
+        return cfg
+
+    def overwrite_config(self, lr=0.001, weight_decay=0.05, iters=10, batch_size=64, checkpoint_after_iter=0,
+                         checkpoint_load_iter=0, temp_path=''):
+        """
+        Helper method that updates the config with the hyperparameters provided through the OpenDR API.
+        :param lr: learning rate used in training
+        :type lr: float, optional
+        :param weight_decay: weight decay used in training
+        :type weight_decay: float, optional
+        :param iters: maximum number of epochs to train for
+        :type iters: int, optional
+        :param batch_size: batch size used on each GPU; if the device is cpu, the batch size
+         is used as-is for training
+        :type batch_size: int, optional
+        :param checkpoint_after_iter: number of epochs after which evaluation is performed
+         and a checkpoint is saved
+        :type checkpoint_after_iter: int, optional
+        :param checkpoint_load_iter: the epoch whose checkpoint should be loaded
+        :type checkpoint_load_iter: int, optional
+        :param temp_path: path to a temporary directory for saving models, logs and tensorboard graphs.
+         If temp_path='' the `cfg.save_dir` will be used instead.
+        :type temp_path: str, optional
+        """
+        self.cfg.defrost()
+
+        # Nanodet specific parameters
+        if self.cfg.model.arch.head.num_classes != len(self.cfg.class_names):
+            raise ValueError(
+                "cfg.model.arch.head.num_classes must equal len(cfg.class_names), "
+                "but got {} and {}".format(
+                    self.cfg.model.arch.head.num_classes, len(self.cfg.class_names)
+                )
+            )
+        if self.warmup_steps is not None:
+            self.cfg.schedule.warmup.warmup_steps = self.warmup_steps
+        if self.warmup_ratio is not None:
+            self.cfg.schedule.warmup.warmup_ratio = self.warmup_ratio
+        if self.lr_schedule_T_max is not None:
+            self.cfg.schedule.lr_schedule.T_max = self.lr_schedule_T_max
+        if self.lr_schedule_eta_min is not None:
+            self.cfg.schedule.lr_schedule.eta_min = self.lr_schedule_eta_min
+        if self.grad_clip is not None:
+            self.cfg.grad_clip = self.grad_clip
+
+        # OpenDR
+        if lr is not None:
+            self.cfg.schedule.optimizer.lr = lr
+        if weight_decay is not None:
+            self.cfg.schedule.optimizer.weight_decay = weight_decay
+        if iters is not None:
+            self.cfg.schedule.total_epochs = iters
+        if batch_size is not None:
+            self.cfg.device.batchsize_per_gpu = batch_size
+        if checkpoint_after_iter is not None:
+            self.cfg.schedule.val_intervals = checkpoint_after_iter
+        if checkpoint_load_iter is not None:
+            self.cfg.schedule.resume = checkpoint_load_iter
+        if temp_path != '':
+            self.cfg.save_dir = temp_path
+
+        self.cfg.freeze()
+
+    def save(self, path=None, verbose=True):
+        """
+        Method for saving the current model and metadata in the path provided.
+        :param path: path to folder where model will be saved
+        :type path: str, optional
+        :param verbose: whether to print a success message or not, defaults to True
+        :type verbose: bool, optional
+        """
+        path = path if path is not None else self.cfg.save_dir
+        model = self.cfg.check_point_name
+        os.makedirs(path, exist_ok=True)
+
+        metadata = {"model_paths": [], "framework": "pytorch", "format": "pth",
+                    "has_data": False, "inference_params": {}, "optimized": False,
+                    "optimizer_info": {}, "classes": self.classes}
+
+        param_filepath = "nanodet_{}.pth".format(model)
+        metadata["model_paths"].append(param_filepath)
+
+        logger = self.logger if verbose else None
+        if self.task is None:
+            print("You do not have call a task yet, only the state of the loaded or initialized model will be saved")
+            save_model_state(os.path.join(path, metadata["model_paths"][0]), self.model, None, logger)
+        else:
+            self.task.save_current_model(os.path.join(path, metadata["model_paths"][0]), logger)
+
+        with open(os.path.join(path, "nanodet_{}.json".format(model)), 'w', encoding='utf-8') as f:
+            json.dump(metadata, f, ensure_ascii=False, indent=4)
+        if verbose:
+            print("Model metadata saved.")
+        return True
+
+    def load(self, path=None, verbose=True):
+        """
+        Loads the model from the path provided.
+        :param path: path of the directory where the model was saved
+        :type path: str, optional
+        :param verbose: whether to print a success message or not, defaults to True
+        :type verbose: bool, optional
+        """
+        path = path if path is not None else self.cfg.save_dir
+        model = self.cfg.check_point_name
+        if verbose:
+            print("Model name:", model, "-->", os.path.join(path, model + ".json"))
+        with open(os.path.join(path, "nanodet_{}.json".format(model))) as f:
+            metadata = json.load(f)
+
+        logger = Logger(-1, path, False) if verbose else None
+        ckpt = torch.load(os.path.join(path, metadata["model_paths"][0]), map_location=torch.device(self.device))
+        self.model = load_model_weight(self.model, ckpt, logger)
+        if verbose:
+            logger.log("Loaded model weight from {}".format(path))
+
+    def download(self, path=None, mode="pretrained", verbose=False,
+                 url=OPENDR_SERVER_URL + "/perception/object_detection_2d/nanodet/"):
+
+        """
+        Downloads all files necessary for inference, evaluation and training. Valid mode options are: ["pretrained",
+        "images", "test_data"].
+        :param path: folder to which files will be downloaded, if None self.temp_path will be used
+        :type path: str, optional
+        :param mode: one of: ["pretrained", "images", "test_data"], where "pretrained" downloads a pretrained
+        network for the model selected in the config file (i.e. self.cfg.check_point_name), "images" downloads
+        example inference data, and "test_data" downloads an additional image, an annotation file and a
+        pretrained network for training and testing
+        :type mode: str, optional
+        :param verbose: if True, additional information is printed on stdout
+        :type verbose: bool, optional
+        :param url: URL to file location on FTP server
+        :type url: str, optional
+        """
+
+        valid_modes = ["pretrained", "images", "test_data"]
+        if mode not in valid_modes:
+            raise UserWarning("mode parameter not valid:", mode, ", file should be one of:", valid_modes)
+
+        if path is None:
+            path = self.temp_path
+        if not os.path.exists(path):
+            os.makedirs(path)
+
+        if mode == "pretrained":
+
+            model = self.cfg.check_point_name
+
+            path = os.path.join(path, "nanodet_{}".format(model))
+            if not os.path.exists(path):
+                os.makedirs(path)
+
+            if verbose:
+                print("Downloading pretrained checkpoint...")
+
+            file_url = os.path.join(url, "pretrained",
+                                    "nanodet_{}".format(model),
+                                    "nanodet_{}.ckpt".format(model))
+
+            urlretrieve(file_url, os.path.join(path, "nanodet_{}.ckpt".format(model)))
+
+            if verbose:
+                print("Downloading pretrain weights if provided...")
+
+            file_url = os.path.join(url, "pretrained", "nanodet_{}".format(model),
+                                    "nanodet_{}.pth".format(model))
+            try:
+                urlretrieve(file_url, os.path.join(path, "nanodet_{}.pth".format(model)))
+
+                if verbose:
+                    print("Making metadata...")
+                metadata = {"model_paths": [], "framework": "pytorch", "format": "pth",
+                            "has_data": False, "inference_params": {}, "optimized": False,
+                            "optimizer_info": {}, "classes": self.classes}
+
+                param_filepath = "nanodet_{}.pth".format(model)
+                metadata["model_paths"].append(param_filepath)
+                with open(os.path.join(path, "nanodet_{}.json".format(model)), 'w', encoding='utf-8') as f:
+                    json.dump(metadata, f, ensure_ascii=False, indent=4)
+
+            except Exception:
+                print("Pretrained weights for this model are not provided!\n"
+                      "Only the full checkpoint will be downloaded.")
+
+                if verbose:
+                    print("Making metadata...")
+                metadata = {"model_paths": [], "framework": "pytorch", "format": "pth",
+                            "has_data": False, "inference_params": {}, "optimized": False,
+                            "optimizer_info": {}, "classes": self.classes}
+
+                param_filepath = "nanodet_{}.ckpt".format(model)
+                metadata["model_paths"].append(param_filepath)
+                with open(os.path.join(path, "nanodet_{}.json".format(model)), 'w', encoding='utf-8') as f:
+                    json.dump(metadata, f, ensure_ascii=False, indent=4)
+
+        elif mode == "images":
+            file_url = os.path.join(url, "images", "000000000036.jpg")
+            if verbose:
+                print("Downloading example image...")
+            urlretrieve(file_url, os.path.join(path, "000000000036.jpg"))
+
+        elif mode == "test_data":
+            os.makedirs(os.path.join(path, "test_data"), exist_ok=True)
+            os.makedirs(os.path.join(path, "test_data", "train"), exist_ok=True)
+            os.makedirs(os.path.join(path, "test_data", "val"), exist_ok=True)
+            os.makedirs(os.path.join(path, "test_data", "train", "JPEGImages"), exist_ok=True)
+            os.makedirs(os.path.join(path, "test_data", "train", "Annotations"), exist_ok=True)
+            os.makedirs(os.path.join(path, "test_data", "val", "JPEGImages"), exist_ok=True)
+            os.makedirs(os.path.join(path, "test_data", "val", "Annotations"), exist_ok=True)
+            # download image
+            file_url = os.path.join(url, "images", "000000000036.jpg")
+            if verbose:
+                print("Downloading image...")
+            urlretrieve(file_url, os.path.join(path, "test_data", "train", "JPEGImages", "000000000036.jpg"))
+            urlretrieve(file_url, os.path.join(path, "test_data", "val", "JPEGImages", "000000000036.jpg"))
+            # download annotations
+            file_url = os.path.join(url, "annotations", "000000000036.xml")
+            if verbose:
+                print("Downloading annotations...")
+            urlretrieve(file_url, os.path.join(path, "test_data", "train", "Annotations", "000000000036.xml"))
+            urlretrieve(file_url, os.path.join(path, "test_data", "val", "Annotations", "000000000036.xml"))
+
+    def reset(self):
+        """This method is not used in this implementation."""
+        raise NotImplementedError
+
+    def optimize(self):
+        """This method is not used in this implementation."""
+        raise NotImplementedError
+
+    def fit(self, dataset, val_dataset=None, logging_path='', verbose=True, seed=123):
+        """
+        This method is used to train the detector on a COCO-style or Pascal VOC-style dataset. Validation is
+        performed on a val_dataset if provided, otherwise validation is performed on the training dataset.
+        :param dataset: training dataset; COCO and Pascal VOC are supported as ExternalDataset types,
+        with 'coco' or 'voc' dataset_type attributes. Custom DetectionDataset types are not supported at the moment.
+        Any XML-style dataset can be used if 'voc' is used as the dataset_type.
+        :type dataset: ExternalDataset, DetectionDataset not implemented yet
+        :param val_dataset: validation dataset object
+        :type val_dataset: ExternalDataset, DetectionDataset not implemented yet
+        :param logging_path: subdirectory in temp_path to save logger outputs
+        :type logging_path: str, optional
+        :param verbose: if set to True, additional information is printed to STDOUT and logger txt output,
+        defaults to True
+        :type verbose: bool
+        :param seed: seed for reproducibility
+        :type seed: int
+        """
+
+        mkdir(self.cfg.save_dir)
+
+        if verbose:
+            self.logger = NanoDetLightningLogger(self.temp_path + "/" + logging_path)
+            self.logger.dump_cfg(self.cfg)
+
+        if seed is not None and seed != '':
+            if verbose:
+                self.logger.info("Set random seed to {}".format(seed))
+            pl.seed_everything(seed)
+
+        if verbose:
+            self.logger.info("Setting up data...")
+
+        train_dataset = build_dataset(self.cfg.data.train, dataset, self.cfg.class_names, "train")
+        val_dataset = train_dataset if val_dataset is None else \
+            build_dataset(self.cfg.data.val, val_dataset, self.cfg.class_names, "val")
+
+        evaluator = build_evaluator(self.cfg.evaluator, val_dataset)
+
+        train_dataloader = torch.utils.data.DataLoader(
+            train_dataset,
+            batch_size=self.batch_size,
+            shuffle=True,
+            num_workers=self.cfg.device.workers_per_gpu,
+            pin_memory=True,
+            collate_fn=naive_collate,
+            drop_last=True,
+        )
+        val_dataloader = torch.utils.data.DataLoader(
+            val_dataset,
+            batch_size=self.batch_size,
+            shuffle=False,
+            num_workers=self.cfg.device.workers_per_gpu,
+            pin_memory=True,
+            collate_fn=naive_collate,
+            drop_last=False,
+        )
+
+        # Load state dictionary
+        model_resume_path = (
+            os.path.join(self.temp_path, "checkpoints", "model_iter_{}.ckpt".format(self.checkpoint_load_iter))
+            if self.checkpoint_load_iter > 0 else None
+        )
+
+        if verbose:
+            self.logger.info("Creating task...")
+        self.task = TrainingTask(self.cfg, self.model, evaluator)
+
+        if self.device == "cpu":
+            gpu_ids = None
+            accelerator = None
+        elif self.device == "cuda":
+            gpu_ids = self.cfg.device.gpu_ids
+            accelerator = None if len(gpu_ids) <= 1 else "ddp"
+
+        trainer = pl.Trainer(
+            default_root_dir=self.temp_path,
+            max_epochs=self.iters,
+            gpus=gpu_ids,
+            check_val_every_n_epoch=self.checkpoint_after_iter,
+            accelerator=accelerator,
+            log_every_n_steps=self.cfg.log.interval,
+            num_sanity_val_steps=0,
+            resume_from_checkpoint=model_resume_path,
+            callbacks=[ProgressBar(refresh_rate=0)],  # disable tqdm bar
+            logger=self.logger,
+            benchmark=True,
+            gradient_clip_val=self.cfg.get("grad_clip", 0.0),
+        )
+
+        trainer.fit(self.task, train_dataloader, val_dataloader)
+
+    def eval(self, dataset, verbose=True):
+        """
+        This method performs evaluation on a given dataset and returns a dictionary with the evaluation results.
+        :param dataset: dataset object, to perform evaluation on
+        :type dataset: ExternalDataset, DetectionDataset not implemented yet
+        :param verbose: if set to True, additional information is printed to STDOUT and logger txt output,
+        defaults to True
+        :type verbose: bool
+        """
+
+        timestr = datetime.datetime.now().strftime("%Y_%m_%d_%H:%M:%S")
+        save_dir = os.path.join(self.cfg.save_dir, timestr)
+        mkdir(save_dir)
+
+        if verbose:
+            self.logger = NanoDetLightningLogger(save_dir)
+
+        self.cfg.update({"test_mode": "val"})
+
+        if verbose:
+            self.logger.info("Setting up data...")
+
+        val_dataset = build_dataset(self.cfg.data.val, dataset, self.cfg.class_names, "val")
+
+        val_dataloader = torch.utils.data.DataLoader(
+            val_dataset,
+            batch_size=self.batch_size,
+            shuffle=False,
+            num_workers=self.cfg.device.workers_per_gpu,
+            pin_memory=True,
+            collate_fn=naive_collate,
+            drop_last=False,
+        )
+        evaluator = build_evaluator(self.cfg.evaluator, val_dataset)
+
+        if verbose:
+            self.logger.info("Creating task...")
+        self.task = TrainingTask(self.cfg, self.model, evaluator)
+
+        if self.device == "cpu":
+            gpu_ids = None
+            accelerator = None
+        elif self.device == "cuda":
+            gpu_ids = self.cfg.device.gpu_ids
+            accelerator = None if len(gpu_ids) <= 1 else "ddp"
+
+        trainer = pl.Trainer(
+            default_root_dir=save_dir,
+            gpus=gpu_ids,
+            accelerator=accelerator,
+            log_every_n_steps=self.cfg.log.interval,
+            num_sanity_val_steps=0,
+            logger=self.logger,
+        )
+        if verbose:
+            self.logger.info("Starting testing...")
+        return trainer.test(self.task, val_dataloader, verbose=verbose)
+
+    def infer(self, input, threshold=0.35, verbose=True):
+        """
+        Performs inference on a single image.
+        :param input: image to perform inference on; can be an OpenDR Image or any data convertible to one
+        :type input: Image
+        :param threshold: confidence threshold
+        :type threshold: float, optional
+        :param verbose: if set to True, additional information is printed to STDOUT and logger txt output,
+        defaults to True
+        :type verbose: bool
+        :return: list of bounding boxes detected in the input image, sorted by confidence
+        :rtype: BoundingBoxList
+        """
+
+        if verbose:
+            self.logger = Logger(0, use_tensorboard=False)
+        predictor = Predictor(self.cfg, self.model, device=self.device)
+        if not isinstance(input, Image):
+            input = Image(input)
+        _input = input.opencv()
+        meta, res = predictor.inference(_input, verbose)
+
+        bounding_boxes = BoundingBoxList([])
+        for label in res[0]:
+            for box in res[0][label]:
+                score = box[-1]
+                if score > threshold:
+                    bbox = BoundingBox(left=box[0], top=box[1],
+                                       width=box[2] - box[0],
+                                       height=box[3] - box[1],
+                                       name=label,
+                                       score=score)
+                    bounding_boxes.data.append(bbox)
+        bounding_boxes.data.sort(key=lambda v: v.confidence)
+
+        return bounding_boxes
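+
+
+# A minimal end-to-end sketch of the learner API above, guarded so that importing this module
+# is unaffected. It downloads a pretrained checkpoint and an example image, loads the weights
+# and runs inference. It assumes the OpenDR FTP server used by download() is reachable and
+# that opendr.engine.data.Image provides the Image.open() helper.
+if __name__ == "__main__":
+    learner = NanodetLearner(model_to_use="m", device="cpu")
+    save_root = "./predefined_examples"
+    learner.download(save_root, mode="pretrained", verbose=True)
+    learner.load("{}/nanodet_{}".format(save_root, learner.cfg.check_point_name), verbose=True)
+    learner.download(save_root, mode="images", verbose=True)
+    img = Image.open("{}/000000000036.jpg".format(save_root))
+    boxes = learner.infer(input=img, threshold=0.35)
+    for box in boxes.data:
+        print(box.name, box.confidence)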
diff --git a/src/opendr/perception/object_detection_2d/utils/eval_utils.py b/src/opendr/perception/object_detection_2d/utils/eval_utils.py
index ef4674ce3c563ce2d8d77ffbe3407c3cb81c5ca1..772a655b7733121f7f5889289008d60db02d7fd2 100644
--- a/src/opendr/perception/object_detection_2d/utils/eval_utils.py
+++ b/src/opendr/perception/object_detection_2d/utils/eval_utils.py
@@ -267,7 +267,8 @@ class DetectionDatasetCOCOEval(DetectionEvalMetric):
                 if score < self.score_threshold:
                     continue
                 img_dets.append(np.asarray([image['id'], box[0], box[1], box[2] - box[0], box[3] - box[1], score, cls]))
-            self.detections.append(np.asarray(img_dets))
+            if img_dets:
+                self.detections.append(np.asarray(img_dets))
 
             for box_idx, box in enumerate(gt_boxes[idx, :, :]):
                 cls = gt_labels[idx, box_idx]
diff --git a/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py b/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py
index 3cb4303fe1715d77024d0cf2c6fc59cbd9d655ab..008192e8f29c9c1514b2e6c4d1ba52d8e23e7cb3 100644
--- a/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py
+++ b/src/opendr/perception/object_detection_2d/yolov3/yolov3_learner.py
@@ -572,21 +572,35 @@ class YOLOv3DetectorLearner(Learner):
                                     "yolo_voc.json")
             if verbose:
                 print("Downloading metadata...")
-            urlretrieve(file_url, os.path.join(path, "yolo_default.json"))
+            if not os.path.exists(os.path.join(path, "yolo_default.json")):
+                urlretrieve(file_url, os.path.join(path, "yolo_default.json"))
+                if verbose:
+                    print("Downloaded metadata json.")
+            elif verbose:
+                print("Metadata json file already exists.")
 
             if verbose:
                 print("Downloading params...")
             file_url = os.path.join(url, "pretrained", "yolo_voc",
                                          "yolo_voc.params")
 
-            urlretrieve(file_url,
-                        os.path.join(path, "yolo_voc.params"))
+            if not os.path.exists(os.path.join(path, "yolo_voc.params")):
+                urlretrieve(file_url, os.path.join(path, "yolo_voc.params"))
+                if verbose:
+                    print("Downloaded params.")
+            elif verbose:
+                print("Params file already exists.")
 
         elif mode == "images":
             file_url = os.path.join(url, "images", "cat.jpg")
             if verbose:
                 print("Downloading example image...")
-            urlretrieve(file_url, os.path.join(path, "cat.jpg"))
+            if not os.path.exists(os.path.join(path, "cat.jpg")):
+                urlretrieve(file_url, os.path.join(path, "cat.jpg"))
+                if verbose:
+                    print("Downloaded example image.")
+            elif verbose:
+                print("Example image already exists.")
 
         elif mode == "test_data":
             os.makedirs(os.path.join(path, "test_data"), exist_ok=True)
@@ -596,17 +610,32 @@ class YOLOv3DetectorLearner(Learner):
             file_url = os.path.join(url, "test_data", "train.txt")
             if verbose:
                 print("Downloading filelist...")
-            urlretrieve(file_url, os.path.join(path, "test_data", "train.txt"))
+            if not os.path.exists(os.path.join(path, "test_data", "train.txt")):
+                urlretrieve(file_url, os.path.join(path, "test_data", "train.txt"))
+                if verbose:
+                    print("Downloaded filelist.")
+            elif verbose:
+                print("Filelist already exists.")
             # download image
             file_url = os.path.join(url, "test_data", "Images", "000040.jpg")
             if verbose:
                 print("Downloading image...")
-            urlretrieve(file_url, os.path.join(path, "test_data", "Images", "000040.jpg"))
+            if not os.path.exists(os.path.join(path, "test_data", "Images", "000040.jpg")):
+                urlretrieve(file_url, os.path.join(path, "test_data", "Images", "000040.jpg"))
+                if verbose:
+                    print("Downloaded image.")
+            elif verbose:
+                print("Image already exists.")
             # download annotations
             file_url = os.path.join(url, "test_data", "Annotations", "000040.jpg.txt")
             if verbose:
                 print("Downloading annotations...")
-            urlretrieve(file_url, os.path.join(path, "test_data", "Annotations", "000040.jpg.txt"))
+            if not os.path.exists(os.path.join(path, "test_data", "Annotations", "000040.jpg.txt")):
+                urlretrieve(file_url, os.path.join(path, "test_data", "Annotations", "000040.jpg.txt"))
+                if verbose:
+                    print("Downloaded annotations.")
+            elif verbose:
+                print("Annotations already exist.")
 
     def optimize(self, target_device):
         """This method is not used in this implementation."""
diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini
index ac7db7fa35ba6f2b030d2e33569de930f7d8c8e5..0e7589f1d68d519166684e46dd0d8b5a37d762b7 100644
--- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini
+++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/dependencies.ini
@@ -1,6 +1,7 @@
 [runtime]
 # 'python' key expects a value using the Python requirements file format
 #  https://pip.pypa.io/en/stable/reference/pip_install/#requirements-file-format 
+
 python=torch==1.9.0
        torchvision==0.10.0
        tensorboardX>=2.0
@@ -14,7 +15,7 @@ python=torch==1.9.0
        llvmlite>=0.31.0
        numba>=0.53.0
        pyyaml>=5.3
-       scikit-image>=0.16.2
+       scikit-image>0.16.2
        easydict>=1.9
 linux=libboost-dev
 
diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/pointpillars/car/test_short.proto b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/pointpillars/car/test_short.proto
index 8247b9de53e7c558bce04980822dc6f92700c767..07c13e216c5fd2f7160bde2f32b5d9b106285fdd 100644
--- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/pointpillars/car/test_short.proto
+++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/pointpillars/car/test_short.proto
@@ -8,7 +8,7 @@ model: {
     num_class: 1
     voxel_feature_extractor: {
       module_class_name: "PillarFeatureNet"
-      num_filters: [64]
+      num_filters: [4]
       with_distance: false
     }
     middle_feature_extractor: {
@@ -16,11 +16,11 @@ model: {
     }
     rpn: {
       module_class_name: "RPN"
-      layer_nums: [3, 5, 5]
+      layer_nums: [1, 1, 1]
       layer_strides: [2, 2, 2]
-      num_filters: [64, 128, 256]
+      num_filters: [4, 4, 4]
       upsample_strides: [1, 2, 4]
-      num_upsample_filters: [128, 128, 128]
+      num_upsample_filters: [4, 4, 4]
       use_groupnorm: false
       num_groups: 32
     }
diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/tanet/car/test_short.proto b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/tanet/car/test_short.proto
index 0e4179656c068e17ce0dc542960f0efcb5a6ed5e..f1700588cc7fed9018c72033619215df84f2ac69 100644
--- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/tanet/car/test_short.proto
+++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/second_detector/configs/tanet/car/test_short.proto
@@ -16,7 +16,7 @@ model: {
     }
     rpn: {
       module_class_name: "PSA"
-      layer_nums: [3, 5, 5]
+      layer_nums: [1, 1, 1]
       layer_strides: [2, 2, 2]
       num_filters: [64, 128, 256]
       upsample_strides: [1, 2, 4]
diff --git a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/voxel_object_detection_3d_learner.py b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/voxel_object_detection_3d_learner.py
index a1ea23a39f0dce50134cbe084eabee6c845060cc..85c5a401d3853c920377df0ae28772f425d9cb6a 100644
--- a/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/voxel_object_detection_3d_learner.py
+++ b/src/opendr/perception/object_detection_3d/voxel_object_detection_3d/voxel_object_detection_3d_learner.py
@@ -20,27 +20,37 @@ import shutil
 import pathlib
 import onnxruntime as ort
 from opendr.engine.learners import Learner
-from opendr.engine.datasets import DatasetIterator, ExternalDataset, MappedDatasetIterator
+from opendr.engine.datasets import (
+    DatasetIterator,
+    ExternalDataset,
+    MappedDatasetIterator,
+)
 from opendr.engine.data import PointCloud
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.load import (
     create_model as second_create_model,
     load_from_checkpoint,
 )
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.run import (
-    compute_lidar_kitti_output, evaluate, example_convert_to_torch, train
+    compute_lidar_kitti_output,
+    evaluate,
+    example_convert_to_torch,
+    train,
 )
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.pytorch.builder import (
-    input_reader_builder, )
+    input_reader_builder,
+)
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.logger import (
-    Logger, )
+    Logger,
+)
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.pytorch.models.tanet import (
-    set_tanet_config
+    set_tanet_config,
 )
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.data.preprocess import (
-    _prep_v9, _prep_v9_infer
+    _prep_v9,
+    _prep_v9_infer,
 )
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.builder.dataset_builder import (
-    create_prep_func
+    create_prep_func,
 )
 from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.data.preprocess import (
     merge_second_batch,
@@ -81,14 +91,12 @@ class VoxelObjectDetection3DLearner(Learner):
         threshold=0.0,
         scale=1.0,
         tanet_config_path=None,
-        optimizer_params={
-            "weight_decay": 0.0001,
-        },
+        optimizer_params={"weight_decay": 0.0001},
         lr_schedule_params={
             "decay_steps": 27840,
             "decay_factor": 0.8,
             "staircase": True,
-        }
+        },
     ):
         # Pass the shared parameters on super's constructor so they can get initialized as class attributes
         super(VoxelObjectDetection3DLearner, self).__init__(
@@ -140,73 +148,109 @@ class VoxelObjectDetection3DLearner(Learner):
         if self.model is None:
             raise UserWarning("No model is loaded, cannot save.")
 
-        folder_name, _, tail = self.__extract_trailing(path)  # Extract trailing folder name from path
+        folder_name, _, tail = self.__extract_trailing(
+            path
+        )  # Extract trailing folder name from path
         # Also extract folder name without any extension if extension is erroneously provided
-        folder_name_no_ext = folder_name.split(sep='.')[0]
+        folder_name_no_ext = folder_name.split(sep=".")[0]
 
         # Extract path without folder name, by removing folder name from original path
-        path_no_folder_name = ''.join(path.rsplit(folder_name, 1))
+        path_no_folder_name = "".join(path.rsplit(folder_name, 1))
         # If tail is '', then path was a/b/c/, which leaves a trailing double '/'
-        if tail == '':
+        if tail == "":
             path_no_folder_name = path_no_folder_name[0:-1]  # Remove one '/'
 
         # Create model directory
         new_path = path_no_folder_name + folder_name_no_ext
         os.makedirs(new_path, exist_ok=True)
 
-        model_metadata = {"model_paths": [], "framework": "pytorch", "format": "", "has_data": False,
-                          "inference_params": {}, "optimized": None, "optimizer_info": {}}
+        model_metadata = {
+            "model_paths": [],
+            "framework": "pytorch",
+            "format": "",
+            "has_data": False,
+            "inference_params": {},
+            "optimized": None,
+            "optimizer_info": {},
+        }
 
         if self.model.rpn_ort_session is None:
             model_metadata["model_paths"] = [
                 folder_name_no_ext + "_vfe.pth",
                 folder_name_no_ext + "_mfe.pth",
-                folder_name_no_ext + "_rpn.pth"
+                folder_name_no_ext + "_rpn.pth",
             ]
             model_metadata["optimized"] = False
             model_metadata["format"] = "pth"
 
-            torch.save({
-                'state_dict': self.model.voxel_feature_extractor.state_dict()
-            }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0]))
-            torch.save({
-                'state_dict': self.model.middle_feature_extractor.state_dict()
-            }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][1]))
-            torch.save({
-                'state_dict': self.model.rpn.state_dict()
-            }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][2]))
+            torch.save(
+                {"state_dict": self.model.voxel_feature_extractor.state_dict()},
+                os.path.join(
+                    path_no_folder_name,
+                    folder_name_no_ext,
+                    model_metadata["model_paths"][0],
+                ),
+            )
+            torch.save(
+                {"state_dict": self.model.middle_feature_extractor.state_dict()},
+                os.path.join(
+                    path_no_folder_name,
+                    folder_name_no_ext,
+                    model_metadata["model_paths"][1],
+                ),
+            )
+            torch.save(
+                {"state_dict": self.model.rpn.state_dict()},
+                os.path.join(
+                    path_no_folder_name,
+                    folder_name_no_ext,
+                    model_metadata["model_paths"][2],
+                ),
+            )
             if verbose:
                 print("Saved Pytorch VFE, MFE and RPN sub-models.")
         else:
             model_metadata["model_paths"] = [
                 folder_name_no_ext + "_vfe.pth",
                 folder_name_no_ext + "_mfe.pth",
-                folder_name_no_ext + "_rpn.onnx"
+                folder_name_no_ext + "_rpn.onnx",
             ]
             model_metadata["optimized"] = True
             model_metadata["format"] = "onnx"
 
-            torch.save({
-                'state_dict': self.model.voxel_feature_extractor.state_dict()
-            }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][0]))
-            torch.save({
-                'state_dict': self.model.middle_feature_extractor.state_dict()
-            }, os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][1]))
+            torch.save(
+                {"state_dict": self.model.voxel_feature_extractor.state_dict()},
+                os.path.join(
+                    path_no_folder_name,
+                    folder_name_no_ext,
+                    model_metadata["model_paths"][0],
+                ),
+            )
+            torch.save(
+                {"state_dict": self.model.middle_feature_extractor.state_dict()},
+                os.path.join(
+                    path_no_folder_name,
+                    folder_name_no_ext,
+                    model_metadata["model_paths"][1],
+                ),
+            )
             # Copy already optimized model from temp path
             shutil.copy2(
                 os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"),
-                os.path.join(path_no_folder_name, folder_name_no_ext, model_metadata["model_paths"][2])
+                os.path.join(
+                    path_no_folder_name,
+                    folder_name_no_ext,
+                    model_metadata["model_paths"][2],
+                ),
             )
             if verbose:
                 print("Saved Pytorch VFE, MFE and ONNX RPN sub-models.")
 
-        with open(os.path.join(new_path, folder_name_no_ext + ".json"), 'w') as outfile:
+        with open(os.path.join(new_path, folder_name_no_ext + ".json"), "w") as outfile:
             json.dump(model_metadata, outfile)
 
     def load(
-        self,
-        path,
-        verbose=False,
+        self, path, verbose=False,
     ):
         """
         Loads the model from inside the path provided, based on the metadata .json file included.
@@ -216,17 +260,24 @@ class VoxelObjectDetection3DLearner(Learner):
         :type verbose: bool, optional
         """
 
-        model_name, _, _ = self.__extract_trailing(path)  # Trailing folder name from the path provided
+        model_name, _, _ = self.__extract_trailing(
+            path
+        )  # Trailing folder name from the path provided
 
         with open(os.path.join(path, model_name + ".json")) as metadata_file:
             metadata = json.load(metadata_file)
 
         if len(metadata["model_paths"]) == 1:
-            self.__load_from_pth(self.model, os.path.join(path, metadata["model_paths"][0]), True)
+            self.__load_from_pth(
+                self.model, os.path.join(path, metadata["model_paths"][0]), True
+            )
             if verbose:
                 print("Loaded Pytorch model.")
         else:
-            self.__load_from_pth(self.model.voxel_feature_extractor, os.path.join(path, metadata["model_paths"][0]))
+            self.__load_from_pth(
+                self.model.voxel_feature_extractor,
+                os.path.join(path, metadata["model_paths"][0]),
+            )
             self.__load_from_pth(self.model.middle_feature_extractor, os.path.join(path, metadata["model_paths"][1]))
             if verbose:
                 print("Loaded Pytorch VFE and MFE sub-model.")
@@ -267,8 +318,7 @@ class VoxelObjectDetection3DLearner(Learner):
             self.model_dir = model_dir
 
         if self.model_dir is None and (
-            self.checkpoint_load_iter != 0 or
-            self.checkpoint_after_iter != 0
+            self.checkpoint_load_iter != 0 or self.checkpoint_after_iter != 0
         ):
             raise ValueError(
                 "Can not use checkpoint_load_iter or checkpoint_after_iter if model_dir is None and load was not called before"
@@ -295,9 +345,12 @@ class VoxelObjectDetection3DLearner(Learner):
 
         if self.checkpoint_load_iter != 0:
             self.lr_scheduler = load_from_checkpoint(
-                self.model, self.mixed_optimizer,
+                self.model,
+                self.mixed_optimizer,
                 checkpoints_path / f"checkpoint_{self.checkpoint_load_iter}.pth",
-                self.lr_schedule, self.lr_schedule_params, self.device
+                self.lr_schedule,
+                self.lr_schedule_params,
+                self.device,
             )
 
         train(
@@ -343,11 +396,7 @@ class VoxelObjectDetection3DLearner(Learner):
 
         logger = Logger(silent, verbose, logging_path)
 
-        (
-            _,
-            eval_dataset_iterator,
-            ground_truth_annotations,
-        ) = self.__prepare_datasets(
+        _, eval_dataset_iterator, ground_truth_annotations = self.__prepare_datasets(
             None,
             dataset,
             self.input_config,
@@ -418,20 +467,16 @@ class VoxelObjectDetection3DLearner(Learner):
                 "point_clouds should be a PointCloud or a list of PointCloud"
             )
 
-        output = self.model(example_convert_to_torch(
-            input_data,
-            self.float_dtype,
-            device=self.device,
-        ))
+        output = self.model(
+            example_convert_to_torch(input_data, self.float_dtype, device=self.device)
+        )
 
-        if (
-            self.model_config.rpn.module_class_name == "PSA" or
-            self.model_config.rpn.module_class_name == "RefineDet"
-        ):
+        if self.model_config.rpn.module_class_name in ("PSA", "RefineDet"):
             output = output[-1]
 
         annotations = compute_lidar_kitti_output(
-            output, self.center_limit_range, self.class_names, None)
+            output, self.center_limit_range, self.class_names, None
+        )
 
         result = [BoundingBox3DList.from_kitti(anno) for anno in annotations]
 
@@ -448,7 +493,9 @@ class VoxelObjectDetection3DLearner(Learner):
         :type do_constant_folding: bool, optional
         """
         if self.model is None:
-            raise UserWarning("No model is loaded, cannot optimize. Load or train a model first.")
+            raise UserWarning(
+                "No model is loaded, cannot optimize. Load or train a model first."
+            )
         if self.model.rpn_ort_session is not None:
             raise UserWarning("Model is already optimized in ONNX.")
 
@@ -463,18 +510,24 @@ class VoxelObjectDetection3DLearner(Learner):
 
         try:
             self.__convert_rpn_to_onnx(
-                input_shape, has_refine,
-                os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"), do_constant_folding
+                input_shape,
+                has_refine,
+                os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"),
+                do_constant_folding,
             )
         except FileNotFoundError:
             # Create temp directory
             os.makedirs(self.temp_path, exist_ok=True)
             self.__convert_rpn_to_onnx(
-                input_shape, has_refine,
-                os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"), do_constant_folding
+                input_shape,
+                has_refine,
+                os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"),
+                do_constant_folding,
             )
 
-        self.__load_rpn_from_onnx(os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx"))
+        self.__load_rpn_from_onnx(
+            os.path.join(self.temp_path, "onnx_model_rpn_temp.onnx")
+        )
 
     @staticmethod
     def download(model_name, path, server_url=None):
@@ -491,46 +544,48 @@ class VoxelObjectDetection3DLearner(Learner):
 
         if server_url is None:
             server_url = os.path.join(
-                OPENDR_SERVER_URL, "perception", "object_detection_3d",
-                "voxel_object_detection_3d"
+                OPENDR_SERVER_URL,
+                "perception",
+                "object_detection_3d",
+                "voxel_object_detection_3d",
             )
 
-        url = os.path.join(
-            server_url, model_name
-        )
+        url = os.path.join(server_url, model_name)
 
         model_dir = os.path.join(path, model_name)
         os.makedirs(model_dir, exist_ok=True)
 
-        urlretrieve(os.path.join(
-            url, model_name + ".json"
-        ), os.path.join(
-            model_dir, model_name + ".json"
-        ))
+        urlretrieve(
+            os.path.join(url, model_name + ".json"),
+            os.path.join(model_dir, model_name + ".json"),
+        )
 
         try:
-            urlretrieve(os.path.join(
-                url, model_name + ".pth"
-            ), os.path.join(
-                model_dir, model_name + ".pth"
-            ))
+            urlretrieve(
+                os.path.join(url, model_name + ".pth"),
+                os.path.join(model_dir, model_name + ".pth"),
+            )
         except URLError:
-            urlretrieve(os.path.join(
-                url, model_name + ".tckpt"
-            ), os.path.join(
-                model_dir, model_name + ".pth"
-            ))
+            urlretrieve(
+                os.path.join(url, model_name + ".tckpt"),
+                os.path.join(model_dir, model_name + ".pth"),
+            )
 
         print("Downloaded model", model_name, "to", model_dir)
 
         return model_dir
 
-    def __convert_rpn_to_onnx(self, input_shape, has_refine, output_name, do_constant_folding=False, verbose=False):
+    def __convert_rpn_to_onnx(
+        self,
+        input_shape,
+        has_refine,
+        output_name,
+        do_constant_folding=False,
+        verbose=False,
+    ):
         inp = torch.randn(input_shape).to(self.device)
         input_names = ["data"]
-        output_names = [
-            "box_preds", "cls_preds", "dir_cls_preds"
-        ]
+        output_names = ["box_preds", "cls_preds", "dir_cls_preds"]
 
         if has_refine:
             output_names.append("Refine_loc_preds")
@@ -538,8 +593,14 @@ class VoxelObjectDetection3DLearner(Learner):
             output_names.append("Refine_dir_preds")
 
         torch.onnx.export(
-            self.model.rpn, inp, output_name, verbose=verbose, enable_onnx_checker=True,
-            do_constant_folding=do_constant_folding, input_names=input_names, output_names=output_names
+            self.model.rpn,
+            inp,
+            output_name,
+            verbose=verbose,
+            enable_onnx_checker=True,
+            do_constant_folding=do_constant_folding,
+            input_names=input_names,
+            output_names=output_names,
         )
 
     def __load_rpn_from_onnx(self, path):
@@ -564,7 +625,9 @@ class VoxelObjectDetection3DLearner(Learner):
 
     def __load_from_pth(self, model, path, use_original_dict=False):
         all_params = torch.load(path, map_location=self.device)
-        model.load_state_dict(all_params if use_original_dict else all_params["state_dict"])
+        model.load_state_dict(
+            all_params if use_original_dict else all_params["state_dict"]
+        )
 
     def __prepare_datasets(
         self,
@@ -578,13 +641,14 @@ class VoxelObjectDetection3DLearner(Learner):
         gt_annos,
         require_dataset=True,
     ):
-
         def create_map_point_cloud_dataset_func(is_training):
 
             prep_func = create_prep_func(
                 input_cfg if is_training else eval_input_cfg,
-                model_cfg, is_training,
-                voxel_generator, target_assigner,
+                model_cfg,
+                is_training,
+                voxel_generator,
+                target_assigner,
                 use_sampler=False,
             )
 
@@ -615,21 +679,24 @@ class VoxelObjectDetection3DLearner(Learner):
 
             if dataset.dataset_type.lower() != "kitti":
                 raise ValueError(
-                    "ExternalDataset (" + str(dataset) +
-                    ") is given as a dataset, but it is not a KITTI dataset")
+                    "ExternalDataset (" + str(dataset) + ") is given as a dataset, but it is not a KITTI dataset"
+                )
 
             dataset_path = dataset.path
 
-            if not self.input_config_prepared: 
-                input_cfg.kitti_info_path = (dataset_path + "/" +
-                                            input_cfg.kitti_info_path)
-                input_cfg.kitti_root_path = (dataset_path + "/" +
-                                            input_cfg.kitti_root_path)
-                input_cfg.record_file_path = (dataset_path + "/" +
-                                            input_cfg.record_file_path)
+            if not self.input_config_prepared:
+                input_cfg.kitti_info_path = (
+                    dataset_path + "/" + input_cfg.kitti_info_path
+                )
+                input_cfg.kitti_root_path = (
+                    dataset_path + "/" + input_cfg.kitti_root_path
+                )
+                input_cfg.record_file_path = (
+                    dataset_path + "/" + input_cfg.record_file_path
+                )
                 input_cfg.database_sampler.database_info_path = (
-                    dataset_path + "/" +
-                    input_cfg.database_sampler.database_info_path)
+                    dataset_path + "/" + input_cfg.database_sampler.database_info_path
+                )
 
                 self.input_config_prepared = True
 
@@ -642,8 +709,7 @@ class VoxelObjectDetection3DLearner(Learner):
             )
         elif isinstance(dataset, DatasetIterator):
             input_dataset_iterator = MappedDatasetIterator(
-                dataset,
-                create_map_point_cloud_dataset_func(True),
+                dataset, create_map_point_cloud_dataset_func(True),
             )
         else:
             if require_dataset or dataset is not None:
@@ -656,21 +722,22 @@ class VoxelObjectDetection3DLearner(Learner):
             val_dataset_path = val_dataset.path
             if val_dataset.dataset_type.lower() != "kitti":
                 raise ValueError(
-                    "ExternalDataset (" + str(val_dataset) +
-                    ") is given as a val_dataset, but it is not a KITTI dataset"
+                    "ExternalDataset (" + str(val_dataset) + ") is given as a val_dataset, but it is not a KITTI dataset"
                 )
 
             if not self.eval_config_prepared:
-                eval_input_cfg.kitti_info_path = (val_dataset_path + "/" +
-                                                eval_input_cfg.kitti_info_path)
-                eval_input_cfg.kitti_root_path = (val_dataset_path + "/" +
-                                                eval_input_cfg.kitti_root_path)
-                eval_input_cfg.record_file_path = (val_dataset_path + "/" +
-                                                eval_input_cfg.record_file_path)
+                eval_input_cfg.kitti_info_path = (
+                    val_dataset_path + "/" + eval_input_cfg.kitti_info_path
+                )
+                eval_input_cfg.kitti_root_path = (
+                    val_dataset_path + "/" + eval_input_cfg.kitti_root_path
+                )
+                eval_input_cfg.record_file_path = (
+                    val_dataset_path + "/" + eval_input_cfg.record_file_path
+                )
                 eval_input_cfg.database_sampler.database_info_path = (
-                    val_dataset_path + "/" +
-                    eval_input_cfg.database_sampler.database_info_path)
-                
+                    val_dataset_path + "/" + eval_input_cfg.database_sampler.database_info_path
+                )
                 self.eval_config_prepared = True
 
             eval_dataset_iterator = input_reader_builder.build(
@@ -683,35 +750,34 @@ class VoxelObjectDetection3DLearner(Learner):
 
             if gt_annos is None:
                 gt_annos = [
-                    info["annos"]
-                    for info in eval_dataset_iterator.dataset.kitti_infos
+                    info["annos"] for info in eval_dataset_iterator.dataset.kitti_infos
                 ]
 
         elif isinstance(val_dataset, DatasetIterator):
             eval_dataset_iterator = MappedDatasetIterator(
-                val_dataset,
-                create_map_point_cloud_dataset_func(False),
+                val_dataset, create_map_point_cloud_dataset_func(False),
             )
         elif val_dataset is None:
             if isinstance(dataset, ExternalDataset):
                 dataset_path = dataset.path
                 if dataset.dataset_type.lower() != "kitti":
                     raise ValueError(
-                        "ExternalDataset (" + str(dataset) +
-                        ") is given as a dataset, but it is not a KITTI dataset"
+                        "ExternalDataset (" + str(dataset) + ") is given as a dataset, but it is not a KITTI dataset"
                     )
 
                 if not self.eval_config_prepared:
                     eval_input_cfg.kitti_info_path = (
-                        dataset_path + "/" + eval_input_cfg.kitti_info_path)
+                        dataset_path + "/" + eval_input_cfg.kitti_info_path
+                    )
                     eval_input_cfg.kitti_root_path = (
-                        dataset_path + "/" + eval_input_cfg.kitti_root_path)
+                        dataset_path + "/" + eval_input_cfg.kitti_root_path
+                    )
                     eval_input_cfg.record_file_path = (
-                        dataset_path + "/" + eval_input_cfg.record_file_path)
+                        dataset_path + "/" + eval_input_cfg.record_file_path
+                    )
                     eval_input_cfg.database_sampler.database_info_path = (
-                        dataset_path + "/" +
-                        eval_input_cfg.database_sampler.database_info_path)
-                        
+                        dataset_path + "/" + eval_input_cfg.database_sampler.database_info_path
+                    )
                     self.eval_config_prepared = True
 
                 eval_dataset_iterator = input_reader_builder.build(
@@ -729,13 +795,11 @@ class VoxelObjectDetection3DLearner(Learner):
                     ]
             else:
                 raise ValueError(
-                    "val_dataset is None and can't be derived from" +
-                    " the dataset object because the dataset is not an ExternalDataset"
+                    "val_dataset is None and can't be derived from " +
+                    "the dataset object because the dataset is not an ExternalDataset"
                 )
         else:
-            raise ValueError(
-                "val_dataset parameter should be an ExternalDataset or a DatasetIterator or None"
-            )
+            raise ValueError("val_dataset parameter should be an ExternalDataset or a DatasetIterator or None")
 
         return input_dataset_iterator, eval_dataset_iterator, gt_annos
 
@@ -755,7 +819,8 @@ class VoxelObjectDetection3DLearner(Learner):
             class_names,
             center_limit_range,
         ) = second_create_model(
-            self.model_config_path, device=self.device,
+            self.model_config_path,
+            device=self.device,
             optimizer_name=self.optimizer,
             optimizer_params=self.optimizer_params,
             lr=self.lr,
diff --git a/src/opendr/perception/object_tracking_2d/fair_mot/algorithm/gen_labels_mot.py b/src/opendr/perception/object_tracking_2d/fair_mot/algorithm/gen_labels_mot.py
index 32e057418d7c169bfd6f6059041af40654792b9a..7d413dc0a20c1690cfe7da99a88a56f46d4454ac 100644
--- a/src/opendr/perception/object_tracking_2d/fair_mot/algorithm/gen_labels_mot.py
+++ b/src/opendr/perception/object_tracking_2d/fair_mot/algorithm/gen_labels_mot.py
@@ -18,7 +18,8 @@ def gen_labels_mot(
     tid_curr = 0
     tid_last = -1
     for seq in seqs:
-        seq_info = open(osp.join(seq_root, seq, "seqinfo.ini")).read()
+        with open(osp.join(seq_root, seq, "seqinfo.ini")) as f:
+            seq_info = f.read()
         seq_width = int(
             seq_info[seq_info.find("imWidth=") + 8: seq_info.find("\nimHeight")]
         )
diff --git a/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/ab3dmot.py b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/ab3dmot.py
index 19b0e1d761182c6f7993206ae6ec189ad31764f4..a90e9704152f23f0727a985d8e914407cd63a129 100644
--- a/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/ab3dmot.py
+++ b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/ab3dmot.py
@@ -15,20 +15,8 @@
 import numpy as np
 from opendr.engine.target import BoundingBox3DList, TrackingAnnotation3DList
 from scipy.optimize import linear_sum_assignment
+from opendr.perception.object_tracking_3d.ab3dmot.algorithm.core import convert_3dbox_to_8corner, iou3D
 from opendr.perception.object_tracking_3d.ab3dmot.algorithm.kalman_tracker_3d import KalmanTracker3D
-from opendr.perception.object_detection_3d.voxel_object_detection_3d.second_detector.core.box_np_ops import (
-    center_to_corner_box3d,
-)
-from numba.cuda.cudadrv.error import CudaSupportError
-
-try:
-    from opendr.perception.object_detection_3d.voxel_object_detection_3d.\
-        second_detector.core.non_max_suppression.nms_gpu import (
-            rotate_iou_gpu_eval as iou3D,
-        )
-except (CudaSupportError, ValueError):
-    def iou3D(boxes, qboxes, criterion=-1):
-        return np.ones((boxes.shape[0], qboxes.shape[0]))
 
 
 class AB3DMOT():
@@ -46,9 +34,10 @@ class AB3DMOT():
 
         self.max_staleness = max_staleness
         self.min_updates = min_updates
-        self.frame = frame
+        self.frame = frame - 1
+        self.starting_frame = frame - 1
         self.tracklets = []
-        self.last_tracklet_id = 1
+        self.last_tracklet_id = 0
         self.iou_threshold = iou_threshold
 
         self.state_dimensions = state_dimensions
@@ -60,6 +49,8 @@ class AB3DMOT():
 
     def update(self, detections: BoundingBox3DList):
 
+        self.frame += 1
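+        # The frame counter is advanced up front so the tracks reported below carry the current frame index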
+
         if len(detections) > 0:
 
             predictions = np.zeros([len(self.tracklets), self.measurement_dimensions])
@@ -68,18 +59,16 @@ class AB3DMOT():
                 box = tracklet.predict().reshape(-1)[:self.measurement_dimensions]
                 predictions[i] = [*box]
 
-            detection_corners = center_to_corner_box3d(
-                np.array([box.location for box in detections.boxes]),
-                np.array([box.dimensions for box in detections.boxes]),
-                np.array([box.rotation_y for box in detections.boxes]),
-            )
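+            # Convert each detected box into its 8 corner points for 3D IoU computation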
+            detection_corners = [
+                convert_3dbox_to_8corner(np.array([*box.location, box.rotation_y, *box.dimensions]))
+                for box in detections.boxes
+            ]
 
             if len(predictions) > 0:
-                prediction_corners = center_to_corner_box3d(
-                    predictions[:, :3],
-                    predictions[:, 4:],
-                    predictions[:, 3],
-                )
+                prediction_corners = [
+                    convert_3dbox_to_8corner(p)
+                    for p in predictions
+                ]
             else:
                 prediction_corners = np.zeros((0, 8, 3))
 
@@ -115,22 +104,22 @@ class AB3DMOT():
                     tracked_boxes.append(tracklet.tracking_bounding_box_3d(self.frame))
 
         result = TrackingAnnotation3DList(tracked_boxes)
-
-        self.frame += 1
-
         return result
 
     def reset(self):
-        self.frame = 0
+        self.frame = self.starting_frame
         self.tracklets = []
-        self.last_tracklet_id = 1
+        self.last_tracklet_id = 0
 
 
 def associate(detection_corners, prediction_corners, iou_threshold):
 
-    ious = iou3D(detection_corners, prediction_corners)
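+    # Pairwise 3D IoU between every detection and every predicted tracklet box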
+    iou_matrix = np.zeros((len(detection_corners), len(prediction_corners)), dtype=np.float32)
+    for d, det in enumerate(detection_corners):
+        for t, trk in enumerate(prediction_corners):
+            iou_matrix[d, t] = iou3D(det, trk)[0]
 
-    detection_match_ids, prediction_match_ids = linear_sum_assignment(-ious)
+    detection_match_ids, prediction_match_ids = linear_sum_assignment(-iou_matrix)
     unmatched_detections = []
     unmatched_predictions = []
 
@@ -148,7 +137,7 @@ def associate(detection_corners, prediction_corners, iou_threshold):
         detection_id = detection_match_ids[i]
         prediction_id = prediction_match_ids[i]
 
-        if ious[detection_id, prediction_id] < iou_threshold:
+        if iou_matrix[detection_id, prediction_id] < iou_threshold:
             unmatched_detections.append(detection_id)
             unmatched_predictions.append(prediction_id)
         else:
diff --git a/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/core.py b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/core.py
new file mode 100644
index 0000000000000000000000000000000000000000..3b382d5bb68cd5e89488b0ce458e9eda31e9f043
--- /dev/null
+++ b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/core.py
@@ -0,0 +1,127 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+from typing import List
+import numba
+import copy
+import numpy as np
+from scipy.spatial import ConvexHull
+
+
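+# Shoelace formula: area of a simple 2D polygon given its vertex x/y coordinates.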
+@numba.jit
+def polygon_area(x, y):
+    return 0.5 * np.abs(np.dot(x, np.roll(y, 1)) - np.dot(y, np.roll(x, 1)))
+
+
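+# Volume of a rectangular 3D box from its 8 corners, as the product of three mutually orthogonal edge lengths.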
+@numba.jit
+def corner_box3d_volume(corners: np.ndarray):  # [8, 3] -> []
+
+    result = (
+        np.sqrt(np.sum((corners[0, :] - corners[1, :]) ** 2)) *
+        np.sqrt(np.sum((corners[1, :] - corners[2, :]) ** 2)) *
+        np.sqrt(np.sum((corners[0, :] - corners[4, :]) ** 2))
+    )
+    return result
+
+
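+# Sutherland-Hodgman clipping of subject_polygon against clip_polygon; returns None for an empty intersection.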
+def polygon_clip(subject_polygon, clip_polygon):  # [(x, y)] -> [(x, y)] -> [(x, y)]
+    def is_inside(p, clip_polygon1, clip_polygon2):
+        return (clip_polygon2[0] - clip_polygon1[0]) * (p[1] - clip_polygon1[1]) > (
+            clip_polygon2[1] - clip_polygon1[1]
+        ) * (p[0] - clip_polygon1[0])
+
+    def intersection(clip_polygon1, clip_polygon2):
+        dc = [clip_polygon1[0] - clip_polygon2[0], clip_polygon1[1] - clip_polygon2[1]]
+        dp = [s[0] - e[0], s[1] - e[1]]
+        n1 = clip_polygon1[0] * clip_polygon2[1] - clip_polygon1[1] * clip_polygon2[0]
+        n2 = s[0] * e[1] - s[1] * e[0]
+        n3 = 1.0 / (dc[0] * dp[1] - dc[1] * dp[0])
+        return [(n1 * dp[0] - n2 * dc[0]) * n3, (n1 * dp[1] - n2 * dc[1]) * n3]
+
+    outputList = subject_polygon
+    cp1 = clip_polygon[-1]
+
+    for clip_vertex in clip_polygon:
+        cp2 = clip_vertex
+        inputList = outputList
+        outputList = []
+        s = inputList[-1]
+
+        for subjectVertex in inputList:
+            e = subjectVertex
+            if is_inside(e, cp1, cp2):
+                if not is_inside(s, cp1, cp2):
+                    outputList.append(intersection(cp1, cp2))
+                outputList.append(e)
+            elif is_inside(s, cp1, cp2):
+                outputList.append(intersection(cp1, cp2))
+            s = e
+        cp1 = cp2
+        if len(outputList) == 0:
+            return None
+    return outputList
+
+
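+# Intersection polygon of two convex polygons and its area (ConvexHull.volume is the area for 2D points).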
+@numba.jit
+def convex_hull_intersection(
+    polygon1: List[tuple], polygon2: List[tuple]
+):  # [(x, y)] -> [(x, y)] -> ([(x, y)], [])
+    inter_p = polygon_clip(polygon1, polygon2)
+    if inter_p is not None:
+        hull_inter = ConvexHull(inter_p)
+        return inter_p, hull_inter.volume
+    else:
+        return None, 0.0
+
+
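+# 3D IoU of two boxes given as [8, 3] corner arrays: bird's-eye-view (x-z) polygon
+# overlap combined with the vertical (y) overlap; also returns the 2D BEV IoU.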
+def iou3D(corners1, corners2):  # [8, 3] -> [8, 3] -> ([], [])
+    # corner points are in counter-clockwise order
+    rect1 = [(corners1[i, 0], corners1[i, 2]) for i in range(3, -1, -1)]
+    rect2 = [(corners2[i, 0], corners2[i, 2]) for i in range(3, -1, -1)]
+    area1 = polygon_area(np.array(rect1)[:, 0], np.array(rect1)[:, 1])
+    area2 = polygon_area(np.array(rect2)[:, 0], np.array(rect2)[:, 1])
+    _, inter_area = convex_hull_intersection(rect1, rect2)
+    iou_2d = inter_area / (area1 + area2 - inter_area)
+    y_max = min(corners1[0, 1], corners2[0, 1])
+    y_min = max(corners1[4, 1], corners2[4, 1])
+    inter_vol = inter_area * max(0.0, y_max - y_min)
+    vol1 = corner_box3d_volume(corners1)
+    vol2 = corner_box3d_volume(corners2)
+    iou = inter_vol / (vol1 + vol2 - inter_vol)
+    return iou, iou_2d
+
+
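+# Rotation matrix about the vertical y axis by angle t.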
+@numba.jit
+def rotation_matrix_y(t):  # [] -> [3, 3]
+    c = np.cos(t)
+    s = np.sin(t)
+    return np.array([[c, 0, s], [0, 1, 0], [-s, 0, c]])
+
+
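+# Expands a 7-element box [x, y, z, ry, l, w, h] into its 8 corner points,
+# with the box anchored at its bottom face (KITTI camera convention, y pointing down).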
+def convert_3dbox_to_8corner(bbox3d_input):  # [7] -> [8, 3]
+    bbox3d = copy.copy(bbox3d_input)
+    rot_matrix = rotation_matrix_y(bbox3d[3])
+
+    l, w, h = bbox3d[4:7]
+
+    x_corners = [l / 2, l / 2, -l / 2, -l / 2, l / 2, l / 2, -l / 2, -l / 2]
+    y_corners = [0, 0, 0, 0, -h, -h, -h, -h]
+    z_corners = [w / 2, -w / 2, -w / 2, w / 2, w / 2, -w / 2, -w / 2, w / 2]
+
+    corners_3d = np.dot(rot_matrix, np.vstack([x_corners, y_corners, z_corners]))
+    corners_3d[0, :] = corners_3d[0, :] + bbox3d[0]
+    corners_3d[1, :] = corners_3d[1, :] + bbox3d[1]
+    corners_3d[2, :] = corners_3d[2, :] + bbox3d[2]
+
+    return np.transpose(corners_3d)
diff --git a/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/evaluate.py b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/evaluate.py
index d28d8d99516af9c9752b848e0bc6ddc4dea8b6b1..c6494453023cbff53601dd7357eb673fc0c42084 100644
--- a/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/evaluate.py
+++ b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/evaluate.py
@@ -284,46 +284,47 @@ class TrackingEvaluator(object):
             for boxList in input_seq_data:
                 input_seq_boxes += boxList.boxes
 
-            f_data = [[] for x in range(input_seq_boxes[-1].frame + 1)]
+            f_data = [[] for x in range(len(input_seq_data))]
 
-            for TrackingAnnotation3D in input_seq_boxes:
+            for trackingAnnotation3D in input_seq_boxes:
                 # KITTI tracking benchmark data format:
                 # (frame,tracklet_id,objectType,truncation,occlusion,alpha,x1,y1,x2,y2,h,w,l,X,Y,Z,ry)
 
-                if not any([s for s in classes if s == TrackingAnnotation3D.name.lower()]):
+                if not any([s for s in classes if s == trackingAnnotation3D.name.lower()]):
                     continue
                 # get fields from table
-                t_data.frame = int(TrackingAnnotation3D.frame)
-                t_data.track_id = int(TrackingAnnotation3D.id)
-                t_data.obj_type = TrackingAnnotation3D.name.lower()  # object type [car, pedestrian, cyclist, ...]
+                t_data.frame = int(trackingAnnotation3D.frame)
+                t_data.track_id = int(trackingAnnotation3D.id)
+                t_data.obj_type = trackingAnnotation3D.name.lower()  # object type [car, pedestrian, cyclist, ...]
                 t_data.truncation = int(
-                    TrackingAnnotation3D.truncated
+                    trackingAnnotation3D.truncated
                 )  # truncation [-1,0,1,2]
                 t_data.occlusion = int(
-                    TrackingAnnotation3D.occluded
+                    trackingAnnotation3D.occluded
                 )  # occlusion  [-1,0,1,2]
-                t_data.obs_angle = float(TrackingAnnotation3D.alpha)  # observation angle [rad]
-                t_data.x1 = float(TrackingAnnotation3D.bbox2d[0])  # left   [px]
-                t_data.y1 = float(TrackingAnnotation3D.bbox2d[1])  # top    [px]
-                t_data.x2 = float(TrackingAnnotation3D.bbox2d[2])  # right  [px]
-                t_data.y2 = float(TrackingAnnotation3D.bbox2d[3])  # bottom [px]
-                t_data.h = float(TrackingAnnotation3D.dimensions[0])  # height [m]
-                t_data.w = float(TrackingAnnotation3D.dimensions[1])  # width  [m]
-                t_data.length = float(TrackingAnnotation3D.dimensions[2])  # length [m]
-                t_data.X = float(TrackingAnnotation3D.location[0])  # X [m]
-                t_data.Y = float(TrackingAnnotation3D.location[1])  # Y [m]
-                t_data.Z = float(TrackingAnnotation3D.location[2])  # Z [m]
-                t_data.yaw = float(TrackingAnnotation3D.rotation_y)  # yaw angle [rad]
-                t_data.score = float(TrackingAnnotation3D.confidence)
+                t_data.obs_angle = float(trackingAnnotation3D.alpha)  # observation angle [rad]
+                t_data.x1 = float(trackingAnnotation3D.bbox2d[0])  # left   [px]
+                t_data.y1 = float(trackingAnnotation3D.bbox2d[1])  # top    [px]
+                t_data.x2 = float(trackingAnnotation3D.bbox2d[2])  # right  [px]
+                t_data.y2 = float(trackingAnnotation3D.bbox2d[3])  # bottom [px]
+                t_data.h = float(trackingAnnotation3D.dimensions[0])  # height [m]
+                t_data.w = float(trackingAnnotation3D.dimensions[1])  # width  [m]
+                t_data.length = float(trackingAnnotation3D.dimensions[2])  # length [m]
+                t_data.X = float(trackingAnnotation3D.location[0])  # X [m]
+                t_data.Y = float(trackingAnnotation3D.location[1])  # Y [m]
+                t_data.Z = float(trackingAnnotation3D.location[2])  # Z [m]
+                t_data.yaw = float(trackingAnnotation3D.rotation_y)  # yaw angle [rad]
+                t_data.score = float(trackingAnnotation3D.confidence)
 
                 # do not consider objects marked as invalid
-                if t_data.track_id is -1 and t_data.obj_type != "dontcare":
+                if t_data.track_id == -1 and t_data.obj_type != "dontcare":
                     continue
 
                 idx = t_data.frame
                 # check if length for frame data is sufficient
                 if idx >= len(f_data):
-                    raise ValueError("Frame " + str(idx) + "is out of range")
+                    raise ValueError("Frame " + str(idx) + " is out of range")
 
                 id_frame = (t_data.frame, t_data.track_id)
                 if id_frame in id_frame_cache and not loading_groundtruth:
diff --git a/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/kalman_tracker_3d.py b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/kalman_tracker_3d.py
index 5f73a56a78a23ebb702eff6cdcd9ada688d29595..502b4bf94cc6c7977bbd61a857502a7213ce37a5 100644
--- a/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/kalman_tracker_3d.py
+++ b/src/opendr/perception/object_tracking_3d/ab3dmot/algorithm/kalman_tracker_3d.py
@@ -135,7 +135,7 @@ class KalmanTracker3D():
         return TrackingAnnotation3D(
             self.name, self.truncated, self.occluded,
             self.alpha, self.bbox2d,
-            self.kalman_filter.x[4:].reshape(-1),
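+            # x[4:7] holds the box dimensions; trailing state entries (e.g. velocity terms) are excluded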
+            self.kalman_filter.x[4:7].reshape(-1),
             self.kalman_filter.x[:3].reshape(-1),
             float(self.kalman_filter.x[3]),
             self.id,
diff --git a/src/opendr/perception/object_tracking_3d/ab3dmot/object_tracking_3d_ab3dmot_learner.py b/src/opendr/perception/object_tracking_3d/ab3dmot/object_tracking_3d_ab3dmot_learner.py
index 067410f9924f33baf6e0ab51033537479843a611..1d72586cdea872e293ef61634eb4bda98accad6d 100644
--- a/src/opendr/perception/object_tracking_3d/ab3dmot/object_tracking_3d_ab3dmot_learner.py
+++ b/src/opendr/perception/object_tracking_3d/ab3dmot/object_tracking_3d_ab3dmot_learner.py
@@ -49,6 +49,7 @@ class ObjectTracking3DAb3dmotLearner(Learner):
         self.covariance_matrix = covariance_matrix
         self.process_uncertainty_matrix = process_uncertainty_matrix
         self.iou_threshold = iou_threshold
+        self.model = None
 
         self.infers_count = 0
         self.infers_time = 0
diff --git a/src/opendr/perception/object_tracking_3d/datasets/kitti_tracking.py b/src/opendr/perception/object_tracking_3d/datasets/kitti_tracking.py
index 6332b8889c31da2303db3871f529ecb52c09c632..5be2974301f444f3783cf7039c0a8b5fb1c4c1c5 100644
--- a/src/opendr/perception/object_tracking_3d/datasets/kitti_tracking.py
+++ b/src/opendr/perception/object_tracking_3d/datasets/kitti_tracking.py
@@ -247,10 +247,10 @@ class KittiTrackingDatasetIterator(DatasetIterator):
 
                 if frame not in results:
                     results[frame] = []
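+                    # Track the largest frame index even when a frame contains only DontCare boxes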
+                    max_frame = max(max_frame, frame)
 
                 if not (remove_dontcare and box.name == "DontCare"):
                     results[frame].append(box)
-                    max_frame = max(max_frame, frame)
 
             if return_format == "tracking":
 
diff --git a/src/opendr/perception/panoptic_segmentation/README.md b/src/opendr/perception/panoptic_segmentation/README.md
index 1fc4b77ea213593c6df90b76e720a86850dee4bf..7f5b602dd3baa17a0c2f9bf8b25fcc1ebce7439a 100644
--- a/src/opendr/perception/panoptic_segmentation/README.md
+++ b/src/opendr/perception/panoptic_segmentation/README.md
@@ -36,7 +36,7 @@ Please note that the original repository is heavily based on
 
 ## Example Usage
 
-More code snippets can be found in [example_usage.py](../../../../projects/perception/panoptic_segmentation/efficient_ps/example_usage.py) with the corresponding [readme](../../../../projects/perception/panoptic_segmentation/efficient_ps/README.md).
+More code snippets can be found in [example_usage.py](../../../../projects/python/perception/panoptic_segmentation/efficient_ps/example_usage.py) with the corresponding [readme](../../../../projects/python/perception/panoptic_segmentation/efficient_ps/README.md).
 
 **Prepare the downloaded Cityscapes dataset** (see the [datasets' readme](./datasets/README.md) as well)
 ```python
diff --git a/src/opendr/perception/panoptic_segmentation/efficient_ps/efficient_ps_learner.py b/src/opendr/perception/panoptic_segmentation/efficient_ps/efficient_ps_learner.py
index 469bd016c16b04802a33c72ced0aea5ca26fe303..4036a82c127ea6e9fd56cda63445f11668149d6e 100644
--- a/src/opendr/perception/panoptic_segmentation/efficient_ps/efficient_ps_learner.py
+++ b/src/opendr/perception/panoptic_segmentation/efficient_ps/efficient_ps_learner.py
@@ -306,17 +306,18 @@ class EfficientPsLearner(Learner):
             warnings.warn('The current model has not been trained.')
         self.model.eval()
 
-        # Build the data pipeline
-        test_pipeline = Compose(self._cfg.test_pipeline[1:])
-        device = next(self.model.parameters()).device
-
-        # Convert to the format expected by the mmdetection API
         single_image_mode = False
         if isinstance(batch, Image):
             batch = [batch]
             single_image_mode = True
+
+        # Convert to the format expected by the mmdetection API
         mmdet_batch = []
+        device = next(self.model.parameters()).device
         for img in batch:
+            # Change the processing size according to the input image
+            self._cfg.test_pipeline[1]['img_scale'] = img.data.shape[1:]
+            test_pipeline = Compose(self._cfg.test_pipeline[1:])
             # Convert from OpenDR convention (CHW/RGB) to the expected format (HWC/BGR)
             img_ = img.convert('channels_last', 'bgr')
             mmdet_img = {'filename': None, 'img': img_, 'img_shape': img_.shape, 'ori_shape': img_.shape}
@@ -455,15 +456,15 @@ class EfficientPsLearner(Learner):
         """
         if mode == 'model':
             models = {
-                'cityscapes': f'{OPENDR_SERVER_URL}perception/panoptic_segmentation/models/model_cityscapes.pth',
-                'kitti': f'{OPENDR_SERVER_URL}perception/panoptic_segmentation/models/model_kitti.pth'
+                'cityscapes': f'{OPENDR_SERVER_URL}perception/panoptic_segmentation/efficient_ps/models/model_cityscapes.pth',
+                'kitti': f'{OPENDR_SERVER_URL}perception/panoptic_segmentation/efficient_ps/models/model_kitti.pth'
             }
             if trained_on not in models.keys():
                 raise ValueError(f'Could not find model weights pre-trained on {trained_on}. '
                                  f'Valid options are {list(models.keys())}')
             url = models[trained_on]
         elif mode == 'test_data':
-            url = f'{OPENDR_SERVER_URL}perception/panoptic_segmentation/test_data/test_data.zip'
+            url = f'{OPENDR_SERVER_URL}perception/panoptic_segmentation/efficient_ps/test_data.zip'
         else:
             raise ValueError('Invalid mode. Valid options are ["model", "test_data"]')
 
@@ -481,8 +482,12 @@ class EfficientPsLearner(Learner):
 
             return update_to
 
-        with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc=f'Downloading {filename}') as pbar:
-            urllib.request.urlretrieve(url, filename, pbar_hook(pbar))
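+        # Reuse an already-downloaded file instead of fetching it again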
+        if os.path.exists(filename) and os.path.isfile(filename):
+            print(f'File already downloaded: {filename}')
+        else:
+            with tqdm(unit='B', unit_scale=True, unit_divisor=1024, miniters=1, desc=f'Downloading {filename}') \
+                    as pbar:
+                urllib.request.urlretrieve(url, filename, pbar_hook(pbar))
         return filename
 
     @staticmethod
diff --git a/src/opendr/simulation/human_model_generation/dependencies.ini b/src/opendr/simulation/human_model_generation/dependencies.ini
index a98d181d66f43d17d9e318ad60b3e585b0162df9..580c774824e32327c64b133629d6ba3652654e20 100644
--- a/src/opendr/simulation/human_model_generation/dependencies.ini
+++ b/src/opendr/simulation/human_model_generation/dependencies.ini
@@ -7,7 +7,7 @@ python=torch==1.9.0
        opencv-python==4.5.1.48
        pillow>=8.3.2
        trimesh==3.5.23
-       scikit-image>=0.16.2
+       scikit-image>0.16.2
        matplotlib>=2.2.2
 
 opendr=opendr-toolkit-engine
diff --git a/src/opendr/simulation/human_model_generation/utilities/PIFu/lib/mesh_util.py b/src/opendr/simulation/human_model_generation/utilities/PIFu/lib/mesh_util.py
index d1c450ff9e144838f2cab3ecb18f3935f82c3dd5..0ad38a66f14bcc12a300bd83cf882864b34c143b 100644
--- a/src/opendr/simulation/human_model_generation/utilities/PIFu/lib/mesh_util.py
+++ b/src/opendr/simulation/human_model_generation/utilities/PIFu/lib/mesh_util.py
@@ -42,7 +42,7 @@ def reconstruction(net, cuda, calib_tensor,
 
     # Finally we do marching cubes
     try:
-        verts, faces, normals, values = measure.marching_cubes_lewiner(sdf, 0.5)
+        verts, faces, normals, values = measure.marching_cubes(sdf, 0.5)
         # transform verts into world coordinate system
         verts = np.matmul(mat[:3, :3], verts.T) + mat[:3, 3:4]
         verts = verts.T
diff --git a/tests/Makefile b/tests/Makefile
index 3c2797ee0e919799a5918137acdfb3a56ceec138..b5e23d8a6cc78d6be10518d3b4c0791674f09bd4 100644
--- a/tests/Makefile
+++ b/tests/Makefile
@@ -57,7 +57,7 @@ $(BUILD_DIR)/test_face_recognition:
 	@+echo "Building face recognition test..."
 	$(CC)  $(CFLAGS) -o $(BUILD_DIR)/test_face_recognition sources/c_api/test_face_recognition.c $(INC) $(OPENDR_INC) $(OPENDR_LD) $(LD)
 
-FMP_INC = -I$(OPENDR_HOME)/projects/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include
+FMP_INC = -I$(OPENDR_HOME)/projects/python/perception/slam/full_map_posterior_gmapping/src/openslam_gmapping/include
 $(BUILD_DIR)/test_fmp_gmapping:
 	@+echo "Building Full-Map-Posterior GMapping test..."
 	$(CPP)  $(CFLAGS) -o $(BUILD_DIR)/test_fmp_gmapping sources/c_api/test_fmp_gmapping.cpp -lboost_unit_test_framework $(INC) $(OPENDR_INC) $(OPENDR_LD) $(LD) $(FMP_INC)
diff --git a/tests/sources/tools/control/mobile_manipulation/run_ros.sh b/tests/sources/tools/control/mobile_manipulation/run_ros.sh
index ee27242c6663b138713f2a8a55792092fe5cbee7..b166d2f3839e972986786f98cf0cc86226889232 100644
--- a/tests/sources/tools/control/mobile_manipulation/run_ros.sh
+++ b/tests/sources/tools/control/mobile_manipulation/run_ros.sh
@@ -1,4 +1,4 @@
-source ${OPENDR_HOME}/projects/control/mobile_manipulation/mobile_manipulation_ws/devel/setup.bash
+source ${OPENDR_HOME}/projects/python/control/mobile_manipulation/mobile_manipulation_ws/devel/setup.bash
 roscore &
 sleep 5
 roslaunch mobile_manipulation_rl pr2_analytical.launch &
\ No newline at end of file
diff --git a/tests/sources/tools/perception/object_detection_2d/nanodet/__init__.py b/tests/sources/tools/perception/object_detection_2d/nanodet/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/sources/tools/perception/object_detection_2d/nanodet/test_nanodet.py b/tests/sources/tools/perception/object_detection_2d/nanodet/test_nanodet.py
new file mode 100644
index 0000000000000000000000000000000000000000..583404d933e4f4c2b55c1559d62e679ed54a1556
--- /dev/null
+++ b/tests/sources/tools/perception/object_detection_2d/nanodet/test_nanodet.py
@@ -0,0 +1,131 @@
+# Copyright 2020-2022 OpenDR European Project
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+import cv2
+import unittest
+import gc
+import shutil
+import os
+import numpy as np
+from opendr.perception.object_detection_2d import NanodetLearner
+from opendr.engine.datasets import ExternalDataset
+
+device = os.getenv('TEST_DEVICE') if os.getenv('TEST_DEVICE') else 'cpu'
+
+_DEFAULT_MODEL = "plus_m_416"
+
+
+def rmfile(path):
+    try:
+        os.remove(path)
+    except OSError as e:
+        print("Error: %s - %s." % (e.filename, e.strerror))
+
+
+def rmdir(_dir):
+    try:
+        shutil.rmtree(_dir)
+    except OSError as e:
+        print("Error: %s - %s." % (e.filename, e.strerror))
+
+
+class TestNanodetLearner(unittest.TestCase):
+
+    @classmethod
+    def setUpClass(cls):
+        print("\n\n**********************************\nTEST Nanodet Learner\n"
+              "**********************************")
+
+        cls.temp_dir = os.path.join(".", "tests", "sources", "tools", "perception", "object_detection_2d",
+                                    "nanodet", "nanodet_temp")
+        cls.detector = NanodetLearner(model_to_use=_DEFAULT_MODEL, device=device, temp_path=cls.temp_dir, batch_size=1,
+                                      iters=1, checkpoint_after_iter=2, lr=1e-4)
+        # Download all required files for testing
+        cls.detector.download(path=cls.temp_dir, mode="pretrained")
+        cls.detector.download(path=cls.temp_dir, mode="images")
+        cls.detector.download(path=cls.temp_dir, mode="test_data")
+
+    @classmethod
+    def tearDownClass(cls):
+        print('Removing temporary directories for Nanodet...')
+        # Clean up downloaded files
+        rmfile(os.path.join(cls.temp_dir, "000000000036.jpg"))
+        rmdir(os.path.join(cls.temp_dir, "test_data"))
+        rmdir(os.path.join(cls.temp_dir, "nanodet_{}".format(_DEFAULT_MODEL)))
+        rmdir(os.path.join(cls.temp_dir))
+
+        del cls.detector
+        gc.collect()
+        print('Finished cleaning for Nanodet...')
+
+    def test_fit(self):
+        print('Starting training test for Nanodet...')
+        training_dataset = ExternalDataset(path=os.path.join(self.temp_dir, "test_data"), dataset_type="voc")
+        m = list(self.detector._model.parameters())[0].detach().clone().to(device)
+        self.detector.fit(dataset=training_dataset, verbose=False)
+        n = list(self.detector._model.parameters())[0].detach().clone().to(device)
+        self.assertFalse(np.array_equal(m, n),
+                         msg="Model parameters did not change after running fit.")
+        del training_dataset, m, n
+        gc.collect()
+
+        rmfile(os.path.join(self.temp_dir, "checkpoints", "model_iter_0.ckpt"))
+        rmfile(os.path.join(self.temp_dir, "checkpoints", "epoch=0-step=0.ckpt"))
+        rmdir(os.path.join(self.temp_dir, "checkpoints"))
+
+        print('Finished training test for Nanodet...')
+
+    def test_eval(self):
+        print('Starting evaluation test for Nanodet...')
+        eval_dataset = ExternalDataset(path=os.path.join(self.temp_dir, "test_data"), dataset_type="voc")
+        self.detector.load(path=os.path.join(self.temp_dir, "nanodet_{}".format(_DEFAULT_MODEL)), verbose=False)
+        results_dict = self.detector.eval(dataset=eval_dataset, verbose=False)
+        self.assertNotEqual(len(results_dict), 0,
+                            msg="Eval results dictionary list is empty.")
+        del eval_dataset, results_dict
+        gc.collect()
+
+        rmfile(os.path.join(self.temp_dir, "results.json"))
+        rmfile(os.path.join(self.temp_dir, "eval_results.txt"))
+        print('Finished evaluation test for Nanodet...')
+
+    def test_infer(self):
+        print('Starting inference test for Nanodet...')
+        self.detector.load(os.path.join(self.temp_dir, "nanodet_{}".format(_DEFAULT_MODEL)), verbose=False)
+        img = cv2.imread(os.path.join(self.temp_dir, "000000000036.jpg"))
+        self.assertIsNotNone(self.detector.infer(input=img, verbose=False),
+                             msg="Returned empty BoundingBoxList.")
+        gc.collect()
+        print('Finished inference test for Nanodet...')
+
+    def test_save_load(self):
+        print('Starting save/load test for Nanodet...')
+        self.detector.save(path=os.path.join(self.temp_dir, "test_model"), verbose=False)
+        starting_param_1 = list(self.detector._model.parameters())[0].detach().clone().to(device)
+        self.detector.model = None
+        detector2 = NanodetLearner(model_to_use=_DEFAULT_MODEL, device=device, temp_path=self.temp_dir, batch_size=1,
+                                   iters=1, checkpoint_after_iter=1, lr=1e-4)
+        detector2.load(path=os.path.join(self.temp_dir, "test_model"), verbose=False)
+        new_param = list(detector2._model.parameters())[0].detach().clone().to(device)
+        self.assertTrue(starting_param_1.allclose(new_param))
+
+        # Cleanup
+        rmfile(os.path.join(self.temp_dir, "test_model", "nanodet_{}.json".format(_DEFAULT_MODEL)))
+        rmfile(os.path.join(self.temp_dir, "test_model", "nanodet_{}.pth".format(_DEFAULT_MODEL)))
+        rmdir(os.path.join(self.temp_dir, "test_model"))
+        print('Finished save/load test for Nanodet...')
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py b/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py
index ae805ea1fde8dd52070d7591523be128aedcda0e..5e544c2de2894a541dea7c45851acc88625c9cac 100644
--- a/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py
+++ b/tests/sources/tools/perception/object_detection_3d/voxel_object_detection_3d/test_object_detection_3d.py
@@ -57,40 +57,15 @@ class TestVoxelObjectDetection3DLearner(unittest.TestCase):
                                             "second_detector", "configs", "tanet",
                                             "car", "test_short.proto")
 
-        cls.config_tanet_ped_cycle = os.path.join(".", "src", "opendr", "perception",
-                                                  "object_detection_3d",
-                                                  "voxel_object_detection_3d",
-                                                  "second_detector", "configs", "tanet",
-                                                  "ped_cycle",
-                                                  "test_short.proto")
-
         cls.config_pointpillars_car = os.path.join(
             ".", "src", "opendr", "perception", "object_detection_3d",
             "voxel_object_detection_3d", "second_detector", "configs", "pointpillars",
             "car", "test_short.proto")
 
-        cls.config_pointpillars_ped_cycle = os.path.join(
-            ".", "src", "opendr", "perception", "object_detection_3d",
-            "voxel_object_detection_3d", "second_detector", "configs", "pointpillars",
-            "ped_cycle", "test_short.proto")
-
         cls.subsets_path = os.path.join(
             ".", "src", "opendr", "perception", "object_detection_3d",
             "datasets", "nano_kitti_subsets")
 
-        cls.download_model_names = {
-            "tanet_car": "tanet_car_xyres_16",
-            "tanet_ped_cycle": "tanet_ped_cycle_xyres_16",
-            "pointpillars_car": "pointpillars_car_xyres_16",
-            "pointpillars_ped_cycle": "pointpillars_ped_cycle_xyres_16",
-        }
-
-        cls.all_configs = {
-            "tanet_car": cls.config_tanet_car,
-            "tanet_ped_cycle": cls.config_tanet_ped_cycle,
-            "pointpillars_car": cls.config_pointpillars_car,
-            "pointpillars_ped_cycle": cls.config_pointpillars_ped_cycle,
-        }
         cls.car_configs = {
             "tanet_car": cls.config_tanet_car,
             "pointpillars_car": cls.config_pointpillars_car,
@@ -102,13 +77,6 @@ class TestVoxelObjectDetection3DLearner(unittest.TestCase):
 
         print("Dataset downloaded", file=sys.stderr)
 
-        for model_name in cls.download_model_names.values():
-            VoxelObjectDetection3DLearner.download(
-                model_name, cls.temp_dir
-            )
-
-        print("Models downloaded", file=sys.stderr)
-
     @classmethod
     def tearDownClass(cls):
         # Clean up downloaded files
@@ -138,7 +106,6 @@ class TestVoxelObjectDetection3DLearner(unittest.TestCase):
             new_param = list(learner.model.parameters())[0].clone()
             self.assertFalse(torch.equal(starting_param, new_param))
 
-            del learner
             print("Fit", name, "ok", file=sys.stderr)
 
         for name, config in self.car_configs.items():
@@ -175,58 +142,11 @@ class TestVoxelObjectDetection3DLearner(unittest.TestCase):
             new_param = list(learner.model.parameters())[0].clone()
             self.assertFalse(torch.equal(starting_param, new_param))
 
-            del learner
             print("Fit iterator", name, "ok", file=sys.stderr)
 
         for name, config in self.car_configs.items():
             test_model(name, config)
 
-    def test_eval(self):
-        def test_model(name, config):
-            print("Eval", name, "start", file=sys.stderr)
-            model_path = os.path.join(self.temp_dir, self.download_model_names[name])
-            dataset = KittiDataset(self.dataset_path, self.subsets_path)
-
-            learner = VoxelObjectDetection3DLearner(model_config_path=config, device=DEVICE)
-            learner.load(model_path)
-            mAPbbox, mAPbev, mAP3d, mAPaos = learner.eval(dataset, count=2)
-
-            self.assertTrue(mAPbbox[0][0][0] > 1 and mAPbbox[0][0][0] < 95, msg=mAPbbox[0][0][0])
-
-            del learner
-            print("Eval", name, "ok", file=sys.stderr)
-
-        for name, config in self.car_configs.items():
-            test_model(name, config)
-
-    def test_infer(self):
-        def test_model(name, config):
-            print("Infer", name, "start", file=sys.stderr)
-
-            dataset = PointCloudsDatasetIterator(self.dataset_path + "/testing/velodyne_reduced")
-
-            learner = VoxelObjectDetection3DLearner(
-                model_config_path=config, device=DEVICE
-            )
-
-            result = learner.infer(
-                dataset[0]
-            )
-
-            self.assertTrue(len(result) > 0)
-
-            result = learner.infer(
-                [dataset[0], dataset[1], dataset[2]]
-            )
-            self.assertTrue(len(result) == 3)
-            self.assertTrue(len(result[0]) > 0)
-
-            del learner
-            print("Infer", name, "ok", file=sys.stderr)
-
-        for name, config in self.car_configs.items():
-            test_model(name, config)
-
     def test_save(self):
         def test_model(name, config):
             print("Save", name, "start", file=sys.stderr)
@@ -249,8 +169,6 @@ class TestVoxelObjectDetection3DLearner(unittest.TestCase):
             self.assertFalse(torch.equal(starting_param_1, starting_param_2))
             self.assertTrue(torch.equal(starting_param_1, new_param))
 
-            del learner
-            del learner2
             print("Save", name, "ok", file=sys.stderr)
 
         for name, config in self.car_configs.items():
@@ -283,8 +201,6 @@ class TestVoxelObjectDetection3DLearner(unittest.TestCase):
 
             self.assertTrue(learner2.model.rpn_ort_session is not None)
 
-            del learner
-            del learner2
             print("Optimize", name, "ok", file=sys.stderr)
 
         for name, config in self.car_configs.items():
diff --git a/tests/sources/tools/perception/object_tracking_2d/__init__.py b/tests/sources/tools/perception/object_tracking_2d/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/tests/sources/tools/perception/object_tracking_3d/ab3dmot/test_object_tracking_3d_ab3dmot.py b/tests/sources/tools/perception/object_tracking_3d/ab3dmot/test_object_tracking_3d_ab3dmot.py
index 723c0b2e158074524a804ba60a9391c58305a458..88b474617e40968dacbb29c0c4cead1d677a3389 100644
--- a/tests/sources/tools/perception/object_tracking_3d/ab3dmot/test_object_tracking_3d_ab3dmot.py
+++ b/tests/sources/tools/perception/object_tracking_3d/ab3dmot/test_object_tracking_3d_ab3dmot.py
@@ -48,6 +48,10 @@ class TestObjectTracking3DAb3dmot(unittest.TestCase):
             cls.temp_dir, True
         )
 
+        cls.use_long_tests = os.environ.get("OPENDR_USE_LONG_TESTS", "False") == "True"
+        cls.long_tracking_dataset_path = os.environ.get("OPENDR_KITTI_TRACKING_PATH", "")
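+        # These flags opt test_eval into evaluating on a locally provided KITTI tracking dataset.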
+
         print("Dataset downloaded", file=sys.stderr)
 
     @classmethod
@@ -70,11 +74,28 @@
     def test_eval(self):
 
         learner = ObjectTracking3DAb3dmotLearner()
-        results = learner.eval(self.dataset, count=1)
 
-        self.assertTrue("car" in results)
-        self.assertTrue("pedestrian" in results)
-        self.assertTrue("cyclist" in results)
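+        # Long tests evaluate on the locally provided KITTI tracking data;
+        # the default path runs a quick evaluation on the downloaded test subset (count=1).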
+        if self.use_long_tests:
+
+            self.assertTrue(len(self.long_tracking_dataset_path) > 0)
+
+            dataset = KittiTrackingDatasetIterator(
+                self.long_tracking_dataset_path, self.long_tracking_dataset_path, "tracking")
+
+            results = learner.eval(dataset)
+            self.assertTrue("car" in results)
+            self.assertTrue("pedestrian" in results)
+            self.assertTrue("cyclist" in results)
+            for k, v in results.items():
+                print(k, v, file=sys.stderr)
+        else:
+            results = learner.eval(self.dataset, count=1)
+
+            self.assertTrue("car" in results)
+            self.assertTrue("pedestrian" in results)
+            self.assertTrue("cyclist" in results)
 
     def test_infer(self):
 
diff --git a/tests/sources/tools/simulation/human_model_generation/test_human_model_generation.py b/tests/sources/tools/simulation/human_model_generation/test_human_model_generation.py
index 307e3ee22055ec713903d42d07ca5dddd3f827ce..aa2f3d3a2bd29cfa900fac7fd2408ee730cf820e 100644
--- a/tests/sources/tools/simulation/human_model_generation/test_human_model_generation.py
+++ b/tests/sources/tools/simulation/human_model_generation/test_human_model_generation.py
@@ -44,10 +44,10 @@ class TestPIFuGeneratorLearner(unittest.TestCase):
 
     def test_infer(self):
 
-        img_rgb = Image.open(os.path.join(os.environ['OPENDR_HOME'], "projects", "simulation", "human_model_generation",
-                                          "demos", "imgs_input", "rgb", "result_0004.jpg"))
-        img_msk = Image.open(os.path.join(os.environ['OPENDR_HOME'], "projects", "simulation", "human_model_generation",
-                                          "demos", "imgs_input", "msk", "result_0004.jpg"))
+        img_rgb = Image.open(os.path.join(os.environ['OPENDR_HOME'], "projects", "python", "simulation",
+                                          "human_model_generation", "demos", "imgs_input", "rgb", "result_0004.jpg"))
+        img_msk = Image.open(os.path.join(os.environ['OPENDR_HOME'], "projects", "python", "simulation",
+                                          "human_model_generation", "demos", "imgs_input", "msk", "result_0004.jpg"))
         model_3D = self.learner.infer(imgs_rgb=[img_rgb], imgs_msk=[img_msk], extract_pose=False)
 
         # Default pretrained mobilenet model detects 18 keypoints on img with id 785
diff --git a/tests/test_license.py b/tests/test_license.py
index 90f9726d7de32f5776550210f92b6f59251ee16c..e8d39d4b37da43336eb0cc08b3716e65f2e7b7de 100755
--- a/tests/test_license.py
+++ b/tests/test_license.py
@@ -98,18 +98,19 @@ class TestLicense(unittest.TestCase):
             'src/opendr/simulation/human_model_generation/utilities/PIFu',
             'src/opendr/perception/multimodal_human_centric/rgbd_hand_gesture_learner/algorithm/architectures',
             'src/opendr/perception/skeleton_based_action_recognition/algorithm',
-            'projects/data_generation/synthetic_multi_view_facial_image_generation/algorithm',
+            'projects/python/simulation/synthetic_multi_view_facial_image_generation/algorithm',
             'src/opendr/perception/semantic_segmentation/bisenet/algorithm',
             'src/opendr/perception/object_detection_2d/retinaface/algorithm',
             'src/opendr/perception/object_detection_2d/gem/algorithm',
             'src/opendr/perception/object_detection_2d/detr/algorithm',
+            'src/opendr/perception/object_detection_2d/nanodet/algorithm',
             'src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS',
             'src/opendr/perception/facial_expression_recognition/landmark_based_facial_expression_recognition',
         ]
 
         skippedFilePaths = [
             'src/opendr/perception/activity_recognition/datasets/utils/decoder.py',
-            'projects/perception/lightweight_open_pose/jetbot/utils/pid.py',
+            'projects/python/perception/lightweight_open_pose/jetbot/utils/pid.py',
             'src/opendr/perception/compressive_learning/multilinear_compressive_learning/algorithm/trainers.py',
             'src/opendr/perception/object_detection_2d/retinaface/Makefile',
             'src/opendr/perception/multimodal_human_centric/audiovisual_emotion_learner/algorithm/efficientface_modulator.py',
diff --git a/tests/test_pep8.py b/tests/test_pep8.py
index cecddcc7c7aae4ef4e1d22dfe84c3c4753137954..800c6064c418230473494f9598412ab4c6e86e05 100755
--- a/tests/test_pep8.py
+++ b/tests/test_pep8.py
@@ -32,7 +32,7 @@ skippedDirectories = [
     'dependencies',
     'lib',
     'src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS',
-    'projects/control/eagerx',
+    'projects/python/control/eagerx',
     'venv',
     'build',
 ]