Unverified commit 6113fa29, authored by ad-daniel, committed by GitHub

Merge pull request #235 from opendr-eu/merge-master-into-develop

Merge `master` into `develop`
parents d42dcc5f 092e2da1
Showing with 273 additions and 44 deletions
......@@ -2,7 +2,7 @@ name: Test Suite (master)
on:
pull_request:
types: [opened, synchronize, reopened, ready_for_review, labeled, unlabeled]
types: [opened, synchronize, reopened, labeled, unlabeled]
schedule:
- cron: '0 23 * * *'
......@@ -128,6 +128,7 @@ jobs:
- name: Upload wheel as artifact
uses: actions/upload-artifact@v2
with:
name: wheel-artifact
path:
dist/*.tar.gz
build-docker:
......@@ -145,6 +146,7 @@ jobs:
- name: Upload image artifact
uses: actions/upload-artifact@v2
with:
name: docker-artifact
path:
cpu_test.zip
test-wheel:
......@@ -205,9 +207,9 @@ jobs:
fi
echo "Installing $package package"
if [ "$package" == "opendr" ]; then
pip install ./artifact/artifact/opendr-toolkit-*.tar.gz
pip install ./artifact/wheel-artifact/opendr-toolkit-*.tar.gz
else
pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz
pip install ./artifact/wheel-artifact/opendr-toolkit-$package-*.tar.gz
fi
done < packages.txt
python -m unittest discover -s tests/sources/tools/${{ matrix.package }}
......@@ -261,10 +263,10 @@ jobs:
source venv/bin/activate
pip install wheel
# Install engine and requirements for other packages
pip install ./artifact/artifact/opendr-toolkit-engine-*.tar.gz
pip install ./artifact/wheel-artifact/opendr-toolkit-engine-*.tar.gz
# The following two are dependencies for some other packages and pip cannot automatically install them if they are not on a repo
pip install ./artifact/artifact/opendr-toolkit-compressive-learning-*.tar.gz
pip install ./artifact/artifact/opendr-toolkit-object-detection-2d-*.tar.gz
pip install ./artifact/wheel-artifact/opendr-toolkit-compressive-learning-*.tar.gz
pip install ./artifact/wheel-artifact/opendr-toolkit-object-detection-2d-*.tar.gz
# Install specific package for testing
package=$(sed "s/_/-/g" <<< ${{ matrix.package }})
......@@ -275,10 +277,10 @@ jobs:
echo "Installing $package package"
# Utils contains hyperparameter tuning
if [ "$package" == "utils" ]; then
pip install ./artifact/artifact/opendr-toolkit-hyperparameter-tuner-*.tar.gz
pip install ./artifact/wheel-artifact/opendr-toolkit-hyperparameter-tuner-*.tar.gz
else
pip install ./artifact/artifact/opendr-toolkit-$package-*.tar.gz
pip install ./artifact/wheel-artifact/opendr-toolkit-$package-*.tar.gz
fi
python -m unittest discover -s tests/sources/tools/${{ matrix.package }}
test-docker:
......@@ -315,7 +317,31 @@ jobs:
path: artifact
- name: Test docker
run: |
docker load < ./artifact/artifact/cpu_test.zip
docker load < ./artifact/docker-artifact/cpu_test.zip
docker run --name toolkit -i opendr/opendr-toolkit:cpu_test bash
docker start toolkit
docker exec -i toolkit bash -c "source bin/activate.sh && source tests/sources/tools/control/mobile_manipulation/run_ros.sh && python -m unittest discover -s tests/sources/tools/${{ matrix.package }}"
delete-docker-artifacts:
needs: [build-docker, test-docker]
if: ${{ always() }}
strategy:
matrix:
os: [ubuntu-20.04]
runs-on: ${{ matrix.os }}
steps:
- name: Delete docker artifacts
uses: geekyeggo/delete-artifact@v1
with:
name: docker-artifact
delete-wheel-artifacts:
needs: [build-wheel, test-wheel, test-wheel-separate]
if: ${{ always() }}
strategy:
matrix:
os: [ubuntu-20.04]
runs-on: ${{ matrix.os }}
steps:
- name: Delete wheel artifacts
uses: geekyeggo/delete-artifact@v1
with:
name: wheel-artifact
......@@ -12,10 +12,13 @@ Released on XX, XXth, 2022.
- Added support for `post-install` scripts and `opendr` dependencies in `.ini` files ([#201](https://github.com/opendr-eu/opendr/pull/201)).
- Bug Fixes:
- Updated wheel building pipeline to include missing files and removed unnecessary dependencies ([#200](https://github.com/opendr-eu/opendr/pull/200)).
- `panoptic_segmentation/efficient_ps`: updated dataset preparation scripts to create correct validation ground truth ([#221](https://github.com/opendr-eu/opendr/pull/221)).
- `panoptic_segmentation/efficient_ps`: added specific configuration files for the provided pretrained models ([#221](https://github.com/opendr-eu/opendr/pull/221)).
- `c_api/face_recognition`: pass key by const reference in `json_get_key_string()` ([#221](https://github.com/opendr-eu/opendr/pull/221)).
- Dependency Updates:
- `heart anomaly detection`: upgraded scikit-learn runtime dependency from 0.21.3 to 0.22 ([#198](https://github.com/opendr-eu/opendr/pull/198)).
- Relaxed all dependencies to allow future versions of non-critical tools to be used ([#201](https://github.com/opendr-eu/opendr/pull/201)).
## Version 1.0
Released on December 31th, 2021.
\ No newline at end of file
Released on December 31st, 2021.
\ No newline at end of file
......@@ -12,6 +12,7 @@
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from pathlib import Path
from opendr.engine.data import Image
......@@ -37,7 +38,10 @@ def train():
train_dataset = CityscapesDataset(path=f'{CITYSCAPES_ROOT}/train')
val_dataset = CityscapesDataset(path=f'{CITYSCAPES_ROOT}/val')
config_file = Path(sys.modules[
EfficientPsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_cityscapes.py'
learner = EfficientPsLearner(
str(config_file),
iters=2,
batch_size=1,
checkpoint_after_iter=2
......@@ -50,12 +54,19 @@ def train():
def evaluate():
val_dataset = CityscapesDataset(path=f'{CITYSCAPES_ROOT}/val')
learner = EfficientPsLearner()
config_file = Path(sys.modules[EfficientPsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_cityscapes.py'
learner = EfficientPsLearner(str(config_file))
learner.load(path=f'{DATA_ROOT}/checkpoints/model_cityscapes.pth')
eval_stats = learner.eval(val_dataset, print_results=True)
assert eval_stats # This assert is just a workaround since pyflakes does not support the NOQA comment
val_dataset = KittiDataset(path=f'{KITTI_ROOT}/val')
config_file = Path(sys.modules[EfficientPsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_kitti.py'
learner = EfficientPsLearner(str(config_file))
learner.load(path=f'{DATA_ROOT}/checkpoints/model_kitti.pth')
eval_stats = learner.eval(val_dataset, print_results=True)
assert eval_stats # This assert is just a workaround since pyflakes does not support the NOQA comment
def inference():
image_filenames = [
......@@ -65,7 +76,8 @@ def inference():
]
images = [Image.open(f) for f in image_filenames]
learner = EfficientPsLearner()
config_file = Path(sys.modules[EfficientPsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_cityscapes.py'
learner = EfficientPsLearner(str(config_file))
learner.load(path=f'{DATA_ROOT}/checkpoints/model_cityscapes.pth')
predictions = learner.infer(images)
for image, prediction in zip(images, predictions):
......
......@@ -57,7 +57,8 @@ image_filenames = [
f'{DATA_ROOT}/val/images/lindau_000003_000019.png',
]
images = [Image.open(f) for f in image_filenames]
learner = EfficientPsLearner()
config_file = 'singlegpu_cityscapes.py' # stored in efficient_ps/configs
learner = EfficientPsLearner(config_file)
learner.load('model.pth') # alternatively, one can just specify the path to the folder
predictions = learner.infer(images)
for image, prediction in zip(images, predictions):
......@@ -69,7 +70,8 @@ for image, prediction in zip(images, predictions):
from opendr.perception.panoptic_segmentation import EfficientPsLearner, CityscapesDataset
DATA_ROOT = '~/data/cityscapes'
val_dataset = CityscapesDataset(path=f'{DATA_ROOT}/val')
learner = EfficientPsLearner()
config_file = 'singlegpu_cityscapes.py' # stored in efficient_ps/configs
learner = EfficientPsLearner(config_file)
learner.load('model.pth') # alternatively, one can just specify the path to the folder
learner.eval(val_dataset, print_results=True)
```
......@@ -80,6 +82,7 @@ from opendr.perception.panoptic_segmentation import EfficientPsLearner, Cityscap
DATA_ROOT = '~/data/cityscapes'
train_dataset = CityscapesDataset(path=f'{DATA_ROOT}/training')
val_dataset = CityscapesDataset(path=f'{DATA_ROOT}/val')
learner = EfficientPsLearner()
config_file = 'singlegpu_cityscapes.py' # stored in efficient_ps/configs
learner = EfficientPsLearner(config_file)
learner.fit(train_dataset, val_dataset)
```
......@@ -268,13 +268,6 @@ class CityscapesDataset(ExternalDataset, DatasetIterator):
if label.trainId != 255 and label.trainId != -1 and label.hasInstances:
coco_categories.append({"id": label.trainId, "name": label.name})
coco_out = {
"info": {"version": "1.0"},
"images": [],
"categories": coco_categories,
"annotations": []
}
# Process splits
for split, (split_img_subdir, split_mask_subdir) in splits.items():
img_split_dir = output_path / split / 'images'
......@@ -287,6 +280,13 @@ class CityscapesDataset(ExternalDataset, DatasetIterator):
img_list = [(file.parent.name, file.stem.replace('_gtFine_instanceIds', ''), 'gtFine') for file in
mask_input_dir.glob('*/*_instanceIds.png')]
coco_out = {
"info": {"version": "1.0"},
"images": [],
"categories": coco_categories,
"annotations": []
}
# Convert to COCO detection format
with tqdm(total=len(img_list), desc=f'Converting {split}') as pbar:
with mp.Pool(processes=num_workers, initializer=_Counter.init_counter, initargs=(_Counter(0),)) as pool:
......
......@@ -15,7 +15,6 @@
import json
import multiprocessing as mp
import os
import shutil
import warnings
from functools import partial
from pathlib import Path
......@@ -43,7 +42,7 @@ class KittiDataset(ExternalDataset, DatasetIterator):
The KITTI panoptic segmentation dataset can be found on the EfficientPS website: http://panoptic.cs.uni-freiburg.de
Use the static method prepare_data() to convert the raw Cityscapes dataset to the structure below.
Use the static method prepare_data() to convert the raw KITTI panoptic segmentation dataset to the structure below.
The folder structure should look like this:
path
......@@ -277,13 +276,6 @@ class KittiDataset(ExternalDataset, DatasetIterator):
if label.trainId != 255 and label.trainId != -1 and label.hasInstances:
coco_categories.append({"id": label.trainId, "name": label.name})
coco_out = {
"info": {"version": "1.0"},
"images": [],
"categories": coco_categories,
"annotations": []
}
# Process splits
for split, (split_img_subdir, split_mask_subdir) in splits.items():
img_split_dir = output_path / split / 'images'
......@@ -304,6 +296,13 @@ class KittiDataset(ExternalDataset, DatasetIterator):
images = []
annotations = []
coco_out = {
"info": {"version": "1.0"},
"images": [],
"categories": coco_categories,
"annotations": []
}
# Convert to COCO detection format
with tqdm(total=len(img_list), desc=f'Converting {split}') as pbar:
with mp.Pool(processes=num_workers, initializer=_Counter.init_counter, initargs=(_Counter(0),)) as pool:
......@@ -408,7 +407,11 @@ def _process_data(img_id: str, image_input_dir: Path, mask_input_dir: Path, imag
# Write output
PilImage.fromarray(lbl_out).save(mask_output_dir / f'{img_id}.png')
shutil.copy(image_input_dir / f'{img_id}.png', image_output_dir / f'{img_id}.png')
# Resize input to match the size of the ground truth annotations
with PilImage.open(image_input_dir / f'{img_id}.png') as img:
img = cv2.resize(np.array(img), (1280, 384), interpolation=cv2.INTER_NEAREST)
PilImage.fromarray(img).save(image_output_dir / f'{img_id}.png')
if eval_output_dir is not None:
PilImage.fromarray(lbl).save(eval_output_dir / f'{img_id}.png')
......
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# model settings
model = dict(
type='EfficientPS',
pretrained=True,
backbone=dict(
type='tf_efficientnet_b5',
act_cfg=dict(type="Identity"),
norm_cfg=dict(type='InPlaceABN', activation='leaky_relu', activation_param=0.01, requires_grad=True),
style='pytorch'),
neck=dict(
type='TWOWAYFPN',
in_channels=[40, 64, 176, 2048],
out_channels=256,
norm_cfg=dict(type='InPlaceABN', activation='leaky_relu', activation_param=0.01, requires_grad=True),
act_cfg=None,
num_outs=4),
rpn_head=dict(
type='RPNHead',
in_channels=256,
feat_channels=256,
anchor_scales=[8],
anchor_ratios=[0.5, 1.0, 2.0],
anchor_strides=[4, 8, 16, 32],
target_means=[.0, .0, .0, .0],
target_stds=[1.0, 1.0, 1.0, 1.0],
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0 / 9.0, loss_weight=1.0)),
bbox_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=7, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
bbox_head=dict(
type='SharedFCBBoxHead',
num_fcs=2,
in_channels=256,
fc_out_channels=1024,
roi_feat_size=7,
num_classes=9,
target_means=[0., 0., 0., 0.],
target_stds=[0.1, 0.1, 0.2, 0.2],
norm_cfg=dict(type='InPlaceABN', activation='leaky_relu', activation_param=0.01, requires_grad=True),
reg_class_agnostic=False,
loss_cls=dict(
type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)),
mask_roi_extractor=dict(
type='SingleRoIExtractor',
roi_layer=dict(type='RoIAlign', out_size=14, sample_num=2),
out_channels=256,
featmap_strides=[4, 8, 16, 32]),
mask_head=dict(
type='FCNSepMaskHead',
num_convs=4,
in_channels=256,
conv_out_channels=256,
num_classes=9,
norm_cfg=dict(type='InPlaceABN', activation='leaky_relu', activation_param=0.01, requires_grad=True),
act_cfg=None,
loss_mask=dict(
type='CrossEntropyLoss', use_mask=True, loss_weight=1.0)),
semantic_head=dict(
type='EfficientPSSemanticHead',
in_channels=256,
conv_out_channels=128,
num_classes=19,
ignore_label=255,
loss_weight=1.0,
ohem=0.25,
norm_cfg=dict(type='InPlaceABN', activation='leaky_relu', activation_param=0.01, requires_grad=True),
act_cfg=None))
# model training and testing settings
train_cfg = dict(
rpn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.7,
neg_iou_thr=0.3,
min_pos_iou=0.3,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.5,
neg_pos_ub=-1,
add_gt_as_proposals=False),
allowed_border=0,
pos_weight=-1,
debug=False),
rpn_proposal=dict(
nms_across_levels=False,
nms_pre=2000,
nms_post=2000,
max_num=2000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
assigner=dict(
type='MaxIoUAssigner',
pos_iou_thr=0.5,
neg_iou_thr=0.5,
min_pos_iou=0.5,
ignore_iof_thr=-1),
sampler=dict(
type='RandomSampler',
num=256,
pos_fraction=0.25,
neg_pos_ub=-1,
add_gt_as_proposals=False),
mask_size=28,
pos_weight=-1,
debug=False))
test_cfg = dict(
rpn=dict(
nms_across_levels=False,
nms_pre=1000,
nms_post=1000,
max_num=1000,
nms_thr=0.7,
min_bbox_size=0),
rcnn=dict(
score_thr=0.5,
nms=dict(type='nms', iou_thr=0.5),
max_per_img=100,
mask_thr_binary=0.5),
panoptic=dict(
overlap_thr=0.5,
min_stuff_area=2048))
# dataset settings
img_norm_cfg = dict(
mean=[106.433, 116.617, 119.559], std=[65.496, 67.6, 74.123], to_rgb=True)
train_pipeline = [
dict(type='LoadImageFromFile'),
dict(type='LoadAnnotations', with_bbox=True, with_mask=True, with_seg=True),
dict(type='Resize', img_scale=(1280, 384), ratio_range=(0.5, 2.0), keep_ratio=True),
dict(type='RandomCrop', crop_size=(384, 1280)),
dict(type='RandomFlip', flip_ratio=0.5),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='DefaultFormatBundle'),
dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels', 'gt_masks', 'gt_semantic_seg']),
]
test_pipeline = [
dict(type='LoadImageFromFile'),
dict(
type='MultiScaleFlipAug',
img_scale=(1280, 384),
flip=False,
transforms=[
dict(type='Resize', keep_ratio=True),
dict(type='RandomFlip'),
dict(type='Normalize', **img_norm_cfg),
dict(type='Pad', size_divisor=32),
dict(type='ImageToTensor', keys=['img']),
dict(type='Collect', keys=['img']),
])
]
......@@ -12,7 +12,7 @@ python=
pillow>=8.3.2
matplotlib
scikit-image
git+git://github.com/waspinator/pycococreator.git@0.2.0
git+https://github.com/waspinator/pycococreator.git@0.2.0
git+https://github.com/mapillary/inplace_abn.git
${OPENDR_HOME}/src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS/efficientNet
${OPENDR_HOME}/src/opendr/perception/panoptic_segmentation/efficient_ps/algorithm/EfficientPS
......
......@@ -58,6 +58,7 @@ class EfficientPsLearner(Learner):
"""
def __init__(self,
config_file: str,
lr: float=.07,
iters: int=160,
batch_size: int=1,
......@@ -71,9 +72,10 @@ class EfficientPsLearner(Learner):
device: str="cuda:0",
num_workers: int=1,
seed: Optional[float]=None,
config_file: str=str(Path(__file__).parent / 'configs' / 'singlegpu_sample.py')
):
"""
:param config_file: path to a config file that contains the model and the data loading pipelines
:type config_file: str
:param lr: learning rate [training]
:type lr: float
:param iters: number of iterations [training]
......@@ -100,8 +102,6 @@ class EfficientPsLearner(Learner):
:type num_workers: int
:param seed: random seed to shuffle the data during training [training]
:type seed: float, optional
:param config_file: path to a config file that contains the model and the data loading pipelines
:type config_file: str
"""
super().__init__(lr=lr, iters=iters, batch_size=batch_size, optimizer=optimizer, temp_path=temp_path,
device=device)
......
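Since `config_file` is now the first required constructor argument, code that previously called `EfficientPsLearner()` with no arguments must be updated. A minimal sketch of the new call, mirroring the updated demos and tests above; the weights path passed to `load()` is a placeholder, not a file shipped with the toolkit:

```python
import sys
from pathlib import Path

from opendr.perception.panoptic_segmentation import EfficientPsLearner

# Resolve the packaged Cityscapes config next to the learner module,
# exactly as the updated demo scripts do.
config_file = Path(sys.modules[EfficientPsLearner.__module__].__file__).parent \
    / 'configs' / 'singlegpu_cityscapes.py'

learner = EfficientPsLearner(str(config_file), batch_size=1)
learner.load('model_cityscapes.pth')  # placeholder path to pretrained weights
```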
......@@ -12,10 +12,12 @@
# limitations under the License.
import os
import sys
import shutil
import unittest
import warnings
import zipfile
from pathlib import Path
from opendr.engine.data import Image
from opendr.engine.target import Heatmap
......@@ -54,6 +56,10 @@ class TestEfficientPsLearner(unittest.TestCase):
with zipfile.ZipFile(test_data_zipped, 'r') as f:
f.extractall(cls.test_data)
# Configuration for the weights pre-trained on Cityscapes
cls.config_file = str(Path(sys.modules[
EfficientPsLearner.__module__].__file__).parent / 'configs' / 'singlegpu_cityscapes.py')
@classmethod
def tearDownClass(cls):
# Clean up downloaded files
......@@ -61,7 +67,8 @@ class TestEfficientPsLearner(unittest.TestCase):
def test_init(self):
# Verify that the internal variables are initialized as expected by the other functions
learner = EfficientPsLearner()
learner = EfficientPsLearner(self.config_file)
self.assertFalse(learner._is_model_trained)
def test_fit(self):
......@@ -73,7 +80,7 @@ class TestEfficientPsLearner(unittest.TestCase):
warnings.simplefilter('ignore', DeprecationWarning)
val_dataset = CityscapesDataset(path=os.path.join(self.test_data, 'eval_data'))
learner = EfficientPsLearner(batch_size=1)
learner = EfficientPsLearner(self.config_file, batch_size=1)
learner.load(self.model_weights)
eval_results = learner.eval(val_dataset)
self.assertIsInstance(eval_results, dict)
......@@ -81,7 +88,7 @@ class TestEfficientPsLearner(unittest.TestCase):
def test_infer_single_image(self):
image_filename = os.path.join(self.test_data, 'infer_data', 'lindau_000001_000019.png')
image = Image.open(image_filename)
learner = EfficientPsLearner()
learner = EfficientPsLearner(self.config_file)
learner.load(self.model_weights)
prediction = learner.infer(image)
for heatmap in prediction:
......@@ -93,7 +100,7 @@ class TestEfficientPsLearner(unittest.TestCase):
os.path.join(self.test_data, 'infer_data', 'lindau_000003_000019.png'),
]
images = [Image.open(f) for f in image_filenames]
learner = EfficientPsLearner()
learner = EfficientPsLearner(self.config_file)
learner.load(self.model_weights)
predictions = learner.infer(images)
for prediction in predictions:
......@@ -104,7 +111,7 @@ class TestEfficientPsLearner(unittest.TestCase):
# The model has not been trained.
warnings.simplefilter('ignore', UserWarning)
learner = EfficientPsLearner()
learner = EfficientPsLearner(self.config_file)
temp_model_path = os.path.join(self.temp_dir, 'checkpoints')
# Make sure that no model has been written to that path yet
if os.path.exists(temp_model_path):
......@@ -116,7 +123,7 @@ class TestEfficientPsLearner(unittest.TestCase):
rmdir(temp_model_path)
def test_load_pretrained(self):
learner = EfficientPsLearner()
learner = EfficientPsLearner(self.config_file)
successful = learner.load(self.model_weights)
self.assertTrue(learner._is_model_trained)
self.assertTrue(successful)
......@@ -125,7 +132,7 @@ class TestEfficientPsLearner(unittest.TestCase):
image_filename = os.path.join(self.test_data, 'infer_data', 'lindau_000001_000019.png')
temp_prediction_path = os.path.join(self.temp_dir, 'prediction.png')
image = Image.open(image_filename)
learner = EfficientPsLearner()
learner = EfficientPsLearner(self.config_file)
learner.load(self.model_weights)
prediction = learner.infer(image)
# Make sure that no file has been written to that path yet
......