diff --git a/.cookiecutterrc b/.cookiecutterrc
new file mode 100644
index 0000000..c1c0865
--- /dev/null
+++ b/.cookiecutterrc
@@ -0,0 +1,41 @@
+# This file exists so you can easily regenerate your project.
+#
+# `cookiepatcher` is a convenient shim around `cookiecutter`
+# for regenerating projects (it will generate a .cookiecutterrc
+# automatically for any template). To use it:
+#
+# pip install cookiepatcher
+# cookiepatcher gh:itisfoundation/cookiecutter-osparc-service project-path
+#
+# See:
+# https://pypi.python.org/pypi/cookiepatcher
+#
+# Alternatively, you can run:
+#
+# cookiecutter --overwrite-if-exists --config-file=project-path/.cookiecutterrc gh:itisfoundation/cookiecutter-osparc-service
+#
+
+default_context:
+
+ _checkout: None
+ _output_dir: '/home/ordonez/osparc_projects/cookiecutter-osparc-service'
+ _repo_dir: '.'
+ _template: '.'
+ author_affiliation: 'ZMT ZurichMedTech AG'
+ author_email: 'ordonez@zmt.swiss'
+ author_name: 'Javier Garcia Ordonez'
+ contact_email: 'ordonez@zmt.swiss'
+ default_docker_registry: 'itisfoundation'
+ docker_base: 'alpine:3.8'
+ git_repo: 'github'
+ git_username: 'JavierGOrdonnez'
+ number_of_inputs: '4'
+ number_of_outputs: '4'
+ project_name: 'Medical Image Processing - Computational Service'
+ project_package_name: 'comp-medimproc'
+ project_short_description: 'Medical Image Processing - Computational Service'
+ project_slug: 'comp-medimproc'
+ project_type: 'computational'
+ release_date: '2024'
+ version: '0.1.0'
+ version_display: '0.1.0'
diff --git a/.dockerignore b/.dockerignore
new file mode 100644
index 0000000..fbeeae1
--- /dev/null
+++ b/.dockerignore
@@ -0,0 +1,47 @@
+## Common.dockerignore
+
+*
+!src/
+!service.cli/
+!docker/
+!.osparc/
+
+# Common
+README.md
+CHANGELOG.md
+docker-compose.yml
+Dockerfile
+
+# git
+.git
+.gitattributes
+.gitignore
+.git*
+
+## Common.gitignore
+
+# output folders
+build/
+output/
+out/
+
+# temporary folders
+tmp/
+
+# explicit mark
+*ignore*
+.tmp*
+
+# vscode configuration
+.vscode
+
+# make outputs
+pytest_*.xml
+.compose*
+
+# validation folder
+!validation/**/*
+# docker ignore
+!.dockerignore
+# git ignore
+!.gitignore
diff --git a/.github/dockerhub_login.bash b/.github/dockerhub_login.bash
new file mode 100755
index 0000000..29972d7
--- /dev/null
+++ b/.github/dockerhub_login.bash
@@ -0,0 +1,32 @@
+#!/bin/bash
+# http://redsymbol.net/articles/unofficial-bash-strict-mode/
+set -euo pipefail
+IFS=$'\n\t'
+
+# check needed variables are defined
+if [ ! -v DOCKER_USERNAME ] ||\
+ [ ! -v DOCKER_PASSWORD ] ||\
+ [ ! -v DOCKER_REGISTRY ]; then
+ echo "## ERROR: Please define the environs (DOCKER_USERNAME, DOCKER_PASSWORD, DOCKER_REGISTRY) in your CI settings!"
+ exit 1
+fi
+
+# check script needed variables
+if [ ! -v OWNER ]; then
+ echo "## ERROR: incorrect usage of CI. OWNER (e.g. dockerhub organization like itisfoundation or user private name) not defined!"
+ exit 1
+fi
+
+# only upstream is allowed to push to itisfoundation repo
+if [ "${OWNER,,}" != "itisfoundation" ] &&\
+ { [ ! -v DOCKER_REGISTRY ] || [ -z "${DOCKER_REGISTRY}" ] || [ "$DOCKER_REGISTRY" = "itisfoundation" ]; }; then
+ echo "## ERROR: it is not allowed to push to the main dockerhub repository from a fork!"
+ echo "## Please adapt your CI-defined environs (DOCKER_USERNAME, DOCKER_PASSWORD, DOCKER_REGISTRY)"
+ exit 1
+fi
+
+# these variable must be available securely from the CI
+echo "$DOCKER_PASSWORD" | docker login -u "$DOCKER_USERNAME" --password-stdin
+
+echo "logged into dockerhub successfully, ready to push"
+exit 0
diff --git a/.github/show_system_versions.bash b/.github/show_system_versions.bash
new file mode 100755
index 0000000..c2a3b86
--- /dev/null
+++ b/.github/show_system_versions.bash
@@ -0,0 +1,31 @@
+#!/bin/bash
+# http://redsymbol.net/articles/unofficial-bash-strict-mode/
+set -euo pipefail
+IFS=$'\n\t'
+
+echo "------------------------------ environs -----------------------------------"
+env
+
+echo "------------------------------ uname -----------------------------------"
+uname -a
+lsb_release -a
+
+echo "------------------------------ python -----------------------------------"
+if command -v python; then
+ python --version
+fi
+
+echo "------------------------------ python3 -----------------------------------"
+if command -v python3; then
+ python3 --version
+fi
+
+echo "------------------------------ docker -----------------------------------"
+if command -v docker; then
+ docker version
+fi
+
+echo "------------------------------ docker-compose -----------------------------------"
+if command -v docker-compose; then
+ docker-compose version
+fi
diff --git a/.github/workflows/build-test-publish.yml b/.github/workflows/build-test-publish.yml
new file mode 100644
index 0000000..5ff9083
--- /dev/null
+++ b/.github/workflows/build-test-publish.yml
@@ -0,0 +1,53 @@
+name: Github-CI Push/PR comp-medimproc
+
+on:
+ push:
+ pull_request:
+
+env:
+ # secrets can be set in settings/secrets on github
+ DOCKER_REGISTRY: ${{ secrets.DOCKER_REGISTRY }}
+ DOCKER_USERNAME: ${{ secrets.DOCKER_USERNAME }}
+ DOCKER_PASSWORD: ${{ secrets.DOCKER_PASSWORD }}
+
+jobs:
+ build:
+ name: building comp-medimproc
+ runs-on: ${{ matrix.os }}
+ strategy:
+ matrix:
+        python: ["3.9"]
+ os: [ubuntu-22.04]
+ fail-fast: false
+ steps:
+ - uses: actions/checkout@v3
+ - name: setup python environment
+ uses: actions/setup-python@v4
+ with:
+ python-version: ${{ matrix.python }}
+ - name: show versions
+ run: ./.github/show_system_versions.bash
+ - name: set owner variable
+ run: echo "OWNER=${GITHUB_REPOSITORY%/*}" >> $GITHUB_ENV
+ - name: set docker image tag
+ if: github.ref != 'refs/heads/master'
+ run: echo "DOCKER_IMAGE_TAG=${GITHUB_REF##*/}" >> $GITHUB_ENV
+ - name: get current image if available
+ run: make pull-latest || true
+ - name: build
+ run: |
+ make VERSION
+ make build
+ make info-build
+ - name: test
+ run: make tests
+ # - if: github.event_name == 'push' && github.ref == 'refs/heads/master'
+ # name: push
+ # run: |
+ # ./.github/dockerhub_login.bash
+ # make push
+ # - if: github.event_name == 'push' && github.ref != 'refs/heads/master'
+ # name: push
+ # run: |
+ # ./.github/dockerhub_login.bash
+ # make push-version
diff --git a/.github/workflows/check-image.yml b/.github/workflows/check-image.yml
new file mode 100644
index 0000000..0aed71b
--- /dev/null
+++ b/.github/workflows/check-image.yml
@@ -0,0 +1,24 @@
+name: Build and check image
+
+on: [push, pull_request]
+
+jobs:
+ verify-image-build:
+ runs-on: ubuntu-latest
+ steps:
+ - name: Checkout repo content
+        uses: actions/checkout@v3
+ - name: ooil version
+ uses: docker://itisfoundation/ci-service-integration-library:v1.0.4
+ with:
+ args: ooil --version
+ - name: Assemble docker compose spec
+ uses: docker://itisfoundation/ci-service-integration-library:v1.0.4
+ with:
+ args: ooil compose
+ - name: Build all images if multiple
+ uses: docker://itisfoundation/ci-service-integration-library:v1.0.4
+ with:
+ args: docker compose build
+ - name: test
+ run: make tests
diff --git a/.gitignore b/.gitignore
new file mode 100644
index 0000000..e692c51
--- /dev/null
+++ b/.gitignore
@@ -0,0 +1,27 @@
+## Common.gitignore
+
+# output folders
+build/
+output/
+out/
+
+# temporary folders
+tmp/
+
+# explicit mark
+*ignore*
+.tmp*
+
+# vscode configuration
+.vscode
+
+# make outputs
+pytest_*.xml
+.compose*
+
+# validation folder
+!validation/**/*
+# docker ignore
+!.dockerignore
+# git ignore
+!.gitignore
diff --git a/.osparc/docker-compose.overwrite.yml b/.osparc/docker-compose.overwrite.yml
new file mode 100644
index 0000000..7b9e8ce
--- /dev/null
+++ b/.osparc/docker-compose.overwrite.yml
@@ -0,0 +1,6 @@
+version: "3.7"
+services:
+ comp-medimproc:
+ build:
+ dockerfile: docker/alpine/Dockerfile
+ target: production
diff --git a/.osparc/metadata.yml b/.osparc/metadata.yml
new file mode 100644
index 0000000..b0c840e
--- /dev/null
+++ b/.osparc/metadata.yml
@@ -0,0 +1,76 @@
+name: Medical Image Processing - Computational Service
+key: simcore/services/comp/comp-medimproc
+type: computational
+integration-version: 2.0.0
+version: 0.1.0
+description: Medical Image Processing - Computational Service
+contact: ordonez@zmt.swiss
+thumbnail: https://github.com/ITISFoundation/osparc-assets/blob/cb43207b6be2f4311c93cd963538d5718b41a023/assets/default-thumbnail-cookiecutter-osparc-service.png?raw=true
+authors:
+ - name: Javier Garcia Ordonez
+ email: ordonez@zmt.swiss
+ affiliation: ZMT ZurichMedTech AG
+inputs:
+ input_1:
+ displayOrder: 1
+ label: input_1_label
+ description: The input 1 description
+ type: string
+ defaultValue: some_value(optional)
+ fileToKeyMap:
+ somefilename.ext: input_1
+ input_2:
+ displayOrder: 2
+ label: input_2_label
+ description: The input 2 description
+ type: string
+ defaultValue: some_value(optional)
+ fileToKeyMap:
+ somefilename.ext: input_2
+ input_3:
+ displayOrder: 3
+ label: input_3_label
+ description: The input 3 description
+ type: string
+ defaultValue: some_value(optional)
+ fileToKeyMap:
+ somefilename.ext: input_3
+ input_4:
+ displayOrder: 4
+ label: input_4_label
+ description: The input 4 description
+ type: string
+ defaultValue: some_value(optional)
+ fileToKeyMap:
+ somefilename.ext: input_4
+
+outputs:
+ output_1:
+ displayOrder: 1
+ label: output_1_label
+    description: The output 1 description
+ type: string
+ fileToKeyMap:
+ somefilename.ext: output_1
+ output_2:
+ displayOrder: 2
+ label: output_2_label
+    description: The output 2 description
+ type: string
+ fileToKeyMap:
+ somefilename.ext: output_2
+ output_3:
+ displayOrder: 3
+ label: output_3_label
+    description: The output 3 description
+ type: string
+ fileToKeyMap:
+ somefilename.ext: output_3
+ output_4:
+ displayOrder: 4
+ label: output_4_label
+    description: The output 4 description
+ type: string
+ fileToKeyMap:
+ somefilename.ext: output_4
+
diff --git a/.osparc/runtime.yml b/.osparc/runtime.yml
new file mode 100644
index 0000000..c0d3d9f
--- /dev/null
+++ b/.osparc/runtime.yml
@@ -0,0 +1,9 @@
+restart-policy: no-restart
+settings:
+ - name: Resources
+ type: Resources
+ value:
+ Limits:
+ NanoCPUs: 1000000000 # 100% of CPU cycles on 1 CPU
+ MemoryBytes: 2147483648 # 2 Gigabytes
+
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..37ffb58
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,105 @@
+#
+# Author: Javier Garcia Ordonez
+
+SHELL = /bin/sh
+.DEFAULT_GOAL := help
+
+export VCS_URL := $(shell git config --get remote.origin.url 2> /dev/null || echo unversioned repo)
+export VCS_REF := $(shell git rev-parse --short HEAD 2> /dev/null || echo unversioned repo)
+export VCS_STATUS := $(if $(shell git status -s 2> /dev/null || echo unversioned repo),'modified/untracked','clean')
+export BUILD_DATE := $(shell date -u +"%Y-%m-%dT%H:%M:%SZ")
+
+export DOCKER_IMAGE_NAME ?= comp-medimproc
+export DOCKER_IMAGE_TAG ?= 0.1.0
+
+OSPARC_DIR:=$(CURDIR)/.osparc
+
+APP_NAME := comp-medimproc
+
+# Builds new service version ----------------------------------------------------------------------------
+
+define _bumpversion
+ # upgrades as $(subst $(1),,$@) version, commits and tags
+ @docker run -it --rm -v $(PWD):/${DOCKER_IMAGE_NAME} \
+ -u $(shell id -u):$(shell id -g) \
+ itisfoundation/ci-service-integration-library:v1.0.4 \
+ sh -c "cd /${DOCKER_IMAGE_NAME} && bump2version --verbose --list --config-file $(1) $(subst $(2),,$@)"
+endef
+
+.PHONY: version-patch version-minor version-major
+version-patch version-minor version-major: .bumpversion.cfg ## increases service's version
+ @make compose-spec
+ @$(call _bumpversion,$<,version-)
+ @make compose-spec
+
+.PHONY: compose-spec
+compose-spec: ## runs ooil to assemble the docker-compose.yml file
+ @docker run --rm -v $(PWD):/${DOCKER_IMAGE_NAME} \
+ -u $(shell id -u):$(shell id -g) \
+ itisfoundation/ci-service-integration-library:v1.0.4 \
+ sh -c "cd /${DOCKER_IMAGE_NAME} && ooil compose"
+
+build: | compose-spec ## build docker image
+ docker compose build
+
+# To test built service locally -------------------------------------------------------------------------
+.PHONY: run-local
+run-local: ## runs image with local configuration
+ docker compose --file docker-compose-local.yml up
+
+.PHONY: publish-local
+publish-local: ## push to local oSPARC to test integration. It requires the oSPARC platform running on your computer, you can find more information here: https://github.com/ITISFoundation/osparc-simcore/blob/master/README.md
+	docker tag simcore/services/comp/${DOCKER_IMAGE_NAME}:${DOCKER_IMAGE_TAG} registry:5000/simcore/services/comp/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)
+	docker push registry:5000/simcore/services/comp/$(DOCKER_IMAGE_NAME):$(DOCKER_IMAGE_TAG)
+ @curl registry:5000/v2/_catalog | jq
+
+.PHONY: help
+help: ## this colorful help
+ @echo "Recipes for '$(notdir $(CURDIR))':"
+ @echo ""
+ @awk 'BEGIN {FS = ":.*?## "} /^[[:alpha:][:space:]_-]+:.*?## / {printf "\033[36m%-20s\033[0m %s\n", $$1, $$2}' $(MAKEFILE_LIST)
+ @echo ""
+
+
+# COOKIECUTTER -----------------------------------------------------------------
+
+.PHONY: replay
+replay: .cookiecutterrc ## re-applies cookiecutter
+ # Replaying . ...
+ @cookiecutter --no-input --overwrite-if-exists \
+ --config-file=$< \
+ --output-dir="$(abspath $(CURDIR)/..)" \
+ "."
+
+
+.PHONY: info
+info: ## general info
+ # env vars: version control
+ @echo " VCS_URL : $(VCS_URL)"
+ @echo " VCS_REF : $(VCS_REF)"
+ @echo " VCS_STATUS : $(VCS_STATUS)"
+ # env vars: docker
+ @echo " DOCKER_IMAGE_TAG : $(DOCKER_IMAGE_TAG)"
+ @echo " BUILD_DATE : $(BUILD_DATE)"
+ # exe: recommended dev tools
+ @echo ' git : $(shell git --version 2>/dev/null || echo not found)'
+ @echo ' make : $(shell make --version 2>&1 | head -n 1)'
+ @echo ' jq : $(shell jq --version 2>/dev/null || echo not found z)'
+ @echo ' awk : $(shell awk -W version 2>&1 | head -n 1 2>/dev/null || echo not found)'
+ @echo ' python : $(shell python3 --version 2>/dev/null || echo not found )'
+ @echo ' docker : $(shell docker --version)'
+ @echo ' docker buildx : $(shell docker buildx version)'
+ @echo ' docker compose : $(shell docker compose --version)'
+
+# MISC -----------------------------------------------------------------
+
+
+.PHONY: clean
+git_clean_args = -dxf --exclude=.vscode/
+
+clean: ## cleans all unversioned files in project and temp files create by this makefile
+ # Cleaning unversioned
+ @git clean -n $(git_clean_args)
+ @echo -n "Are you sure? [y/N] " && read ans && [ $${ans:-N} = y ]
+ @echo -n "$(shell whoami), are you REALLY sure? [y/N] " && read ans && [ $${ans:-N} = y ]
+ @git clean $(git_clean_args)
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..4acc521
--- /dev/null
+++ b/README.md
@@ -0,0 +1,30 @@
+# comp-medimproc
+
+Medical Image Processing - Computational Service
+
+## Usage
+
+```console
+$ make help
+
+$ make build
+$ make info-build
+$ make tests
+```
+
+## Workflow
+
+1. The source code shall be copied to the [src](comp-medimproc/src/comp-medimproc) folder.
+2. The [Dockerfile](comp-medimproc/src/Dockerfile) shall be modified to compile the source code.
+3. The [.osparc](.osparc) is the configuration folder and source of truth for metadata: describes service info and expected inputs/outputs of the service.
+4. The [execute](comp-medimproc/service.cli/execute) shell script shall be modified to run the service using the expected inputs and retrieve the expected outputs.
+5. The test input/output shall be copied to [validation](comp-medimproc/validation).
+6. The service docker image may be built and tested as ``make build tests`` (see usage above)
+7. Optional: if your code requires specific CPU/RAM resources, edit [runtime.yml](.osparc/runtime.yml). In doubt, leave it as default.
+
+## Have an issue or question?
+Please open an issue [in this repository](https://github.com/ITISFoundation/cookiecutter-osparc-service/issues/).
+---
+
+
+
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100644
index 0000000..de34659
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,48 @@
+version: '3.7'
+services:
+ comp-medimproc:
+ build:
+ context: ./
+ dockerfile: docker/alpine/Dockerfile
+ labels:
+ io.simcore.name: '{"name": "Medical Image Processing - Computational Service"}'
+ io.simcore.thumbnail: '{"thumbnail": "https://github.com/ITISFoundation/osparc-assets/blob/cb43207b6be2f4311c93cd963538d5718b41a023/assets/default-thumbnail-cookiecutter-osparc-service.png?raw=true"}'
+ io.simcore.description: '{"description": "Medical Image Processing - Computational
+ Service"}'
+ io.simcore.key: '{"key": "simcore/services/comp/comp-medimproc"}'
+ io.simcore.version: '{"version": "0.1.0"}'
+ io.simcore.integration-version: '{"integration-version": "2.0.0"}'
+ io.simcore.type: '{"type": "computational"}'
+ io.simcore.authors: '{"authors": [{"name": "Javier Garcia Ordonez", "email":
+ "ordonez@zmt.swiss", "affiliation": "ZMT ZurichMedTech AG"}]}'
+ io.simcore.contact: '{"contact": "ordonez@zmt.swiss"}'
+ io.simcore.inputs: '{"inputs": {"input_1": {"displayOrder": 1.0, "label":
+ "input_1_label", "description": "The input 1 description", "type": "string",
+ "fileToKeyMap": {"somefilename.ext": "input_1"}, "defaultValue": "some_value(optional)"},
+ "input_2": {"displayOrder": 2.0, "label": "input_2_label", "description":
+ "The input 2 description", "type": "string", "fileToKeyMap": {"somefilename.ext":
+ "input_2"}, "defaultValue": "some_value(optional)"}, "input_3": {"displayOrder":
+ 3.0, "label": "input_3_label", "description": "The input 3 description",
+ "type": "string", "fileToKeyMap": {"somefilename.ext": "input_3"}, "defaultValue":
+ "some_value(optional)"}, "input_4": {"displayOrder": 4.0, "label": "input_4_label",
+ "description": "The input 4 description", "type": "string", "fileToKeyMap":
+ {"somefilename.ext": "input_4"}, "defaultValue": "some_value(optional)"}}}'
+ io.simcore.outputs: '{"outputs": {"output_1": {"displayOrder": 1.0, "label":
+ "output_1_label", "description": "The input 1 description", "type": "string",
+ "fileToKeyMap": {"somefilename.ext": "output_1"}}, "output_2": {"displayOrder":
+ 2.0, "label": "output_2_label", "description": "The input 2 description",
+ "type": "string", "fileToKeyMap": {"somefilename.ext": "output_2"}}, "output_3":
+ {"displayOrder": 3.0, "label": "output_3_label", "description": "The input
+ 3 description", "type": "string", "fileToKeyMap": {"somefilename.ext": "output_3"}},
+ "output_4": {"displayOrder": 4.0, "label": "output_4_label", "description":
+ "The input 4 description", "type": "string", "fileToKeyMap": {"somefilename.ext":
+ "output_4"}}}}'
+ org.label-schema.build-date: '2024-08-28T16:18:24Z'
+ org.label-schema.schema-version: '1.0'
+ org.label-schema.vcs-ref: ''
+ org.label-schema.vcs-url: ''
+ simcore.service.restart-policy: no-restart
+ simcore.service.settings: '[{"name": "Resources", "type": "Resources", "value":
+ {"Limits": {"NanoCPUs": 1000000000, "MemoryBytes": 2147483648}}}]'
+ target: production
+ image: simcore/services/comp/comp-medimproc:0.1.0
diff --git a/docker/alpine/Dockerfile b/docker/alpine/Dockerfile
new file mode 100644
index 0000000..ec2a3d7
--- /dev/null
+++ b/docker/alpine/Dockerfile
@@ -0,0 +1,223 @@
+FROM ubuntu:18.04 as base
+# FROM alpine:3.8 as base
+## using alpine would be slimer but doesnt have apt-get so it would imply reworking quite a few things
+## however could be bad for compatibility with the Jupyter version (eg copy-paste between Dockerfiles might not work anymore)
+
+## Separating build & production could also help reduce image size
+
+LABEL maintainer="ordonez"
+
+# simcore-user uid=8004(${SC_USER_NAME}) gid=8004(${SC_USER_NAME}) groups=8004(${SC_USER_NAME})
+ENV SC_USER_ID 8004
+ENV SC_USER_NAME scu
+# ubuntu:
+RUN adduser --uid ${SC_USER_ID} --disabled-password --gecos "" --shell /bin/sh --home /home/${SC_USER_NAME} ${SC_USER_NAME}
+# # alpine:
+# RUN adduser -D -u ${SC_USER_ID} -s /bin/sh -h /home/${SC_USER_NAME} ${SC_USER_NAME}
+
+
+# ubuntu:
+RUN apt-get update \
+ && apt-get -y install --no-install-recommends \
+ jq \
+ && rm --recursive --force /var/lib/apt/lists/*
+
+# # alpine:
+# RUN apk add --no-cache \
+# su-exec \
+# jq
+
+# -------------------------- Build stage -------------------
+# Installs build/package management tools and third party dependencies
+#
+# + /build WORKDIR
+#
+
+FROM base as build
+
+ENV SC_BUILD_TARGET build
+
+WORKDIR /build
+# defines the output of the build
+RUN mkdir -p /build/bin
+# copy src code
+COPY --chown=${SC_USER_NAME}:${SC_USER_NAME} src/comp-medimproc src/comp-medimproc
+# ------------------------------------------------------------------------------------
+#TODO:
+# uncomment and adapt if build steps are necessary
+# RUN cp -R src/comp-medimproc/* /build/bin
+# ------------------------------------------------------------------------------------
+
+# --------------------------Production stage -------------------
+# Final cleanup up to reduce image size and startup setup
+# Runs as ${SC_USER_NAME} (non-root user)
+#
+# + /home/${SC_USER_NAME} $HOME = WORKDIR
+# + comp-medimproc [${SC_USER_NAME}:${SC_USER_NAME}]
+# + docker [${SC_USER_NAME}:${SC_USER_NAME}]
+# + service.cli [${SC_USER_NAME}:${SC_USER_NAME}]
+#
+FROM base as production
+
+ENV SC_BUILD_TARGET production
+ENV SC_BOOT_MODE production
+
+
+ENV INPUT_FOLDER="/input" \
+ OUTPUT_FOLDER="/output"
+
+ENV HOME="/home/${SC_USER_NAME}"
+WORKDIR ${HOME}
+
+
+############################################################
+## MRtrix(3)
+RUN apt-get -qq update \
+ && apt-get install -yq --no-install-recommends \
+        curl git python unzip wget \
+ dc \
+ libeigen3-dev \
+ libfftw3-dev \
+ libgl1-mesa-dev \
+ libpng-dev \
+ libqt5opengl5-dev \
+ libqt5svg5-dev \
+ libtiff5-dev \
+ qt5-default \
+ zlib1g-dev \
+ && rm -rf /var/lib/apt/lists/*
+
+WORKDIR ${HOME}/mrtrix3
+RUN git clone -b "master" --depth 1 https://github.com/MRtrix3/mrtrix3.git . \
+ && ./configure "" && NUMBER_OF_PROCESSORS=4 ./build -persistent -nopaginate \
+ && rm -rf tmp
+
+WORKDIR ${HOME}/art
+RUN curl -fsSL https://osf.io/73h5s/download \
+ | tar xz --strip-components 1
+
+WORKDIR ${HOME}/ants
+RUN curl -fsSL https://osf.io/yswa4/download \
+ | tar xz --strip-components 1
+
+ENV ANTSPATH="$HOME/ants/bin" \
+ ARTHOME="$HOME/art" \
+ PATH="$HOME/mrtrix3/bin:$HOME/ants/bin:$HOME/art/bin:$PATH"
+
+############################################################
+## Freesurfer
+WORKDIR ${HOME}
+RUN apt-get update && apt-get install -y tcsh bc libgomp1 perl-modules \
+ && rm -rf /var/lib/apt/lists/*
+RUN wget -N -qO- ftp://surfer.nmr.mgh.harvard.edu/pub/dist/freesurfer/6.0.0/freesurfer-Linux-centos6_x86_64-stable-pub-v6.0.0.tar.gz | tar -xzv -C ${HOME}
+# \ && rm -rf ${HOME}/freesurfer/subjects # we actually need subjects/fsaverage for recon-all
+ENV FREESURFER_HOME ${HOME}/freesurfer
+COPY --chown=${SC_USER_NAME}:${SC_USER_NAME} docker/alpine/freesurfer_license.txt ${FREESURFER_HOME}/license.txt
+ENV FSFAST_HOME=$FREESURFER_HOME/fsfast \
+ MINC_BIN_DIR=$FREESURFER_HOME/mni/bin \
+ MNI_DIR=$FREESURFER_HOME/mni \
+ PERL5LIB=$FREESURFER_HOME/mni/share/perl5
+ENV PATH=$FREESURFER_HOME/bin:$MINC_BIN_DIR:$PATH
+
+############################################################
+## FSL
+WORKDIR ${HOME}
+ENV FSLDIR ${HOME}/fsl
+RUN wget https://fsl.fmrib.ox.ac.uk/fsldownloads/fslinstaller.py &&\
+ echo "" | python fslinstaller.py -d ${FSLDIR} &&\
+ . ${FSLDIR}/etc/fslconf/fsl.sh
+ENV FSLOUTPUTTYPE="NIFTI_GZ" \
+ FSLTCLSH="$FSLDIR/bin/fsltclsh" \
+ FSLWISH="$FSLDIR/bin/fslwish" \
+ LD_LIBRARY_PATH=$FSLDIR/fslpython/envs/fslpython/lib/ \
+ LD_LIBRARY_PATH="$FSLDIR/lib:$LD_LIBRARY_PATH" \
+ PATH=$FSLDIR/share/fsl/bin:$PATH
+
+#############################################################################
+## non-containerized Synb0-Disco
+# 1: clone the github repo
+WORKDIR ${HOME}
+## install PyTorch for Synb0-Disco -- NOTE(review): .venv/bin/pip is used below but no .venv is created anywhere in this Dockerfile (it presumably exists in the jupyter-math image this was adapted from) -- verify the virtualenv exists or create it first
+RUN .venv/bin/pip --no-cache install torch torchvision torchaudio --index-url https://download.pytorch.org/whl/cpu &&\
+ ## clone synb0-disco
+ mkdir synb0-disco && git clone -b "master" --depth 1 https://github.com/MASILab/Synb0-DISCO ${HOME}/synb0-disco &&\
+ rm -rf ${HOME}/synb0-disco/v1_0 &&\
+ # save a bit of space; only 430MB and most of it is the Neural Net save files (75MB each * 5 folds)
+ ### create symbolic links for other .sh files used by synb0-disco
+ ln -s ${HOME}/synb0-disco/data_processing/normalize_T1.sh /usr/local/bin &&\
+ ## pre-create INPUTS / OUTPUTS directories in synb0-disco; set all permissions
+ mkdir synb0-disco/INPUTS &&\
+ chmod gua+rwx synb0-disco/INPUTS &&\
+ mkdir synb0-disco/OUTPUTS &&\
+ chmod gua+rwx synb0-disco/OUTPUTS
+
+# 2: overwrite pipeline.sh with the correct paths in our system
+ENV PIPELINE_PATH=${HOME}/synb0-disco/src
+COPY --chown=${SC_USER_NAME}:${SC_USER_NAME} docker/alpine/pipeline_synb0_disco.sh ${PIPELINE_PATH}/pipeline_no_docker.sh
+
+# 3: make "synb0-disco" a recognized command for the bash console
+### create a symbolic link and make it executable
+RUN mkdir -p /usr/local/bin && \
+ ln -s -f ${PIPELINE_PATH}/pipeline_no_docker.sh /usr/local/bin &&\
+ mv /usr/local/bin/pipeline_no_docker.sh /usr/local/bin/synb0-disco &&\
+ chmod +x /usr/local/bin/synb0-disco && \
+ ## make synb0-disco TORCH to execute in CPU (mo)
+ sed -i '83s/.*/ device = torch.device("cpu")/' $HOME/synb0-disco/src/inference.py &&\
+ sed -i '87s/.*/ model.load_state_dict(torch.load(model_path, map_location=torch.device("cpu")))/' $HOME/synb0-disco/src/inference.py
+
+## install ANTS & c3d for synb0-disco
+RUN curl -SL https://github.com/ANTsX/ANTs/releases/download/v2.4.4/ants-2.4.4-ubuntu-20.04-X64-gcc.zip -o ./ants-2-4-4.zip &&\
+ unzip ./ants-2-4-4.zip &&\
+ rm -rf ./ants-2-4-4.zip && \
+ curl -SL https://sourceforge.net/projects/c3d/files/c3d/1.0.0/c3d-1.0.0-Linux-x86_64.tar.gz/download | tar xz
+ENV PATH=$PATH:$HOME/ants-2.4.4/bin/
+ENV ANTSPATH=$HOME/ants-2.4.4/bin/
+ENV PATH=$PATH:$HOME/c3d-1.0.0-Linux-x86_64/bin/
+
+### Temporarily removed for GitHub building space issues
+# ############################################################
+# ## Spinal Cord Toolbox (command line)
+# # RUN apt update && apt-get install -y curl ## already installed for MRTrix3
+# WORKDIR ${HOME}
+# RUN curl --location https://github.com/neuropoly/spinalcordtoolbox/archive/4.2.1.tar.gz | gunzip | tar x &&\
+# cd spinalcordtoolbox-4.2.1 && (yes "y" 2>/dev/null || true) | ./install_sct && cd - && rm -rf spinalcordtoolbox-4.2.1
+
+############################################################
+## python packages in requirements.in
+## before pip install fsleyes, we need to install wxPython:
+WORKDIR ${HOME}
+RUN .venv/bin/pip --no-cache install -f https://extras.wxpython.org/wxPython4/extras/linux/gtk3/ubuntu-20.04 wxpython &&\
+ .venv/bin/pip install attrdict
+
+# COPY --chown=$NB_UID:$NB_GID requirements.in requirements.in
+RUN .venv/bin/pip --no-cache install pip-tools &&\
+ ## rename the previously existing "requirements.txt" from the jupyter-math service (we want to keep it for user reference)
+ mv requirements.txt requirements_base_math.txt &&\
+ .venv/bin/pip-compile --build-isolation --output-file requirements.txt requirements.in &&\
+ .venv/bin/pip --no-cache install -r requirements.txt && \
+ rm requirements.in && \
+ echo "Your environment contains these python packages:" && \
+ .venv/bin/pip list
+
+# copy docker bootup scripts
+COPY --chown=${SC_USER_NAME}:${SC_USER_NAME} docker/alpine/*.sh docker/
+# copy simcore service cli
+COPY --chown=${SC_USER_NAME}:${SC_USER_NAME} service.cli/ service.cli/
+# necessary to be able to call run directly without sh in front
+ENV PATH="/home/${SC_USER_NAME}/service.cli:${PATH}"
+
+# copy binaries from build
+COPY --from=build --chown=${SC_USER_NAME}:${SC_USER_NAME} /build/bin comp-medimproc
+
+# ------------------------------------------------------------------------------------
+#TODO:
+# uncomment and provide a healtchecker if possible
+# HEALTHCHECK --interval=30s \
+# --timeout=120s \
+# --start-period=30s \
+# --retries=3 \
+# CMD ["healthchecker app"]
+# ------------------------------------------------------------------------------------
+
+ENTRYPOINT [ "/bin/sh", "docker/entrypoint.sh", "/bin/sh", "-c" ]
+CMD ["run"]
diff --git a/docker/alpine/entrypoint.sh b/docker/alpine/entrypoint.sh
new file mode 100755
index 0000000..95fd5d8
--- /dev/null
+++ b/docker/alpine/entrypoint.sh
@@ -0,0 +1,73 @@
+#!/bin/sh
+set -o errexit
+set -o nounset
+
+IFS=$(printf '\n\t')
+# This entrypoint script:
+#
+# - Executes *inside* of the container upon start as --user [default root]
+# - Notice that the container *starts* as --user [default root] but
+# *runs* as non-root user [$SC_USER_NAME]
+#
+echo Entrypoint for stage "${SC_BUILD_TARGET}" ...
+echo User : "$(id "$(whoami)")"
+echo Workdir : "$(pwd)"
+
+
+# expect input/output folders to be mounted
+stat "${INPUT_FOLDER}" > /dev/null 2>&1 || \
+ (echo "ERROR: You must mount '${INPUT_FOLDER}' to deduce user and group ids" && exit 1)
+stat "${OUTPUT_FOLDER}" > /dev/null 2>&1 || \
+ (echo "ERROR: You must mount '${OUTPUT_FOLDER}' to deduce user and group ids" && exit 1)
+
+# NOTE: expects docker run ... -v /path/to/input/folder:${INPUT_FOLDER}
+# check input/output folders are owned by the same user
+if [ "$(stat -c %u "${INPUT_FOLDER}")" -ne "$(stat -c %u "${OUTPUT_FOLDER}")" ]
+then
+ echo "ERROR: '${INPUT_FOLDER}' and '${OUTPUT_FOLDER}' have different user id's. not allowed" && exit 1
+fi
+# check input/outputfolders are owned by the same group
+if [ "$(stat -c %g "${INPUT_FOLDER}")" -ne "$(stat -c %g "${OUTPUT_FOLDER}")" ]
+then
+ echo "ERROR: '${INPUT_FOLDER}' and '${OUTPUT_FOLDER}' have different group id's. not allowed" && exit 1
+fi
+
+echo "setting correct user id/group id..."
+HOST_USERID=$(stat -c %u "${INPUT_FOLDER}")
+HOST_GROUPID=$(stat -c %g "${INPUT_FOLDER}")
+CONT_GROUPNAME=$(getent group "${HOST_GROUPID}" | cut -d: -f1)
+if [ "$HOST_USERID" -eq 0 ]
+then
+ echo "Warning: Folder mounted owned by root user... adding $SC_USER_NAME to root..."
+ addgroup "$SC_USER_NAME" root
+else
+ echo "Folder mounted owned by user $HOST_USERID:$HOST_GROUPID-'$CONT_GROUPNAME'..."
+ # take host's credentials in $SC_USER_NAME
+ if [ -z "$CONT_GROUPNAME" ]
+ then
+ echo "Creating new group my$SC_USER_NAME"
+ CONT_GROUPNAME=my$SC_USER_NAME
+ addgroup -g "$HOST_GROUPID" "$CONT_GROUPNAME"
+ else
+ echo "group already exists"
+ fi
+
+ echo "changing $SC_USER_NAME $SC_USER_ID:$SC_USER_ID to $HOST_USERID:$HOST_GROUPID"
+  # NOTE(review): deluser/adduser/addgroup here use busybox (alpine) flags, and su-exec (used at the end of this script) is an alpine package, but this image is built from ubuntu:18.04 whose Debian tools take different options -- verify these calls work in the final image (or switch to usermod/groupmod + gosu)
+ deluser "$SC_USER_NAME" > /dev/null 2>&1
+ adduser -u "$HOST_USERID" -G "$CONT_GROUPNAME" -D -s /bin/sh "$SC_USER_NAME"
+
+ echo "Changing group properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
+ find / -group "$SC_USER_ID" -exec chgrp -h "$CONT_GROUPNAME" {} \;
+ # change user property of files already around
+ echo "Changing ownership properties of files around from $SC_USER_ID to group $CONT_GROUPNAME"
+ find / -user "$SC_USER_ID" -exec chown -h "$SC_USER_NAME" {} \;
+fi
+
+echo "Starting $* ..."
+echo " $SC_USER_NAME rights : $(id "$SC_USER_NAME")"
+echo " local dir : $(ls -al)"
+echo " input dir : $(ls -al "${INPUT_FOLDER}")"
+echo " output dir : $(ls -al "${OUTPUT_FOLDER}")"
+
+su-exec "$SC_USER_NAME" "$@"
diff --git a/docker/alpine/freesurfer_license.txt b/docker/alpine/freesurfer_license.txt
new file mode 100644
index 0000000..d9cb3db
--- /dev/null
+++ b/docker/alpine/freesurfer_license.txt
@@ -0,0 +1,4 @@
+ordonez@z43.swiss
+66124
+ *CfNSB2H6VkQA
+ FSJdrSCiz/GKc
diff --git a/docker/alpine/pipeline_synb0_disco.sh b/docker/alpine/pipeline_synb0_disco.sh
new file mode 100644
index 0000000..7e99f0a
--- /dev/null
+++ b/docker/alpine/pipeline_synb0_disco.sh
@@ -0,0 +1,69 @@
+#!/bin/bash
+
+TOPUP=1
+MNI_T1_1_MM_FILE=${HOME}/synb0-disco/atlases/mni_icbm152_t1_tal_nlin_asym_09c.nii.gz
+
+for arg in "$@"
+do
+ case $arg in
+ -i|--notopup)
+ TOPUP=0
+ ;;
+ -s|--stripped)
+ MNI_T1_1_MM_FILE=${HOME}/synb0-disco/atlases/mni_icbm152_t1_tal_nlin_asym_09c_mask.nii.gz
+ ;;
+ esac
+done
+
+# # Set path for executable
+# export PATH=$PATH:/extra
+
+# # Set up freesurfer
+# export FREESURFER_HOME=/extra/freesurfer
+# source $FREESURFER_HOME/SetUpFreeSurfer.sh
+
+# Set up FSL
+# . /extra/fsl/etc/fslconf/fsl.sh
+# export PATH=$PATH:/extra/fsl/bin
+# export FSLDIR=/extra/fsl
+
+# # Set up ANTS
+# export ANTSPATH=/extra/ANTS/bin/ants/bin/
+# export PATH=$PATH:$ANTSPATH:/extra/ANTS/ANTs/Scripts
+
+# # Set up pytorch
+# source /extra/pytorch/bin/activate
+
+# Prepare input
+${HOME}/synb0-disco/data_processing/prepare_input.sh ${HOME}/synb0-disco/INPUTS/b0.nii.gz ${HOME}/synb0-disco/INPUTS/T1.nii.gz $MNI_T1_1_MM_FILE ${HOME}/synb0-disco/atlases/mni_icbm152_t1_tal_nlin_asym_09c_2_5.nii.gz ${HOME}/synb0-disco/OUTPUTS
+
+# Run inference
+NUM_FOLDS=5
+for i in $(seq 1 $NUM_FOLDS);
+ do echo Performing inference on FOLD: "$i"
+ $HOME/.venv/bin/python ${HOME}/synb0-disco/src/inference.py ${HOME}/synb0-disco/OUTPUTS/T1_norm_lin_atlas_2_5.nii.gz ${HOME}/synb0-disco/OUTPUTS/b0_d_lin_atlas_2_5.nii.gz ${HOME}/synb0-disco/OUTPUTS/b0_u_lin_atlas_2_5_FOLD_"$i".nii.gz ${HOME}/synb0-disco/src/train_lin/num_fold_"$i"_total_folds_"$NUM_FOLDS"_seed_1_num_epochs_100_lr_0.0001_betas_\(0.9\,\ 0.999\)_weight_decay_1e-05_num_epoch_*.pth
+done
+
+# Take mean
+echo Taking ensemble average
+fslmerge -t ${HOME}/synb0-disco/OUTPUTS/b0_u_lin_atlas_2_5_merged.nii.gz ${HOME}/synb0-disco/OUTPUTS/b0_u_lin_atlas_2_5_FOLD_*.nii.gz
+fslmaths ${HOME}/synb0-disco/OUTPUTS/b0_u_lin_atlas_2_5_merged.nii.gz -Tmean ${HOME}/synb0-disco/OUTPUTS/b0_u_lin_atlas_2_5.nii.gz
+
+# Apply inverse xform to undistorted b0
+echo Applying inverse xform to undistorted b0
+antsApplyTransforms -d 3 -i ${HOME}/synb0-disco/OUTPUTS/b0_u_lin_atlas_2_5.nii.gz -r ${HOME}/synb0-disco/INPUTS/b0.nii.gz -n BSpline -t [${HOME}/synb0-disco/OUTPUTS/epi_reg_d_ANTS.txt,1] -t [${HOME}/synb0-disco/OUTPUTS/ANTS0GenericAffine.mat,1] -o ${HOME}/synb0-disco/OUTPUTS/b0_u.nii.gz
+
+# Smooth image
+echo Applying slight smoothing to distorted b0
+fslmaths ${HOME}/synb0-disco/INPUTS/b0.nii.gz -s 1.15 ${HOME}/synb0-disco/OUTPUTS/b0_d_smooth.nii.gz
+
+if [[ $TOPUP -eq 1 ]]; then
+ # Merge results and run through topup
+ echo Running topup
+ fslmerge -t ${HOME}/synb0-disco/OUTPUTS/b0_all.nii.gz ${HOME}/synb0-disco/OUTPUTS/b0_d_smooth.nii.gz ${HOME}/synb0-disco/OUTPUTS/b0_u.nii.gz
+ topup -v --imain=${HOME}/synb0-disco/OUTPUTS/b0_all.nii.gz --datain=${HOME}/synb0-disco/INPUTS/acqparams.txt --config=b02b0.cnf --iout=${HOME}/synb0-disco/OUTPUTS/b0_all_topup.nii.gz --out=${HOME}/synb0-disco/OUTPUTS/topup --subsamp=1,1,1,1,1,1,1,1,1 --miter=10,10,10,10,10,20,20,30,30 --lambda=0.00033,0.000067,0.0000067,0.000001,0.00000033,0.000000033,0.0000000033,0.000000000033,0.00000000000067 --scale=0
+fi
+
+
+# Done
+echo FINISHED!!!
diff --git a/docker/alpine/requirements.in b/docker/alpine/requirements.in
new file mode 100755
index 0000000..b62f4d1
--- /dev/null
+++ b/docker/alpine/requirements.in
@@ -0,0 +1,7 @@
+nibabel
+pyvista
+
+PyOpenGL
+PyOpenGL_accelerate
+fsleyes
+connected-components-3d
diff --git a/service.cli/execute.sh b/service.cli/execute.sh
new file mode 100755
index 0000000..3d99bb6
--- /dev/null
+++ b/service.cli/execute.sh
@@ -0,0 +1,36 @@
+#!/bin/sh
+# set sh strict mode
+set -o errexit
+set -o nounset
+IFS=$(printf '\n\t')
+
+cd /home/scu/comp-medimproc
+
+echo "starting service as"
+echo User : "$(id "$(whoami)")"
+echo Workdir : "$(pwd)"
+echo "..."
+echo
+# ----------------------------------------------------------------
+# This script shall be modified according to the needs in order to run the service
+# The inputs defined in ${INPUT_FOLDER}/inputs.json are available as env variables by their key in capital letters
+# For example: input_1 -> $INPUT_1
+
+# put the code to execute the service here
+# For example:
+env
+ls -al "${INPUT_FOLDER}"
+
+# then retrieve the output and move it to the $OUTPUT_FOLDER
+# as defined in the output labels
+# For example: cp output.csv $OUTPUT_FOLDER or to $OUTPUT_FOLDER/outputs.json using jq
+#TODO: Replace following
+cat > "${OUTPUT_FOLDER}"/outputs.json << EOF
+{
+ "output_1":"some_stuff",
+ "output_2":"some_stuff",
+ "output_3":"some_stuff",
+ "output_4":"some_stuff"
+}
+EOF
+
diff --git a/src/comp-medimproc/.gitkeep b/src/comp-medimproc/.gitkeep
new file mode 100644
index 0000000..788d55e
--- /dev/null
+++ b/src/comp-medimproc/.gitkeep
@@ -0,0 +1 @@
+add source code here
diff --git a/validation/input/inputs.json b/validation/input/inputs.json
new file mode 100644
index 0000000..6304774
--- /dev/null
+++ b/validation/input/inputs.json
@@ -0,0 +1,6 @@
+{
+ "input_1": "some_stuff",
+ "input_2": "some_stuff",
+ "input_3": "some_stuff",
+ "input_4": "some_stuff"
+}
diff --git a/validation/output/outputs.json b/validation/output/outputs.json
new file mode 100644
index 0000000..debe324
--- /dev/null
+++ b/validation/output/outputs.json
@@ -0,0 +1,6 @@
+{
+ "output_1": "some_output_stuff",
+ "output_2": "some_output_stuff",
+ "output_3": "some_output_stuff",
+ "output_4": "some_output_stuff"
+}