Commit d2e24ca

Merge branch 'main' into 2024/traefikFixWWWRedirect

mrnicegyu11 authored Dec 5, 2024
2 parents 9aca16a + c37a5a6

Showing 23 changed files with 111 additions and 251 deletions.
2 changes: 1 addition & 1 deletion .gitignore
@@ -142,7 +142,7 @@ yq
**/.env-devel
**/.stack.*.yml
**/.stack.*.yaml
./docker-compose.yml
docker-compose.yml
stack.yml
stack_with_prefix.yml
docker-compose.simcore.yml
2 changes: 1 addition & 1 deletion .pylintrc
@@ -58,7 +58,7 @@ ignore-paths=^.*\\generated_models\\.*$|^.*/generated_models/.*$
# Files or directories matching the regex patterns are skipped. The regex
# matches against base names, not paths. The default value ignores Emacs file
# locks
ignore-patterns=venv,.venv
ignore-patterns=venv,.venv,jupyter_server_config.py

# List of module names for which member attributes should not be checked
# (useful for modules/projects where namespaces are manipulated during runtime
11 changes: 1 addition & 10 deletions Makefile
@@ -26,7 +26,7 @@ certificates/domain.key:
# Done: Creating docker secrets

.PHONY: up-local
up-local: .install-fqdn certificates/domain.crt certificates/domain.key .create-secrets ## deploy osparc ops stacks and simcore, use minio_disabled=1 if minio s3 should not be started (if you have custom S3 set up)
up-local: .init .venv .install-fqdn certificates/domain.crt certificates/domain.key .create-secrets ## deploy osparc ops stacks and simcore, use minio_disabled=1 if minio s3 should not be started (if you have custom S3 set up)
@bash scripts/deployments/deploy_everything_locally.bash --stack_target=local --minio_enabled=0 --vcs_check=1
@$(MAKE) info-local

@@ -71,15 +71,6 @@ down-maintenance: ## Stop the maintenance mode
fi \
,)


.PHONY: venv
venv: .venv ## Creates a python virtual environment with dev tools (pip, pylint, ...)
.venv:
@python3 -m venv .venv
@.venv/bin/pip3 install --upgrade pip wheel setuptools
@.venv/bin/pip3 install typer
@echo "To activate the venv, execute 'source .venv/bin/activate'"

# Misc: info & clean
.PHONY: info info-vars info-local
info: ## Displays some important info
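
The standalone venv target removed above is covered by the shared .venv rule in scripts/common.Makefile (see the next file), which this commit extends to also install typer. A sketch of what that shared bootstrap amounts to:

    python3 -m venv .venv
    .venv/bin/pip3 install --upgrade pip wheel setuptools
    .venv/bin/pip3 install jinja2 'j2cli[yaml]' typer
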
31 changes: 7 additions & 24 deletions scripts/common.Makefile
@@ -7,14 +7,12 @@ VERSION := $(shell uname -a)

# Checks for handling various operating systems
ifeq ($(filter Windows_NT,$(OS)),)
IS_WSL := $(if $(findstring microsoft,$(shell uname -a | tr '[:upper:]' '[:lower:]')),WSL,)
IS_WSL2 := $(if $(findstring -microsoft-,$(shell uname -a)),WSL2,)
IS_OSX := $(filter Darwin,$(shell uname -a))
IS_LINUX:= $(if $(or $(IS_WSL),$(IS_OSX)),,$(filter Linux,$(shell uname -a)))
endif
IS_WIN := $(strip $(if $(or $(IS_LINUX),$(IS_OSX),$(IS_WSL)),,$(OS)))

$(if $(IS_WIN),$(error Windows is not supported in all recipes. Use WSL2 instead. Follow instructions in README.md),)
$(if $(IS_WSL2),,$(if $(IS_WSL),$(error WSL1 is not supported in all recipes. Use WSL2 instead. Follow instructions in README.md),))

# Check that a valid location to a config file is set.
@@ -37,13 +35,13 @@ endif
export DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL:=$(shell set -o allexport; \
source $(REPO_CONFIG_LOCATION); \
if [ -z "$${DEPLOYMENT_FQDNS}" ]; then \
DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL="(Host(\`$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (Host(\`invitations.$$MACHINE_FQDN\`))|| (Host(\`storage.$$MACHINE_FQDN\`)) || (HostRegexp(\`services.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (HostRegexp(\`services.testing.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.testing.$$MACHINE_FQDN\`) && PathPrefix(\`/\`))"; \
DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL="(Host(\`$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (Host(\`invitations.$$MACHINE_FQDN\`))|| (HostRegexp(\`services.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (HostRegexp(\`services.testing.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.testing.$$MACHINE_FQDN\`) && PathPrefix(\`/\`))"; \
else \
IFS=', ' read -r -a hosts <<< "$${DEPLOYMENT_FQDNS}"; \
DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL="(Host(\`$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (Host(\`invitations.$$MACHINE_FQDN\`))|| (Host(\`storage.$$MACHINE_FQDN\`)) || (HostRegexp(\`services.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (HostRegexp(\`services.testing.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.testing.$$MACHINE_FQDN\`) && PathPrefix(\`/\`))"; \
DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL="(Host(\`$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (Host(\`invitations.$$MACHINE_FQDN\`))|| (HostRegexp(\`services.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.$$MACHINE_FQDN\`) && PathPrefix(\`/\`)) || (HostRegexp(\`services.testing.$$MACHINE_FQDN\`,\`{subhost:[a-zA-Z0-9-]+}.services.testing.$$MACHINE_FQDN\`) && PathPrefix(\`/\`))"; \
for element in "$${hosts[@]}"; \
do \
DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL="$$DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL || (Host(\`$$element\`) && PathPrefix(\`/\`)) || (Host(\`invitations.$$element\`)) || (Host(\`storage.$$element\`)) || (HostRegexp(\`services.$$element\`,\`{subhost:[a-zA-Z0-9-]+}.services.$$element\`) && PathPrefix(\`/\`)) || (HostRegexp(\`services.testing.$$element\`,\`{subhost:[a-zA-Z0-9-]+}.services.testing.$$element\`) && PathPrefix(\`/\`))";\
DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL="$$DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL || (Host(\`$$element\`) && PathPrefix(\`/\`)) || (Host(\`invitations.$$element\`)) || (HostRegexp(\`services.$$element\`,\`{subhost:[a-zA-Z0-9-]+}.services.$$element\`) && PathPrefix(\`/\`)) || (HostRegexp(\`services.testing.$$element\`,\`{subhost:[a-zA-Z0-9-]+}.services.testing.$$element\`) && PathPrefix(\`/\`))";\
done; \
DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL="$$DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL"; \
fi; \
@@ -66,21 +64,6 @@ export DEPLOYMENT_FQDNS_CAPTURE_INVITATIONS:=$(shell set -o allexport; \
echo $$DEPLOYMENT_FQDNS_CAPTURE_INVITATIONS; \
set +o allexport; )
export DEPLOYMENT_FQDNS_CAPTURE_STORAGE:=$(shell set -o allexport; \
source $(REPO_CONFIG_LOCATION); \
if [ -z "$${DEPLOYMENT_FQDNS}" ]; then \
DEPLOYMENT_FQDNS_CAPTURE_STORAGE="(Host(\`storage.$$MACHINE_FQDN\`))"; \
else \
IFS=', ' read -r -a hosts <<< "$${DEPLOYMENT_FQDNS}"; \
DEPLOYMENT_FQDNS_CAPTURE_STORAGE="(Host(\`storage.$$MACHINE_FQDN\`))"; \
for element in "$${hosts[@]}"; \
do \
DEPLOYMENT_FQDNS_CAPTURE_STORAGE="$$DEPLOYMENT_FQDNS_CAPTURE_STORAGE || (Host(\`storage.$$element\`))";\
done; \
DEPLOYMENT_FQDNS_CAPTURE_STORAGE="$$DEPLOYMENT_FQDNS_CAPTURE_STORAGE"; \
fi; \
echo $$DEPLOYMENT_FQDNS_CAPTURE_STORAGE; \
set +o allexport; )
export DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_MAINTENANCE_PAGE:=$(shell set -o allexport; \
source $(REPO_CONFIG_LOCATION); \
@@ -207,7 +190,6 @@ clean-default: .check_clean ## Cleans all outputs
export DEPLOYMENT_FQDNS_TESTING_CAPTURE_TRAEFIK_RULE='${DEPLOYMENT_FQDNS_TESTING_CAPTURE_TRAEFIK_RULE}'; \
export DEPLOYMENT_API_DOMAIN_TESTING_CAPTURE_TRAEFIK_RULE='${DEPLOYMENT_API_DOMAIN_TESTING_CAPTURE_TRAEFIK_RULE}'; \
export DEPLOYMENT_FQDNS_CAPTURE_INVITATIONS='${DEPLOYMENT_FQDNS_CAPTURE_INVITATIONS}'; \
export DEPLOYMENT_FQDNS_CAPTURE_STORAGE='${DEPLOYMENT_FQDNS_CAPTURE_STORAGE}'; \
export DOLLAR='$$'; \
set +o allexport; \
envsubst < $< > .env
@@ -243,20 +225,21 @@ clean-default: .check_clean ## Cleans all outputs
# creating virtual environment with tooling (jinja, etc)
@python3 -m venv .venv
@.venv/bin/pip3 install --upgrade pip wheel setuptools
@.venv/bin/pip3 install jinja2 j2cli[yaml]
@.venv/bin/pip3 install jinja2 j2cli[yaml] typer
@echo "To activate the venv, execute 'source .venv/bin/activate'"
# https://github.com/kolypto/j2cli?tab=readme-ov-file#customization
ifeq ($(shell test -f j2cli_customization.py && echo -n yes),yes)
define jinja
.venv/bin/j2 --format=env $(1) .env -o $(2) --customize j2cli_customization.py
$(REPO_BASE_DIR)/.venv/bin/j2 --format=env $(1) .env -o $(2) --customize j2cli_customization.py
endef
else
define jinja
.venv/bin/j2 --format=env $(1) .env -o $(2)
$(REPO_BASE_DIR)/.venv/bin/j2 --format=env $(1) .env -o $(2)
endef
endif
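
For orientation, a hedged illustration (not taken verbatim from the commit) of what the updated catch-all rule expands to when MACHINE_FQDN=example.com and DEPLOYMENT_FQDNS is unset; the storage.* Host() matchers are intentionally no longer part of it:

    # example.com is a placeholder FQDN; the value below is a worked expansion of the rule built above
    DEPLOYMENT_FQDNS_CAPTURE_TRAEFIK_RULE_CATCHALL='(Host(`example.com`) && PathPrefix(`/`)) || (Host(`invitations.example.com`)) || (HostRegexp(`services.example.com`,`{subhost:[a-zA-Z0-9-]+}.services.example.com`) && PathPrefix(`/`)) || (HostRegexp(`services.testing.example.com`,`{subhost:[a-zA-Z0-9-]+}.services.testing.example.com`) && PathPrefix(`/`))'
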
3 changes: 3 additions & 0 deletions scripts/deployments/deploy_everything_locally.bash
@@ -235,6 +235,9 @@ if [ "$start_opsstack" -eq 0 ]; then
# -------------------------------- ADMIN-PANELS -------------------------------

log_info "starting admin-panels..."
# Check if the stack 'admin-panels' exists and delete it if it does
# shellcheck disable=2015
docker stack ls | grep -q admin-panels && docker stack rm admin-panels >/dev/null 2>&1 || true
# Pushd because a call with call_make trigger a strange behavior
pushd "${repo_basedir}"/services/admin-panels;
call_make "." up-"$stack_target";
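
The shellcheck SC2015 suppression above refers to the fact that "A && B || C" is not an if-then-else: C also runs when A succeeds but B fails, which is intended here. An equivalent, more explicit form of the guarded removal (a sketch, not part of the commit):

    if docker stack ls | grep -q admin-panels; then
        docker stack rm admin-panels >/dev/null 2>&1 || true
    fi
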
10 changes: 8 additions & 2 deletions scripts/deployments/start_simcore_locally.bash
@@ -71,6 +71,7 @@ if [[ "$devel_repo_path" = "0" ]] ; then
#
# IF GETREPO DOESNT EXIST
if [ ! -d osparc-simcore ]; then
export GIT_SIMCORE_REPO_URL="https://github.com/ITISFoundation/osparc-simcore.git"
git clone "$GIT_SIMCORE_REPO_URL"
fi
# FI
@@ -130,8 +131,12 @@ scripts/deployments/compose_stack_yml.bash
log_info "Adding prefix $PREFIX_STACK_NAME to all services..."
./yq "with(.services; with_entries(.key |= \"${PREFIX_STACK_NAME}_\" + .))" stack.yml > stack_with_prefix.yml
log_info "Deleting the $SIMCORE_STACK_NAME docker stack if present"
docker stack rm "$SIMCORE_STACK_NAME" || true
sleep 3 # Wait for stack to be deleted, the networks often take a while, not waiting might lead to docker network creation issues
# Wait for stack to be deleted, the networks often take a while, not waiting might lead to docker network creation issues
# shellcheck disable=2015
docker stack rm "$SIMCORE_STACK_NAME" && sleep 3 || true
log_info "Copying dask-certificates into place"
mkdir -p "$repo_basedir"/services/simcore/dask-sidecar/.dask-certificates
cp -r "$(dirname "${repo_config}")"/assets/dask-certificates/*.pem "$repo_basedir"/services/simcore/dask-sidecar/.dask-certificates
log_info "Deploying: Running docker stack deploy for stack $SIMCORE_STACK_NAME..."

# Retry logic via https://unix.stackexchange.com/a/82610
@@ -141,4 +146,5 @@ for i in {1..5}; do docker stack deploy -c stack_with_prefix.yml "$SIMCORE_STACK

############
# CLEANUP
# shellcheck disable=1073
rm -r "${repo_basedir:?}"/"${tempdirname:?}" 2>/dev/null || true
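
The fixed three-second wait after removing the stack is a pragmatic choice; if it ever proves flaky, one possible alternative (a sketch, not what the script does) is to poll until the stack's networks are actually gone before redeploying:

    # wait for the overlay networks labelled with the stack namespace to disappear
    while docker network ls --filter "label=com.docker.stack.namespace=${SIMCORE_STACK_NAME}" -q | grep -q .; do
        sleep 1
    done
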
15 changes: 0 additions & 15 deletions scripts/s3-previous-versions/README.md

This file was deleted.

17 changes: 0 additions & 17 deletions scripts/s3-previous-versions/launch.bash

This file was deleted.

3 changes: 0 additions & 3 deletions scripts/s3-previous-versions/template.env

This file was deleted.

8 changes: 6 additions & 2 deletions services/graylog/scripts/configure.py
@@ -48,7 +48,9 @@
before=before_log(logger, logging.INFO),
)
def wait_graylog_is_online():
_r = requests.get(GRAYLOG_BASE_DOMAIN + "/api/system", auth=REQUESTS_AUTH)
_r = requests.get(
GRAYLOG_BASE_DOMAIN + "/api/system", auth=REQUESTS_AUTH, verify=False
)

if _r.status_code == 401:
raise TypeError(f"Graylog unauthorized HTTP response: {_r}")
@@ -58,7 +60,9 @@ def wait_graylog_is_online():


def validate_graylog_version_is_supported():
_r = requests.get(GRAYLOG_BASE_DOMAIN + "/api/system", auth=REQUESTS_AUTH)
_r = requests.get(
GRAYLOG_BASE_DOMAIN + "/api/system", auth=REQUESTS_AUTH, verify=False
)
_r.raise_for_status()

graylog_version = _r.json()["version"]
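
A rough command-line equivalent of the check above (a sketch; -k mirrors verify=False and skips TLS certificate verification, acceptable only for self-signed local deployments; the credential variables are placeholders, the script itself uses REQUESTS_AUTH):

    curl -fsSk -u "${ADMIN_USER}:${ADMIN_PASSWORD}" "${GRAYLOG_BASE_DOMAIN}/api/system"
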
44 changes: 6 additions & 38 deletions services/minio/Makefile
@@ -1,9 +1,7 @@
.DEFAULT_GOAL := help



# Internal VARIABLES ------------------------------------------------
# STACK_NAME defaults to name of the current directory. Should not to be changed if you follow GitOps operating procedures.
# STACK_NAME defaults to name of the current directory.
STACK_NAME = $(notdir $(shell pwd))
DOCKER_MINIO_ACCESS_KEY = $(shell docker secret inspect --format {{.Spec.Name}} minio_secret_key 2>/dev/null)
DOCKER_MINIO_SECRET_KEY = $(shell docker secret inspect --format {{.Spec.Name}} minio_access_key 2>/dev/null)
@@ -14,52 +12,22 @@ REPO_BASE_DIR := $(shell git rev-parse --show-toplevel)
include ${REPO_BASE_DIR}/scripts/common.Makefile

.PHONY: up
up: .init .env ${TEMP_COMPOSE} .create-secrets ## Deploys or updates current stack "$(STACK_NAME)" using replicas=X (defaults to 1)
up: .init .env ${TEMP_COMPOSE} .create-secrets
@docker stack deploy --with-registry-auth --prune --compose-file ${TEMP_COMPOSE} $(STACK_NAME)
# "in case you created more than 1 replicas, you need to label the nodes accordingly using"
# "'docker node update --label-add minioX=true' with X being from 1 to number of replicas."

.PHONY: up-letsencrypt-http
up-letsencrypt-http: .init .env ${TEMP_COMPOSE}-letsencrypt-http .create-secrets ## Deploys minio stack using let's encrypt http challenge
@docker stack deploy --with-registry-auth --prune --compose-file ${TEMP_COMPOSE}-letsencrypt-http ${STACK_NAME}
# "in case you created more than 1 replicas, you need to label the nodes accordingly using"
# "'docker node update --label-add minioX=true' with X being from 1 to number of replicas."

.PHONY: up-letsencrypt-dns
up-letsencrypt-dns: .init .env ${TEMP_COMPOSE}-letsencrypt-dns .create-secrets ## Deploys minio stack using let's encrypt dns challenge
@docker stack deploy --with-registry-auth --prune --compose-file ${TEMP_COMPOSE}-letsencrypt-dns ${STACK_NAME}
# "in case you created more than 1 replicas, you need to label the nodes accordingly using"
# "'docker node update --label-add minioX=true' with X being from 1 to number of replicas."

.PHONY: up-dalco
up-dalco: up ## Deploys minio stack for Dalco Cluster
up-dalco: up

.PHONY: up-master
up-master: up ## Deploys minio stack for Master Cluster
up-master: up

.PHONY: up-local
up-local: up

.PHONY: ${TEMP_COMPOSE}
${TEMP_COMPOSE}: docker-compose.yaml.j2 .venv .env
$(call jinja, $<, tmp.yaml)
@${REPO_BASE_DIR}/scripts/docker-stack-config.bash -e .env tmp.yaml > $@
@rm tmp.yaml

.PHONY: ${TEMP_COMPOSE}-letsencrypt-http
${TEMP_COMPOSE}-letsencrypt-http: docker-compose.yaml.j2 docker-compose.letsencrypt.http.yaml.j2 .venv .env
$(call jinja, $<, tmp.yaml)
$(call jinja, docker-compose.letsencrypt.http.yaml.j2, tmp-letsencrypt.http.yaml)
@${REPO_BASE_DIR}/scripts/docker-stack-config.bash -e .env tmp.yaml tmp-letsencrypt.http.yaml > $@
@rm tmp.yaml tmp-letsencrypt.http.yaml

${TEMP_COMPOSE}: docker-compose.yaml .venv .env
@${REPO_BASE_DIR}/scripts/docker-stack-config.bash -e .env docker-compose.yaml > $@

.PHONY: ${TEMP_COMPOSE}-letsencrypt-dns
${TEMP_COMPOSE}-letsencrypt-dns: docker-compose.yaml.j2 docker-compose.letsencrypt.dns.yaml.j2 .venv .env
$(call jinja, $<, tmp.yaml)
$(call jinja, docker-compose.letsencrypt.dns.yaml.j2, tmp-letsencrypt.dns.yaml)
@${REPO_BASE_DIR}/scripts/docker-stack-config.bash -e .env tmp.yaml tmp-letsencrypt.dns.yaml > $@
@rm tmp.yaml tmp-letsencrypt.dns.yaml

.create-secrets:
@$(if $(DOCKER_MINIO_ACCESS_KEY), \
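
After this simplification the minio stack file is rendered directly from docker-compose.yaml instead of a Jinja template. Roughly, the deploy path now amounts to (a sketch; the temporary file name is assumed, not taken from the Makefile):

    ${REPO_BASE_DIR}/scripts/docker-stack-config.bash -e .env docker-compose.yaml > .stack.minio.yaml
    docker stack deploy --with-registry-auth --prune --compose-file .stack.minio.yaml minio
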
27 changes: 0 additions & 27 deletions services/minio/README.md

This file was deleted.

11 changes: 0 additions & 11 deletions services/minio/docker-compose.letsencrypt.dns.yaml.j2

This file was deleted.

11 changes: 0 additions & 11 deletions services/minio/docker-compose.letsencrypt.http.yaml.j2

This file was deleted.
