From 2dc63bf69e4f130aedc9473fce9be55c58d89f5e Mon Sep 17 00:00:00 2001 From: gardlt Date: Thu, 9 Feb 2017 14:01:31 -0600 Subject: [PATCH] adding-swift-chart * created chart for swift support * keystone swift user db endpoint creation * kolla-images * updated-values file * create swift config templates * added README * updated helm with min updates Closes-Feature: #177 --- Makefile | 6 +- helm-toolkit/templates/_endpoints.tpl | 43 +- .../templates/scripts/_job-create-db.py.tpl | 117 +++ .../snippets/_db_secret_root.yaml.tpl | 13 + swift/.helmignore | 21 + swift/Chart.yaml | 18 + swift/README.md | 20 + swift/requirements.yaml | 18 + swift/templates/configmap-bin.yaml | 27 + swift/templates/configmap-etc.yaml | 29 + swift/templates/deployment-account.yaml | 77 ++ swift/templates/deployment-container.yaml | 77 ++ swift/templates/deployment-object.yaml | 77 ++ swift/templates/deployment-proxy.yaml | 79 ++ swift/templates/etc/_account-server.conf.tpl | 261 ++++++ .../templates/etc/_container-server.conf.tpl | 303 +++++++ swift/templates/etc/_object-server.conf.tpl | 415 +++++++++ swift/templates/etc/_proxy-server.conf.tpl | 845 ++++++++++++++++++ swift/templates/etc/_swift.conf.tpl | 185 ++++ swift/templates/job-db-init.yaml | 79 ++ swift/templates/job-db-sync.yaml | 60 ++ swift/templates/job-ks-entrypoint.yaml | 79 ++ swift/templates/job-ks-service.yaml | 68 ++ swift/templates/job-ks-user.yaml | 69 ++ swift/templates/secret-db-root.env.yaml | 2 + .../templates/secret-keystone-admin.env.yaml | 34 + swift/templates/secret-keystone-user.env.yaml | 34 + swift/templates/service-api.yaml | 24 + swift/templates/service.yaml | 22 + swift/values.yaml | 280 ++++++ 30 files changed, 3379 insertions(+), 3 deletions(-) create mode 100644 helm-toolkit/templates/scripts/_job-create-db.py.tpl create mode 100644 helm-toolkit/templates/snippets/_db_secret_root.yaml.tpl create mode 100644 swift/.helmignore create mode 100644 swift/Chart.yaml create mode 100644 swift/README.md create mode 100644 swift/requirements.yaml create mode 100644 swift/templates/configmap-bin.yaml create mode 100644 swift/templates/configmap-etc.yaml create mode 100644 swift/templates/deployment-account.yaml create mode 100644 swift/templates/deployment-container.yaml create mode 100644 swift/templates/deployment-object.yaml create mode 100644 swift/templates/deployment-proxy.yaml create mode 100644 swift/templates/etc/_account-server.conf.tpl create mode 100644 swift/templates/etc/_container-server.conf.tpl create mode 100644 swift/templates/etc/_object-server.conf.tpl create mode 100644 swift/templates/etc/_proxy-server.conf.tpl create mode 100644 swift/templates/etc/_swift.conf.tpl create mode 100644 swift/templates/job-db-init.yaml create mode 100644 swift/templates/job-db-sync.yaml create mode 100644 swift/templates/job-ks-entrypoint.yaml create mode 100644 swift/templates/job-ks-service.yaml create mode 100644 swift/templates/job-ks-user.yaml create mode 100644 swift/templates/secret-db-root.env.yaml create mode 100644 swift/templates/secret-keystone-admin.env.yaml create mode 100644 swift/templates/secret-keystone-user.env.yaml create mode 100644 swift/templates/service-api.yaml create mode 100644 swift/templates/service.yaml create mode 100644 swift/values.yaml diff --git a/Makefile b/Makefile index 1dfa20f5..ef818340 100644 --- a/Makefile +++ b/Makefile @@ -17,10 +17,10 @@ B64_DIRS := helm-toolkit/secrets B64_EXCLUDE := $(wildcard helm-toolkit/secrets/*.b64) -CHARTS := ceph mariadb etcd postgresql rabbitmq memcached keystone 
glance horizon neutron nova cinder heat maas openstack +CHARTS := ceph mariadb etcd postgresql rabbitmq memcached keystone glance horizon neutron nova cinder heat swift maas openstack TOOLKIT_TPL := helm-toolkit/templates/_globals.tpl -all: helm-toolkit ceph bootstrap mariadb etcd postgresql rabbitmq memcached keystone glance horizon neutron nova cinder heat maas openstack +all: helm-toolkit ceph bootstrap mariadb etcd postgresql rabbitmq memcached keystone glance horizon neutron nova cinder heat swift maas openstack helm-toolkit: build-helm-toolkit @@ -51,6 +51,8 @@ nova: build-nova heat: build-heat +swift: build-swift + maas: build-maas memcached: build-memcached diff --git a/helm-toolkit/templates/_endpoints.tpl b/helm-toolkit/templates/_endpoints.tpl index 7c2134bd..dcc4f2bd 100644 --- a/helm-toolkit/templates/_endpoints.tpl +++ b/helm-toolkit/templates/_endpoints.tpl @@ -126,6 +126,29 @@ {{- end -}} {{- end -}} +# this function returns the endpoint uri for a service, it takes an tuple +# input in the form: service-type, endpoint-class, port-name. eg: +# { tuple "orchestration" "public" "api" . | include "helm-toolkit.endpoint_type_lookup_addr" } +# will return the appropriate URI. Once merged this should phase out the above. + +{{- define "helm-toolkit.endpoint_type_lookup_addr" -}} +{{- $type := index . 0 -}} +{{- $endpoint := index . 1 -}} +{{- $port := index . 2 -}} +{{- $context := index . 3 -}} +{{- $endpointMap := index $context.Values.endpoints $type }} +{{- $fqdn := $context.Release.Namespace -}} +{{- if $context.Values.endpoints.fqdn -}} +{{- $fqdn := $context.Values.endpoints.fqdn -}} +{{- end -}} +{{- with $endpointMap -}} +{{- $endpointScheme := .scheme }} +{{- $endpointHost := index .hosts $endpoint | default .hosts.default}} +{{- $endpointPort := index .port $port }} +{{- $endpointPath := .path | default "" }} +{{- printf "%s://%s.%s:%1.f%s" $endpointScheme $endpointHost $fqdn $endpointPort $endpointPath -}} +{{- end -}} +{{- end -}} #------------------------------- # endpoint type lookup @@ -145,7 +168,25 @@ {{- $endpointType | quote -}} {{- end -}} + +#------------------------------- +# endpoint name lookup +#------------------------------- + +# this function is used in endpoint management templates +# it returns the service type for an openstack service eg: +# { tuple orchestration . | include "ks_endpoint_type" } +# will return "heat" + +{{- define "helm-toolkit.endpoint_name_lookup" -}} +{{- $type := index . 0 -}} +{{- $context := index . 1 -}} +{{- $endpointMap := index $context.Values.endpoints $type }} +{{- $endpointName := index $endpointMap "name" }} +{{- $endpointName | quote -}} +{{- end -}} + #------------------------------- # kolla helpers #------------------------------- -{{ define "helm-toolkit.keystone_auth" }}{'auth_url':'{{ include "helm-toolkit.endpoint_keystone_internal" . }}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}} +{{ define "helm-toolkit.keystone_auth" }}{'auth_url':'{{ tuple "identity" "internal" "api" . 
| include "helm-toolkit.endpoint_type_lookup_addr" }}', 'username':'{{ .Values.keystone.admin_user }}','password':'{{ .Values.keystone.admin_password }}','project_name':'{{ .Values.keystone.admin_project_name }}','domain_name':'default'}{{end}} diff --git a/helm-toolkit/templates/scripts/_job-create-db.py.tpl b/helm-toolkit/templates/scripts/_job-create-db.py.tpl new file mode 100644 index 00000000..31f3d667 --- /dev/null +++ b/helm-toolkit/templates/scripts/_job-create-db.py.tpl @@ -0,0 +1,117 @@ +{{- define "helm-toolkit.job_create_db" }} +#!/usr/bin/env python + +# Copyright 2017 Pete Birley +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Creates db and user for an OpenStack Service: +# Set ROOT_DB_CONNECTION and DB_CONNECTION environment variables to contain +# SQLAlchemy strings for the root connection to the database and the one you +# wish the service to use. Alternatively, you can use an ini formatted config +# at the location specified by OPENSTACK_CONFIG_FILE, and extract the string +# from the key OPENSTACK_CONFIG_DB_KEY, in the section specified by +# OPENSTACK_CONFIG_DB_SECTION. + +import os +import sys +import ConfigParser +from sqlalchemy import create_engine + +# Get the connection string for the service db root user +if "ROOT_DB_CONNECTION" in os.environ: + db_connection = os.environ['ROOT_DB_CONNECTION'] +else: + print 'ROOT_DB_CONNECTION env var missing' + sys.exit(1) + +# Get the connection string for the service db +if "OPENSTACK_CONFIG_FILE" in os.environ: + try: + os_conf = os.environ['OPENSTACK_CONFIG_FILE'] + if "OPENSTACK_CONFIG_DB_SECTION" in os.environ: + os_conf_section = os.environ['OPENSTACK_CONFIG_DB_SECTION'] + else: + print 'Env var OPENSTACK_CONFIG_DB_SECTION not set' + sys.exit(1) + if "OPENSTACK_CONFIG_DB_KEY" in os.environ: + os_conf_key = os.environ['OPENSTACK_CONFIG_DB_KEY'] + else: + print 'Env var OPENSTACK_CONFIG_DB_KEY not set' + sys.exit(1) + config = ConfigParser.RawConfigParser() + print("Using {0} as db config source".format(os_conf)) + config.read(os_conf) + print("Trying to load db config from {0}:{1}".format( + os_conf_section, os_conf_key)) + user_db_conn = config.get(os_conf_section, os_conf_key) + print("Got config from {0}".format(os_conf)) + except: + print("Tried to load config from {0} but failed.".format(os_conf)) + sys.exit(1) +elif "DB_CONNECTION" in os.environ: + user_db_conn = os.environ['DB_CONNECTION'] + print 'Got config from DB_CONNECTION env var' +else: + print 'Could not get db config, either from config file or env var' + sys.exit(1) + +# Root DB engine +try: + root_engine = create_engine(db_connection) + connection = root_engine.connect() + connection.close() +except: + print 'Could not connect to database as root user' + sys.exit(1) + +# User DB engine +try: + user_engine = create_engine(user_db_conn) + # Get our user data out of the user_engine + database = user_engine.url.database + user = user_engine.url.username + password = user_engine.url.password + print 'Got user db config' +except: + print 'Could not get user 
database config' + sys.exit(1) + +# Create DB +try: + root_engine.execute("CREATE DATABASE IF NOT EXISTS {0}".format(database)) + print("Created database {0}".format(database)) +except: + print("Could not create database {0}".format(database)) + sys.exit(1) + +# Create DB User +try: + root_engine.execute( + "GRANT ALL ON `{0}`.* TO \'{1}\'@\'%%\' IDENTIFIED BY \'{2}\'".format( + database, user, password)) + print("Created user {0} for {1}".format(user, database)) +except: + print("Could not create user {0} for {1}".format(user, database)) + sys.exit(1) + +# Test connection +try: + connection = user_engine.connect() + connection.close() + print 'Database connection for user ok' +except: + print 'Could not connect to database as user' + sys.exit(1) +{{- end }} + diff --git a/helm-toolkit/templates/snippets/_db_secret_root.yaml.tpl b/helm-toolkit/templates/snippets/_db_secret_root.yaml.tpl new file mode 100644 index 00000000..e4d7ca95 --- /dev/null +++ b/helm-toolkit/templates/snippets/_db_secret_root.yaml.tpl @@ -0,0 +1,13 @@ +{{- define "helm-toolkit.secret_db_root" }} +{{- $envAll := . -}} +{{- $dbRootConnection := printf "mysql+pymysql://%s:%s@%s:%1.f" $envAll.Values.database.root_user $envAll.Values.database.root_password $envAll.Values.database.address $envAll.Values.database.port }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $envAll.Values.database.secret.root }} +type: Opaque +data: + DB_CONNECTION: | +{{ $dbRootConnection | b64enc | indent 4 }} +{{- end }} + diff --git a/swift/.helmignore b/swift/.helmignore new file mode 100644 index 00000000..f0c13194 --- /dev/null +++ b/swift/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/swift/Chart.yaml b/swift/Chart.yaml new file mode 100644 index 00000000..75512174 --- /dev/null +++ b/swift/Chart.yaml @@ -0,0 +1,18 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +description: A Helm chart for Swift +name: swift +version: 0.1.0 diff --git a/swift/README.md b/swift/README.md new file mode 100644 index 00000000..10229164 --- /dev/null +++ b/swift/README.md @@ -0,0 +1,20 @@ +# OpenStack-Helm - Swift + +Swift is a highly available, distributed, eventually consistent object/blob store. Organizations can use Swift to store lots of data efficiently, safely, and cheaply. 
+
+## Prerequisites
+
+* helm-toolkit 0.1.0+
+
+## Installing the Chart
+
+`$ helm install --name=swift local/swift --namespace=openstack`
+
+## Uninstalling the Chart
+
+`$ helm delete swift`
+
+## Configuration
+
+> **Tip**: You can use the default [values.yaml](values.yaml) as a reference for the available configuration options.
+
diff --git a/swift/requirements.yaml b/swift/requirements.yaml
new file mode 100644
index 00000000..53782e69
--- /dev/null
+++ b/swift/requirements.yaml
@@ -0,0 +1,18 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+dependencies:
+  - name: helm-toolkit
+    repository: http://localhost:8879/charts
+    version: 0.1.0
diff --git a/swift/templates/configmap-bin.yaml b/swift/templates/configmap-bin.yaml
new file mode 100644
index 00000000..2104a202
--- /dev/null
+++ b/swift/templates/configmap-bin.yaml
@@ -0,0 +1,27 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: swift-bin
+data:
+  db-create.py: |
+{{- include "helm-toolkit.job_create_db" . | indent 4 }}
+  ks-service.sh: |
+{{- include "helm-toolkit.keystone_service" . | indent 4 }}
+  ks-endpoints.sh: |
+{{- include "helm-toolkit.keystone_endpoints" . | indent 4 }}
+  ks-user.sh: |
+{{- include "helm-toolkit.keystone_user" . | indent 4 }}
diff --git a/swift/templates/configmap-etc.yaml b/swift/templates/configmap-etc.yaml
new file mode 100644
index 00000000..e213d639
--- /dev/null
+++ b/swift/templates/configmap-etc.yaml
@@ -0,0 +1,29 @@
+# Copyright 2017 The Openstack-Helm Authors.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: swift-etc
+data:
+  swift.conf: |
+{{ tuple "etc/_swift.conf.tpl" . | include "template" | indent 4 }}
+  proxy-server.conf: |
+{{ tuple "etc/_proxy-server.conf.tpl" . | include "template" | indent 4 }}
+  object-server.conf: |
+{{ tuple "etc/_object-server.conf.tpl" . | include "template" | indent 4 }}
| include "template" | indent 4 }} + container-server.conf: | +{{ tuple "etc/_container-server.conf.tpl" . | include "template" | indent 4 }} + account-server.conf: | +{{ tuple "etc/_account-server.conf.tpl" . | include "template" | indent 4 }} diff --git a/swift/templates/deployment-account.yaml b/swift/templates/deployment-account.yaml new file mode 100644 index 00000000..8480a6c6 --- /dev/null +++ b/swift/templates/deployment-account.yaml @@ -0,0 +1,77 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . }} +{{- $dependencies := .Values.dependencies.account }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: swift-account +spec: + replicas: {{ .Values.replicas.account }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: swift-account + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.hash" }} + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: swift-account + image: {{ .Values.images.account }} + imagePullPolicy: {{ .Values.images.pull_policy }} + {{- if .Values.resources.enabled }} + resources: + requests: + memory: {{ .Values.resources.swift_account.requests.memory | quote }} + cpu: {{ .Values.resources.swift_account.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.swift_account.limits.memory | quote }} + cpu: {{ .Values.resources.swift_account.limits.cpu | quote }} + {{- end }} + command: + - swift-ring-builder + - account.builder create {{ .Values.swift.part_power }} {{ .Values.replicas.account }} {{ .Values.swift.min_part_hours }} + volumeMounts: + - name: pod-etc-swift + mountPath: /etc/swift + - name: accountswiftconf + mountPath: /etc/swift/account-server.conf + subPath: account-server.conf + readOnly: true + volumes: + - name: pod-etc-swift + emptyDir: {} + - name: accountswiftconf + configMap: + name: account-swift-conf +{{- if .Values.development.enabled }} + - name: swift-data + hostPath: + path: {{ .Values.development.storage_path }} +{{- end }} diff --git a/swift/templates/deployment-container.yaml b/swift/templates/deployment-container.yaml new file mode 100644 index 00000000..fe7c7251 --- /dev/null +++ b/swift/templates/deployment-container.yaml @@ -0,0 +1,77 @@ +# Copyright 2017 The Openstack-Helm Authors. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . }} +{{- $dependencies := .Values.dependencies.container }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: swift-container +spec: + replicas: {{ .Values.replicas.container }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: swift-container + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.hash" }} + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + serviceAccount: default + containers: + - name: swift-container + image: {{ .Values.images.container }} + imagePullPolicy: {{ .Values.images.pull_policy }} + {{- if .Values.resources.enabled }} + resources: + requests: + memory: {{ .Values.resources.swift_container.requests.memory | quote }} + cpu: {{ .Values.resources.swift_container.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.swift_container.limits.memory | quote }} + cpu: {{ .Values.resources.swift_container.limits.cpu | quote }} + {{- end }} + command: + - swift-ring-builder + - container.builder create {{ .Values.swift.part_power }} {{ .Values.replicas.container }} {{ .Values.swift.min_part_hours }} + volumeMounts: + - name: pod-etc-swift + mountPath: /etc/swift + - name: containerserverconf + mountPath: /etc/swift/container-server.conf + readOnly: true + volumes: + - name: pod-etc-swift + emptyDir: {} + - name: containerserverconf + configMap: + name: swift-etc +{{- if .Values.development.enabled }} + - name: swift-data + hostPath: + path: {{ .Values.development.storage_path }} +{{- end }} diff --git a/swift/templates/deployment-object.yaml b/swift/templates/deployment-object.yaml new file mode 100644 index 00000000..42d16058 --- /dev/null +++ b/swift/templates/deployment-object.yaml @@ -0,0 +1,77 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +{{- $envAll := . }} +{{- $dependencies := .Values.dependencies.object }} +apiVersion: extensions/v1beta1 +kind: Deployment +metadata: + name: swift-object +spec: + replicas: {{ .Values.replicas.object }} + revisionHistoryLimit: {{ .Values.upgrades.revision_history }} + strategy: + type: {{ .Values.upgrades.pod_replacement_strategy }} + {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }} + rollingUpdate: + maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }} + maxSurge: {{ .Values.upgrades.rolling_update.max_surge }} + {{ end }} + template: + metadata: + labels: + app: swift-object + annotations: + configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.hash" }} + configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.hash" }} + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + serviceAccount: default + containers: + - name: swift-object + image: {{ .Values.images.object }} + imagePullPolicy: {{ .Values.images.pull_policy }} + {{- if .Values.resources.enabled }} + resources: + requests: + memory: {{ .Values.resources.swift_object.requests.memory | quote }} + cpu: {{ .Values.resources.swift_object.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.swift_object.limits.memory | quote }} + cpu: {{ .Values.resources.swift_object.limits.cpu | quote }} + {{- end }} + command: + - swift-ring-builder + - object.builder create {{ .Values.swift.part_power }} {{ .Values.replicas.object }} {{ .Values.swift.min_part_hours }} + volumeMounts: + - name: pod-etc-swift + mountPath: /etc/swift + - name: objectserverconf + mountPath: /etc/swift/object-server.conf + readOnly: true + volumes: + - name: pod-etc-swift + emptyDir: {} + - name: objectserverconf + configMap: + name: swift-etc +{{- if .Values.development.enabled }} + - name: swift-data + hostPath: + path: {{ .Values.development.storage_path }} +{{- end }} diff --git a/swift/templates/deployment-proxy.yaml b/swift/templates/deployment-proxy.yaml new file mode 100644 index 00000000..cc53b53d --- /dev/null +++ b/swift/templates/deployment-proxy.yaml @@ -0,0 +1,79 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . 
}}
+{{- $dependencies := .Values.dependencies.proxy }}
+apiVersion: extensions/v1beta1
+kind: Deployment
+metadata:
+  name: swift-proxy
+spec:
+  replicas: {{ .Values.replicas.proxy }}
+  revisionHistoryLimit: {{ .Values.upgrades.revision_history }}
+  strategy:
+    type: {{ .Values.upgrades.pod_replacement_strategy }}
+    {{ if eq .Values.upgrades.pod_replacement_strategy "RollingUpdate" }}
+    rollingUpdate:
+      maxUnavailable: {{ .Values.upgrades.rolling_update.max_unavailable }}
+      maxSurge: {{ .Values.upgrades.rolling_update.max_surge }}
+    {{ end }}
+  template:
+    metadata:
+      labels:
+        app: swift-proxy
+      annotations:
+        configmap-bin-hash: {{ tuple "configmap-bin.yaml" . | include "helm-toolkit.hash" }}
+        configmap-etc-hash: {{ tuple "configmap-etc.yaml" . | include "helm-toolkit.hash" }}
+        pod.beta.kubernetes.io/init-containers: '[
+{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }}
+        ]'
+    spec:
+      nodeSelector:
+        {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }}
+      serviceAccount: default
+      containers:
+        - name: swift-proxy
+          image: {{ .Values.images.proxy }}
+          imagePullPolicy: {{ .Values.images.pull_policy }}
+          {{- if .Values.resources.enabled }}
+          resources:
+            requests:
+              memory: {{ .Values.resources.swift_proxy.requests.memory | quote }}
+              cpu: {{ .Values.resources.swift_proxy.requests.cpu | quote }}
+            limits:
+              memory: {{ .Values.resources.swift_proxy.limits.memory | quote }}
+              cpu: {{ .Values.resources.swift_proxy.limits.cpu | quote }}
+          {{- end }}
+          ports:
+            - containerPort: {{ .Values.swift.proxy_server.port }}
+          readinessProbe:
+            tcpSocket:
+              port: {{ .Values.swift.proxy_server.port }}
+          volumeMounts:
+            - name: pod-etc-swift
+              mountPath: /etc/swift
+            - name: proxyserverconf
+              mountPath: /etc/swift/proxy-server.conf
+              readOnly: true
+      volumes:
+        - name: pod-etc-swift
+          emptyDir: {}
+        - name: proxyserverconf
+          configMap:
+            name: swift-etc
+{{- if .Values.development.enabled }}
+        - name: swift-data
+          hostPath:
+            path: {{ .Values.development.storage_path }}
+{{- end }}
diff --git a/swift/templates/etc/_account-server.conf.tpl b/swift/templates/etc/_account-server.conf.tpl
new file mode 100644
index 00000000..007423ad
--- /dev/null
+++ b/swift/templates/etc/_account-server.conf.tpl
@@ -0,0 +1,261 @@
+[DEFAULT]
+# bind_ip = 0.0.0.0
+bind_port = {{ .Values.swift.account_server.bind_port }}
+user = {{ .Values.swift.user }}
+swift_dir = {{ .Values.swift.dir }}
+devices = {{ .Values.swift.account_server.devices }}
+mount_check = {{ .Values.swift.account_server.mount_check }}
+# bind_timeout = 30
+# backlog = 4096
+# disable_fallocate = false
+#
+# Use an integer to override the number of pre-forked processes that will
+# accept connections.
+# workers = auto
+#
+# Maximum concurrent requests per worker
+# max_clients = 1024
+#
+# You can specify default log routing here if you want:
+# log_name = swift
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+# The following caps the length of log lines to the value given; no limit if
+# set to 0, the default.
+# log_max_line_length = 0
+#
+# comma separated list of functions to call to setup custom log handlers.
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# If you don't mind the extra disk space usage in overhead, you can turn this +# on to preallocate disk space with SQLite databases to decrease fragmentation. +# db_preallocation = off +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes or percentage of disk +# space you'd like fallocate to reserve, whether there is space for the given +# file size or not. Percentage will be used if the value ends with a '%'. +# fallocate_reserve = 1% +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[pipeline:main] +pipeline = healthcheck recon account-server + +[app:account-server] +use = egg:swift#account +# You can override the default log routing for this app here: +# set log_name = account-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = true +# set log_address = /dev/log +# +# auto_create_account_prefix = . +# +# Configure parameter for creating specific server +# To handle all verbs, including replication verbs, do not specify +# "replication_server" (this is the default). To only handle replication, +# set to a True value (e.g. "True" or "1"). To handle only non-replication +# verbs, set to "False". Unless you have a separate replication network, you +# should not specify any value for "replication_server". Default is empty. +# replication_server = false +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" +# disable_path = + +[filter:recon] +use = egg:swift#recon +recon_cache_path = {{ .Values.swift.account_server.recon_cache_path }} + +[account-replicator] +# You can override the default log routing for this app here (don't use set!): +# log_name = account-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Maximum number of database rows that will be sync'd in a single HTTP +# replication request. 
Databases with less than or equal to this number of +# differing rows will always be sync'd using an HTTP replication request rather +# than using rsync. +# per_diff = 1000 +# +# Maximum number of HTTP replication requests attempted on each replication +# pass for any one container. This caps how long the replicator will spend +# trying to sync a given database per pass so the other databases don't get +# starved. +# max_diffs = 100 +# +# Number of replication workers to spawn. +# concurrency = 8 +# +# Time in seconds to wait between replication passes +# interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# +# node_timeout = 10 +# conn_timeout = 0.5 +# +# The replicator also performs reclamation +# reclaim_age = 604800 +# +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# rsync_compress = no +# +# Format of the rysnc module where the replicator will send data. See +# etc/rsyncd.conf-sample for some usage examples. +# rsync_module = {replication_ip}::account +# +# recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[account-auditor] +# You can override the default log routing for this app here (don't use set!): +# log_name = account-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Will audit each account at most once per interval +# interval = 1800 +# +# accounts_per_second = 200 +# recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[account-reaper] +# You can override the default log routing for this app here (don't use set!): +# log_name = account-reaper +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# concurrency = 25 +# interval = 3600 +# node_timeout = 10 +# conn_timeout = 0.5 +# +# Normally, the reaper begins deleting account information for deleted accounts +# immediately; you can set this to delay its work however. The value is in +# seconds; 2592000 = 30 days for example. +# delay_reaping = 0 +# +# If the account fails to be reaped due to a persistent error, the +# account reaper will log a message such as: +# Account has not been reaped since +# You can search logs for this message if space is not being reclaimed +# after you delete account(s). +# Default is 2592000 seconds (30 days). This is in addition to any time +# requested by delay_reaping. 
+# reap_warn_after = 2592000 +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/account.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/swift/templates/etc/_container-server.conf.tpl b/swift/templates/etc/_container-server.conf.tpl new file mode 100644 index 00000000..a228f055 --- /dev/null +++ b/swift/templates/etc/_container-server.conf.tpl @@ -0,0 +1,303 @@ +[DEFAULT] +# bind_ip = 0.0.0.0 +bind_port = {{ .Values.swift.container_server.bind_port }} +user = {{ .Values.swift.user }} +swift_dir = {{ .Values.swift.dir }} +devices = {{ .Values.swift.container_server.devices }} +mount_check = {{ .Values.swift.container_server.mount_check }} +# bind_timeout = 30 +# backlog = 4096 +# disable_fallocate = false +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# This is a comma separated list of hosts allowed in the X-Container-Sync-To +# field for containers. This is the old-style of using container sync. It is +# strongly recommended to use the new style of a separate +# container-sync-realms.conf -- see container-sync-realms.conf-sample +# allowed_sync_hosts = 127.0.0.1 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. 
+# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# If you don't mind the extra disk space usage in overhead, you can turn this +# on to preallocate disk space with SQLite databases to decrease fragmentation. +# db_preallocation = off +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes or percentage of disk +# space you'd like fallocate to reserve, whether there is space for the given +# file size or not. Percentage will be used if the value ends with a '%'. +# fallocate_reserve = 1% +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[pipeline:main] +pipeline = healthcheck recon container-server + +[app:container-server] +use = egg:swift#container +# You can override the default log routing for this app here: +# set log_name = container-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_requests = true +# set log_address = /dev/log +# +# node_timeout = 3 +# conn_timeout = 0.5 +# allow_versions = false +# auto_create_account_prefix = . +# +# Configure parameter for creating specific server +# To handle all verbs, including replication verbs, do not specify +# "replication_server" (this is the default). To only handle replication, +# set to a True value (e.g. "True" or "1"). To handle only non-replication +# verbs, set to "False". Unless you have a separate replication network, you +# should not specify any value for "replication_server". +# replication_server = false +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE" +# disable_path = + +[filter:recon] +use = egg:swift#recon +recon_cache_path = {{ .Values.swift.container_server.recon_cache_path }} + +[container-replicator] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-replicator +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Maximum number of database rows that will be sync'd in a single HTTP +# replication request. 
Databases with less than or equal to this number of +# differing rows will always be sync'd using an HTTP replication request rather +# than using rsync. +# per_diff = 1000 +# +# Maximum number of HTTP replication requests attempted on each replication +# pass for any one container. This caps how long the replicator will spend +# trying to sync a given database per pass so the other databases don't get +# starved. +# max_diffs = 100 +# +# Number of replication workers to spawn. +# concurrency = 8 +# +# Time in seconds to wait between replication passes +# interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# +# node_timeout = 10 +# conn_timeout = 0.5 +# +# The replicator also performs reclamation +# reclaim_age = 604800 +# +# Allow rsync to compress data which is transmitted to destination node +# during sync. However, this is applicable only when destination node is in +# a different region than the local one. +# rsync_compress = no +# +# Format of the rysnc module where the replicator will send data. See +# etc/rsyncd.conf-sample for some usage examples. +# rsync_module = {replication_ip}::container +# +# recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[container-updater] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-updater +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# interval = 300 +# concurrency = 4 +# node_timeout = 3 +# conn_timeout = 0.5 +# +# slowdown will sleep that amount between containers +# slowdown = 0.01 +# +# Seconds to suppress updating an account that has generated an error +# account_suppression_time = 60 +# +# recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[container-auditor] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-auditor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# Will audit each container at most once per interval +# interval = 1800 +# +# containers_per_second = 200 +# recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. 
I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[container-sync] +# You can override the default log routing for this app here (don't use set!): +# log_name = container-sync +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# If you need to use an HTTP Proxy, set it here; defaults to no proxy. +# You can also set this to a comma separated list of HTTP Proxies and they will +# be randomly used (simple load balancing). +# sync_proxy = http://10.1.1.1:8888,http://10.1.1.2:8888 +# +# Will sync each container at most once per interval +# interval = 300 +# +# Maximum amount of time to spend syncing each container per pass +# container_time = 60 +# +# Maximum amount of time in seconds for the connection attempt +# conn_timeout = 5 +# Server errors from requests will be retried by default +# request_tries = 3 +# +# Internal client config file path +# internal_client_conf_path = /etc/swift/internal-client.conf +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). +# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/container.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. 
+# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false + diff --git a/swift/templates/etc/_object-server.conf.tpl b/swift/templates/etc/_object-server.conf.tpl new file mode 100644 index 00000000..91f661cc --- /dev/null +++ b/swift/templates/etc/_object-server.conf.tpl @@ -0,0 +1,415 @@ +[DEFAULT] +# bind_ip = 0.0.0.0 +bind_port = {{ .Values.swift.object_server.bind_port }} +user = {{ .Values.swift.user }} +swift_dir = {{ .Values.swift.dir }} +devices = {{ .Values.swift.object_server.devices }} +mount_check = {{ .Values.swift.object_server.mount_check }} +# bind_timeout = 30 +# backlog = 4096 +# disable_fallocate = false +# expiring_objects_container_divisor = 86400 +# expiring_objects_account_name = expiring_objects +# +# Use an integer to override the number of pre-forked processes that will +# accept connections. NOTE: if servers_per_port is set, this setting is +# ignored. +# workers = auto +# +# Make object-server run this many worker processes per unique port of "local" +# ring devices across all storage policies. The default value of 0 disables this +# feature. +# servers_per_port = 0 +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# eventlet_debug = false +# +# You can set fallocate_reserve to the number of bytes or percentage of disk +# space you'd like fallocate to reserve, whether there is space for the given +# file size or not. Percentage will be used if the value ends with a '%'. +# fallocate_reserve = 1% +# +# Time to wait while attempting to connect to another backend node. +# conn_timeout = 0.5 +# Time to wait while sending each chunk of data to another backend node. +# node_timeout = 3 +# Time to wait while sending a container update on object update. +# container_update_timeout = 1.0 +# Time to wait while receiving each chunk of data from a client or another +# backend node. +# client_timeout = 60 +# +# network_chunk_size = 65536 +# disk_chunk_size = 65536 +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. 
+# ionice_class =
+# ionice_priority =
+
+[pipeline:main]
+pipeline = healthcheck recon object-server
+
+[app:object-server]
+use = egg:swift#object
+# You can override the default log routing for this app here:
+# set log_name = object-server
+# set log_facility = LOG_LOCAL0
+# set log_level = INFO
+# set log_requests = true
+# set log_address = /dev/log
+#
+# max_upload_time = 86400
+#
+# slow is the total amount of seconds an object PUT/DELETE request takes at
+# least. If it is faster, the object server will sleep this amount of time minus
+# the already passed transaction time. This is only useful for simulating slow
+# devices on storage nodes during testing and development.
+# slow = 0
+#
+# Objects smaller than this are not evicted from the buffercache once read
+# keep_cache_size = 5242880
+#
+# If true, objects for authenticated GET requests may be kept in buffer cache
+# if small enough
+# keep_cache_private = false
+#
+# on PUTs, sync data every n MB
+# mb_per_sync = 512
+#
+# Comma separated list of headers that can be set in metadata on an object.
+# This list is in addition to X-Object-Meta-* headers and cannot include
+# Content-Type, etag, Content-Length, or deleted
+# allowed_headers = Content-Disposition, Content-Encoding, X-Delete-At, X-Object-Manifest, X-Static-Large-Object
+#
+# auto_create_account_prefix = .
+#
+# Configure parameter for creating specific server
+# To handle all verbs, including replication verbs, do not specify
+# "replication_server" (this is the default). To only handle replication,
+# set to a True value (e.g. "True" or "1"). To handle only non-replication
+# verbs, set to "False". Unless you have a separate replication network, you
+# should not specify any value for "replication_server".
+# replication_server = false
+#
+# Set to restrict the number of concurrent incoming SSYNC requests
+# Set to 0 for unlimited
+# Note that SSYNC requests are only used by the object reconstructor or the
+# object replicator when configured to use ssync.
+# replication_concurrency = 4
+#
+# Restricts incoming SSYNC requests to one per device,
+# replication_concurrency above allowing. This can help control I/O to each
+# device, but you may wish to set this to False to allow multiple SSYNC
+# requests (up to the above replication_concurrency setting) per device.
+# replication_one_per_device = True
+#
+# Number of seconds to wait for an existing replication device lock before
+# giving up.
+# replication_lock_timeout = 15
+#
+# These next two settings control when the SSYNC subrequest handler will
+# abort an incoming SSYNC attempt. An abort will occur if there are at
+# least threshold number of failures and the value of failures / successes
+# exceeds the ratio. The defaults of 100 and 1.0 mean that at least 100
+# failures have to occur and there have to be more failures than successes for
+# an abort to occur.
+# replication_failure_threshold = 100
+# replication_failure_ratio = 1.0
+#
+# Use splice() for zero-copy object GETs. This requires Linux kernel
+# version 3.0 or greater. If you set "splice = yes" but the kernel
+# does not support it, error messages will appear in the object server
+# logs at startup, but your object servers should continue to function.
+#
+# splice = no
+#
+# You can set scheduling priority of processes. Niceness values range from -20
+# (most favorable to the process) to 19 (least favorable to the process).
+# nice_priority =
+#
+# You can set I/O scheduling class and priority of processes. I/O niceness
+# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
+# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
+# 0 to 7. The higher the value, the lower the I/O priority of the process.
+# Work only with ionice_class.
+# ionice_class =
+# ionice_priority =
+
+[filter:healthcheck]
+use = egg:swift#healthcheck
+# An optional filesystem path, which if present, will cause the healthcheck
+# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE"
+# disable_path =
+
+[filter:recon]
+use = egg:swift#recon
+recon_cache_path = {{ .Values.swift.object_server.recon_cache_path }}
+recon_lock_path = {{ .Values.swift.object_server.recon_lock_path }}
+
+[object-replicator]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-replicator
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# daemonize = on
+#
+# Time in seconds to wait between replication passes
+# interval = 30
+# run_pause is deprecated, use interval instead
+# run_pause = 30
+#
+# concurrency = 1
+# stats_interval = 300
+#
+# default is rsync, alternative is ssync
+# sync_method = rsync
+#
+# max duration of a partition rsync
+# rsync_timeout = 900
+#
+# bandwidth limit for rsync in kB/s. 0 means unlimited
+# rsync_bwlimit = 0
+#
+# passed to rsync for io op timeout
+# rsync_io_timeout = 30
+#
+# Allow rsync to compress data which is transmitted to destination node
+# during sync. However, this is applicable only when destination node is in
+# a different region than the local one.
+# NOTE: Objects that are already compressed (for example: .tar.gz, .mp3) might
+# slow down the syncing process.
+# rsync_compress = no
+#
+# Format of the rsync module where the replicator will send data. See
+# etc/rsyncd.conf-sample for some usage examples.
+# rsync_module = {replication_ip}::object
+#
+# node_timeout =
+# max duration of an http request; this is for REPLICATE finalization calls and
+# so should be longer than node_timeout
+# http_timeout = 60
+#
+# attempts to kill all workers if nothing replicates for lockup_timeout seconds
+# lockup_timeout = 1800
+#
+# The replicator also performs reclamation
+# reclaim_age = 604800
+#
+# ring_check_interval = 15
+# recon_cache_path = /var/cache/swift
+#
+# limits how long rsync error log lines are
+# 0 means to log the entire line
+# rsync_error_log_line_length = 0
+#
+# handoffs_first and handoff_delete are options for a special case
+# such as disk full in the cluster. These two options SHOULD NOT BE
+# CHANGED, except in such extreme situations (e.g. disks filled up
+# or are about to fill up. Anyway, DO NOT let your drives fill up)
+# handoffs_first is the flag to replicate handoffs prior to canonical
+# partitions. It allows you to force syncing and deleting handoffs quickly.
+# If set to a True value(e.g. "True" or "1"), partitions
+# that are not supposed to be on the node will be replicated first.
+# handoffs_first = False
+#
+# handoff_delete is the number of replicas which are ensured in swift.
+# If a number less than the number of replicas is set, object-replicator
+# could delete local handoffs even if all replicas are not ensured in the
+# cluster. Object-replicator would remove local handoff partition directories
+# after syncing a partition when the number of successful responses is greater
+# than or equal to this number.
By default(auto), handoff partitions will be +# removed when it has successfully replicated to all the canonical nodes. +# handoff_delete = auto +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[object-reconstructor] +# You can override the default log routing for this app here (don't use set!): +# Unless otherwise noted, each setting below has the same meaning as described +# in the [object-replicator] section, however these settings apply to the EC +# reconstructor +# +# log_name = object-reconstructor +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# daemonize = on +# +# Time in seconds to wait between reconstruction passes +# interval = 30 +# run_pause is deprecated, use interval instead +# run_pause = 30 +# +# concurrency = 1 +# stats_interval = 300 +# node_timeout = 10 +# http_timeout = 60 +# lockup_timeout = 1800 +# reclaim_age = 604800 +# ring_check_interval = 15 +# recon_cache_path = /var/cache/swift +# handoffs_first = False +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[object-updater] +# You can override the default log routing for this app here (don't use set!): +# log_name = object-updater +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_address = /dev/log +# +# interval = 300 +# concurrency = 1 +# node_timeout = +# slowdown will sleep that amount between objects +# slowdown = 0.01 +# +# recon_cache_path = /var/cache/swift +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. 
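+# For example (illustrative only, not a chart default), a deployment could
+# drop this background updater to the idle I/O class so that client-facing
+# I/O is served first:
+# ionice_class = IOPRIO_CLASS_IDLE
+#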
+# ionice_class =
+# ionice_priority =
+
+[object-auditor]
+# You can override the default log routing for this app here (don't use set!):
+# log_name = object-auditor
+# log_facility = LOG_LOCAL0
+# log_level = INFO
+# log_address = /dev/log
+#
+# Time in seconds to wait between auditor passes
+# interval = 30
+#
+# You can set the disk chunk size that the auditor uses, making it larger if
+# you like for more efficient local auditing of larger objects
+# disk_chunk_size = 65536
+# files_per_second = 20
+# concurrency = 1
+# bytes_per_second = 10000000
+# log_time = 3600
+# zero_byte_files_per_second = 50
+# recon_cache_path = /var/cache/swift
+
+# Takes a comma separated list of ints. If set, the object auditor will
+# increment a counter for every object whose size is <= to the given break
+# points and report the result after a full scan.
+# object_size_stats =
+#
+# You can set scheduling priority of processes. Niceness values range from -20
+# (most favorable to the process) to 19 (least favorable to the process).
+# nice_priority =
+#
+# You can set I/O scheduling class and priority of processes. I/O niceness
+# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and
+# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from
+# 0 to 7. The higher the value, the lower the I/O priority of the process.
+# Work only with ionice_class.
+# ionice_class =
+# ionice_priority =
+
+# The auditor will clean up old rsync tempfiles after they are "old
+# enough" to delete. You can configure the time elapsed in seconds
+# before rsync tempfiles will be unlinked, or the default value of
+# "auto" will try to use object-replicator's rsync_timeout + 900 and fall back
+# to 86400 (1 day).
+# rsync_tempfile_timeout = auto
+
+# Note: Put it at the beginning of the pipeline to profile all middleware. But
+# it is safer to put this after healthcheck.
+[filter:xprofile]
+use = egg:swift#xprofile
+# This option enables you to switch profilers, which should inherit from the
+# python standard profiler. Currently the supported values can be 'cProfile',
+# 'eventlet.green.profile' etc.
+# profile_module = eventlet.green.profile
+#
+# This prefix will be used to combine process ID and timestamp to name the
+# profile data file. Make sure the executing user has permission to write
+# into this path (missing path segments will be created, if necessary).
+# If you enable profiling in more than one type of daemon, you must override
+# it with a unique value like: /var/log/swift/profile/object.profile
+# log_filename_prefix = /tmp/log/swift/profile/default.profile
+#
+# the profile data will be dumped to local disk based on above naming rule
+# in this interval.
+# dump_interval = 5.0
+#
+# Be careful, this option will enable the profiler to dump data into the file
+# with a time stamp, which means there will be lots of files piled up in the
+# directory.
+# dump_timestamp = false
+#
+# This is the path of the URL to access the mini web UI.
+# path = /__profile__
+#
+# Clear the data when the wsgi server shuts down.
+# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false diff --git a/swift/templates/etc/_proxy-server.conf.tpl b/swift/templates/etc/_proxy-server.conf.tpl new file mode 100644 index 00000000..e421789a --- /dev/null +++ b/swift/templates/etc/_proxy-server.conf.tpl @@ -0,0 +1,845 @@ +[DEFAULT] +# bind_ip = 0.0.0.0 +bind_port = {{ .Values.swift.proxy_server.bind_port }} +# bind_timeout = 30 +# backlog = 4096 +swift_dir = {{ .Values.swift.dir }} +user = {{ .Values.swift.user }} + +# Enables exposing configuration settings via HTTP GET /info. +# expose_info = true + +# Key to use for admin calls that are HMAC signed. Default is empty, +# which will disable admin calls to /info. +# admin_key = secret_admin_key +# +# Allows the ability to withhold sections from showing up in the public calls +# to /info. You can withhold subsections by separating the dict level with a +# ".". The following would cause the sections 'container_quotas' and 'tempurl' +# to not be listed, and the key max_failed_deletes would be removed from +# bulk_delete. Default value is 'swift.valid_api_versions' which allows all +# registered features to be listed via HTTP GET /info except +# swift.valid_api_versions information +# disallowed_sections = swift.valid_api_versions, container_quotas, tempurl + +# Use an integer to override the number of pre-forked processes that will +# accept connections. Should default to the number of effective cpu +# cores in the system. It's worth noting that individual workers will +# use many eventlet co-routines to service multiple concurrent requests. +# workers = auto +# +# Maximum concurrent requests per worker +# max_clients = 1024 +# +# Set the following two lines to enable SSL. This is for testing only. +# cert_file = /etc/swift/proxy.crt +# key_file = /etc/swift/proxy.key +# +# expiring_objects_container_divisor = 86400 +# expiring_objects_account_name = expiring_objects +# +# You can specify default log routing here if you want: +# log_name = swift +# log_facility = LOG_LOCAL0 +# log_level = INFO +# log_headers = false +# log_address = /dev/log +# The following caps the length of log lines to the value given; no limit if +# set to 0, the default. +# log_max_line_length = 0 +# +# This optional suffix (default is empty) that would be appended to the swift transaction +# id allows one to easily figure out from which cluster that X-Trans-Id belongs to. +# This is very useful when one is managing more than one swift cluster. +# trans_id_suffix = +# +# comma separated list of functions to call to setup custom log handlers. +# functions get passed: conf, name, log_to_console, log_route, fmt, logger, +# adapted_logger +# log_custom_handlers = +# +# If set, log_udp_host will override log_address +# log_udp_host = +# log_udp_port = 514 +# +# You can enable StatsD logging here: +# log_statsd_host = +# log_statsd_port = 8125 +# log_statsd_default_sample_rate = 1.0 +# log_statsd_sample_rate_factor = 1.0 +# log_statsd_metric_prefix = +# +# Use a comma separated list of full url (http://foo.bar:1234,https://foo.bar) +# cors_allow_origin = +# strict_cors_mode = True +# +# client_timeout = 60 +# eventlet_debug = false +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. 
I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[pipeline:main] +# This sample pipeline uses tempauth and is used for SAIO dev work and +# testing. See below for a pipeline using keystone. +pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit tempauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server + +# The following pipeline shows keystone integration. Comment out the one +# above and uncomment this one. Additional steps for integrating keystone are +# covered further below in the filter sections for authtoken and keystoneauth. +#pipeline = catch_errors gatekeeper healthcheck proxy-logging cache container_sync bulk tempurl ratelimit authtoken keystoneauth copy container-quotas account-quotas slo dlo versioned_writes proxy-logging proxy-server + +[app:proxy-server] +use = egg:swift#proxy +account_autocreate = {{ .Values.swift.proxy_server.account_autocreate }} +# You can override the default log routing for this app here: +# set log_name = proxy-server +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_address = /dev/log +# +# log_handoffs = true +# recheck_account_existence = 60 +# recheck_container_existence = 60 +# object_chunk_size = 65536 +# client_chunk_size = 65536 +# +# How long the proxy server will wait on responses from the a/c/o servers. +# node_timeout = 10 +# +# How long the proxy server will wait for an initial response and to read a +# chunk of data from the object servers while serving GET / HEAD requests. +# Timeouts from these requests can be recovered from so setting this to +# something lower than node_timeout would provide quicker error recovery +# while allowing for a longer timeout for non-recoverable requests (PUTs). +# Defaults to node_timeout, should be overriden if node_timeout is set to a +# high number to prevent client timeouts from firing before the proxy server +# has a chance to retry. +# recoverable_node_timeout = node_timeout +# +# conn_timeout = 0.5 +# +# How long to wait for requests to finish after a quorum has been established. +# post_quorum_timeout = 0.5 +# +# How long without an error before a node's error count is reset. This will +# also be how long before a node is reenabled after suppression is triggered. +# error_suppression_interval = 60 +# +# How many errors can accumulate before a node is temporarily ignored. +# error_suppression_limit = 10 +# +# If set to 'true' any authorized user may create and delete accounts; if +# 'false' no one, even authorized, can. +# allow_account_management = false +# +# If set to 'true' authorized accounts that do not yet exist within the Swift +# cluster will be automatically created. +# account_autocreate = false +# +# If set to a positive value, trying to create a container when the account +# already has at least this maximum containers will result in a 403 Forbidden. +# Note: This is a soft limit, meaning a user might exceed the cap for +# recheck_account_existence before the 403s kick in. +# max_containers_per_account = 0 +# +# This is a comma separated list of account hashes that ignore the +# max_containers_per_account cap. 
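+# For example (illustrative only, hypothetical account names):
+# max_containers_whitelist = AUTH_admin, AUTH_monitoring
+#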
+# max_containers_whitelist = +# +# Comma separated list of Host headers to which the proxy will deny requests. +# deny_host_headers = +# +# Prefix used when automatically creating accounts. +# auto_create_account_prefix = . +# +# Depth of the proxy put queue. +# put_queue_depth = 10 +# +# Storage nodes can be chosen at random (shuffle), by using timing +# measurements (timing), or by using an explicit match (affinity). +# Using timing measurements may allow for lower overall latency, while +# using affinity allows for finer control. In both the timing and +# affinity cases, equally-sorting nodes are still randomly chosen to +# spread load. +# The valid values for sorting_method are "affinity", "shuffle", or "timing". +# sorting_method = shuffle +# +# If the "timing" sorting_method is used, the timings will only be valid for +# the number of seconds configured by timing_expiry. +# timing_expiry = 300 +# +# By default on a GET/HEAD swift will connect to a storage node one at a time +# in a single thread. There is smarts in the order they are hit however. If you +# turn on concurrent_gets below, then replica count threads will be used. +# With addition of the concurrency_timeout option this will allow swift to send +# out GET/HEAD requests to the storage nodes concurrently and answer with the +# first to respond. With an EC policy the parameter only affects HEAD requests. +# concurrent_gets = off +# +# This parameter controls how long to wait before firing off the next +# concurrent_get thread. A value of 0 would be fully concurrent, any other +# number will stagger the firing of the threads. This number should be +# between 0 and node_timeout. The default is what ever you set for the +# conn_timeout parameter. +# concurrency_timeout = 0.5 +# +# Set to the number of nodes to contact for a normal request. You can use +# '* replicas' at the end to have it use the number given times the number of +# replicas for the ring being used for the request. +# request_node_count = 2 * replicas +# +# Which backend servers to prefer on reads. Format is r for region +# N or rz for region N, zone M. The value after the equals is +# the priority; lower numbers are higher priority. +# +# Example: first read from region 1 zone 1, then region 1 zone 2, then +# anything in region 2, then everything else: +# read_affinity = r1z1=100, r1z2=200, r2=300 +# Default is empty, meaning no preference. +# read_affinity = +# +# Which backend servers to prefer on writes. Format is r for region +# N or rz for region N, zone M. If this is set, then when +# handling an object PUT request, some number (see setting +# write_affinity_node_count) of local backend servers will be tried +# before any nonlocal ones. +# +# Example: try to write to regions 1 and 2 before writing to any other +# nodes: +# write_affinity = r1, r2 +# Default is empty, meaning no preference. +# write_affinity = +# +# The number of local (as governed by the write_affinity setting) +# nodes to attempt to contact first, before any non-local ones. You +# can use '* replicas' at the end to have it use the number given +# times the number of replicas for the ring being used for the +# request. +# write_affinity_node_count = 2 * replicas +# +# These are the headers whose values will only be shown to swift_owners. The +# exact definition of a swift_owner is up to the auth system in use, but +# usually indicates administrative responsibilities. 
+# swift_owner_headers = x-container-read, x-container-write, x-container-sync-key, x-container-sync-to, x-account-meta-temp-url-key, x-account-meta-temp-url-key-2, x-container-meta-temp-url-key, x-container-meta-temp-url-key-2, x-account-access-control +# +# You can set scheduling priority of processes. Niceness values range from -20 +# (most favorable to the process) to 19 (least favorable to the process). +# nice_priority = +# +# You can set I/O scheduling class and priority of processes. I/O niceness +# class values are IOPRIO_CLASS_RT (realtime), IOPRIO_CLASS_BE (best-effort) and +# IOPRIO_CLASS_IDLE (idle). I/O niceness priority is a number which goes from +# 0 to 7. The higher the value, the lower the I/O priority of the process. +# Work only with ionice_class. +# ionice_class = +# ionice_priority = + +[filter:tempauth] +use = egg:swift#tempauth +# You can override the default log routing for this filter here: +# set log_name = tempauth +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# The reseller prefix will verify a token begins with this prefix before even +# attempting to validate it. Also, with authorization, only Swift storage +# accounts with this prefix will be authorized by this middleware. Useful if +# multiple auth systems are in use for one Swift cluster. +# The reseller_prefix may contain a comma separated list of items. The first +# item is used for the token as mentioned above. If second and subsequent +# items exist, the middleware will handle authorization for an account with +# that prefix. For example, for prefixes "AUTH, SERVICE", a path of +# /v1/SERVICE_account is handled the same as /v1/AUTH_account. If an empty +# (blank) reseller prefix is required, it must be first in the list. Two +# single quote characters indicates an empty (blank) reseller prefix. +# reseller_prefix = AUTH + +# +# The require_group parameter names a group that must be presented by +# either X-Auth-Token or X-Service-Token. Usually this parameter is +# used only with multiple reseller prefixes (e.g., SERVICE_require_group=blah). +# By default, no group is needed. Do not use .admin. +# require_group = + +# The auth prefix will cause requests beginning with this prefix to be routed +# to the auth subsystem, for granting tokens, etc. +# auth_prefix = /auth/ +# token_life = 86400 +# +# This allows middleware higher in the WSGI pipeline to override auth +# processing, useful for middleware such as tempurl and formpost. If you know +# you're not going to use such middleware and you want a bit of extra security, +# you can set this to false. +# allow_overrides = true +# +# This specifies what scheme to return with storage urls: +# http, https, or default (chooses based on what the server is running as) +# This can be useful with an SSL load balancer in front of a non-SSL server. +# storage_url_scheme = default +# +# Lastly, you need to list all the accounts/users you want here. The format is: +# user__ = [group] [group] [...] [storage_url] +# or if you want underscores in or , you can base64 encode them +# (with no equal signs) and use this format: +# user64__ = [group] [group] [...] [storage_url] +# There are special groups of: +# .reseller_admin = can do anything to any account for this auth +# .admin = can do anything within the account +# If neither of these groups are specified, the user can only access containers +# that have been explicitly allowed for them by a .admin or .reseller_admin. 
+# The trailing optional storage_url allows you to specify an alternate url to +# hand back to the user upon authentication. If not specified, this defaults to +# $HOST/v1/_ where $HOST will do its best to resolve +# to what the requester would need to use to reach this host. +# Here are example entries, required for running the tests: +user_admin_admin = admin .admin .reseller_admin +user_test_tester = testing .admin +user_test2_tester2 = testing2 .admin +user_test_tester3 = testing3 +user_test5_tester5 = testing5 service + +# To enable Keystone authentication you need to have the auth token +# middleware first to be configured. Here is an example below, please +# refer to the keystone's documentation for details about the +# different settings. +# +# You'll also need to have the keystoneauth middleware enabled and have it in +# your main pipeline, as show in the sample pipeline at the top of this file. +# +# Following parameters are known to work with keystonemiddleware v2.3.0 +# (above v2.0.0), but checking the latest information in the wiki page[1] +# is recommended. +# 1. http://docs.openstack.org/developer/keystonemiddleware/middlewarearchitecture.html#configuration +# +[filter:authtoken] +paste.filter_factory = keystonemiddleware.auth_token:filter_factory +auth_uri = {{ .Values.keystone.auth_uri }} +auth_url = {{ .Values.keystone.auth_url }} +auth_plugin = {{ .Values.keystone.swift_auth_plugin }} +project_domain_id = {{ .Values.keystone.swift_project_domain }} +user_domain_id = {{ .Values.keystone.swift_user_domain }} +project_name = {{ .Values.keystone.swift_project_name }} +username = {{ .Values.keystone.swift_user }} +password = {{ .Values.keystone.swift_password }} + +# delay_auth_decision defaults to False, but leaving it as false will +# prevent other auth systems, staticweb, tempurl, formpost, and ACLs from +# working. This value must be explicitly set to True. +# delay_auth_decision = False +# +# cache = swift.cache +# include_service_catalog = False +# + +[filter:keystoneauth] +use = egg:swift#keystoneauth +operator_roles = admin,user + +# The reseller_prefix option lists account namespaces that this middleware is +# responsible for. The prefix is placed before the Keystone project id. +# For example, for project 12345678, and prefix AUTH, the account is +# named AUTH_12345678 (i.e., path is /v1/AUTH_12345678/...). +# Several prefixes are allowed by specifying a comma-separated list +# as in: "reseller_prefix = AUTH, SERVICE". The empty string indicates a +# single blank/empty prefix. If an empty prefix is required in a list of +# prefixes, a value of '' (two single quote characters) indicates a +# blank/empty prefix. Except for the blank/empty prefix, an underscore ('_') +# character is appended to the value unless already present. +# reseller_prefix = AUTH +# +# The user must have at least one role named by operator_roles on a +# project in order to create, delete and modify containers and objects +# and to set and read privileged headers such as ACLs. +# If there are several reseller prefix items, you can prefix the +# parameter so it applies only to those accounts (for example +# the parameter SERVICE_operator_roles applies to the /v1/SERVICE_ +# path). If you omit the prefix, the option applies to all reseller +# prefix items. For the blank/empty prefix, prefix with '' (do not put +# underscore after the two single quote characters). 
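+# For example (illustrative only), with "reseller_prefix = AUTH, SERVICE" the
+# following would grant operator rights on /v1/SERVICE_ paths only:
+# SERVICE_operator_roles = admin, swiftoperator
+#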
+# operator_roles = admin, swiftoperator +# +# The reseller admin role has the ability to create and delete accounts +# reseller_admin_role = ResellerAdmin +# +# This allows middleware higher in the WSGI pipeline to override auth +# processing, useful for middleware such as tempurl and formpost. If you know +# you're not going to use such middleware and you want a bit of extra security, +# you can set this to false. +# allow_overrides = true +# +# If the service_roles parameter is present, an X-Service-Token must be +# present in the request that when validated, grants at least one role listed +# in the parameter. The X-Service-Token may be scoped to any project. +# If there are several reseller prefix items, you can prefix the +# parameter so it applies only to those accounts (for example +# the parameter SERVICE_service_roles applies to the /v1/SERVICE_ +# path). If you omit the prefix, the option applies to all reseller +# prefix items. For the blank/empty prefix, prefix with '' (do not put +# underscore after the two single quote characters). +# By default, no service_roles are required. +# service_roles = +# +# For backwards compatibility, keystoneauth will match names in cross-tenant +# access control lists (ACLs) when both the requesting user and the tenant +# are in the default domain i.e the domain to which existing tenants are +# migrated. The default_domain_id value configured here should be the same as +# the value used during migration of tenants to keystone domains. +# default_domain_id = default +# +# For a new installation, or an installation in which keystone projects may +# move between domains, you should disable backwards compatible name matching +# in ACLs by setting allow_names_in_acls to false: +# allow_names_in_acls = true + +[filter:healthcheck] +use = egg:swift#healthcheck +# An optional filesystem path, which if present, will cause the healthcheck +# URL to return "503 Service Unavailable" with a body of "DISABLED BY FILE". +# This facility may be used to temporarily remove a Swift node from a load +# balancer pool during maintenance or upgrade (remove the file to allow the +# node back into the load balancer pool). +# disable_path = + +[filter:cache] +use = egg:swift#memcache +memcache_servers = {{ include "memcached_host" . }}:11211 + +# You can override the default log routing for this filter here: +# set log_name = cache +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# If not set here, the value for memcache_servers will be read from +# memcache.conf (see memcache.conf-sample) or lacking that file, it will +# default to the value below. You can specify multiple servers separated with +# commas, as in: 10.1.2.3:11211,10.1.2.4:11211 (IPv6 addresses must +# follow rfc3986 section-3.2.2, i.e. [::1]:11211) +# memcache_servers = 127.0.0.1:11211 +# +# Sets how memcache values are serialized and deserialized: +# 0 = older, insecure pickle serialization +# 1 = json serialization but pickles can still be read (still insecure) +# 2 = json serialization only (secure and the default) +# If not set here, the value for memcache_serialization_support will be read +# from /etc/swift/memcache.conf (see memcache.conf-sample). +# To avoid an instant full cache flush, existing installations should +# upgrade with 0, then set to 1 and reload, then after some time (24 hours) +# set to 2 and reload. +# In the future, the ability to use pickle serialization will be removed. 
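+# For example (illustrative only), an existing installation would step through
+# the values during an upgrade as described above:
+# memcache_serialization_support = 0   (first step of the upgrade)
+# memcache_serialization_support = 1   (after all proxies have reloaded)
+# memcache_serialization_support = 2   (final step, secure default)
+#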
+# memcache_serialization_support = 2 +# +# Sets the maximum number of connections to each memcached server per worker +# memcache_max_connections = 2 +# +# More options documented in memcache.conf-sample + +[filter:ratelimit] +use = egg:swift#ratelimit +# You can override the default log routing for this filter here: +# set log_name = ratelimit +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# clock_accuracy should represent how accurate the proxy servers' system clocks +# are with each other. 1000 means that all the proxies' clock are accurate to +# each other within 1 millisecond. No ratelimit should be higher than the +# clock accuracy. +# clock_accuracy = 1000 +# +# max_sleep_time_seconds = 60 +# +# log_sleep_time_seconds of 0 means disabled +# log_sleep_time_seconds = 0 +# +# allows for slow rates (e.g. running up to 5 sec's behind) to catch up. +# rate_buffer_seconds = 5 +# +# account_ratelimit of 0 means disabled +# account_ratelimit = 0 + +# DEPRECATED- these will continue to work but will be replaced +# by the X-Account-Sysmeta-Global-Write-Ratelimit flag. +# Please see ratelimiting docs for details. +# these are comma separated lists of account names +# account_whitelist = a,b +# account_blacklist = c,d + +# with container_limit_x = r +# for containers of size x limit write requests per second to r. The container +# rate will be linearly interpolated from the values given. With the values +# below, a container of size 5 will get a rate of 75. +# container_ratelimit_0 = 100 +# container_ratelimit_10 = 50 +# container_ratelimit_50 = 20 + +# Similarly to the above container-level write limits, the following will limit +# container GET (listing) requests. +# container_listing_ratelimit_0 = 100 +# container_listing_ratelimit_10 = 50 +# container_listing_ratelimit_50 = 20 + +[filter:domain_remap] +use = egg:swift#domain_remap +# You can override the default log routing for this filter here: +# set log_name = domain_remap +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# storage_domain = example.com +# path_root = v1 + +# Browsers can convert a host header to lowercase, so check that reseller +# prefix on the account is the correct case. This is done by comparing the +# items in the reseller_prefixes config option to the found prefix. If they +# match except for case, the item from reseller_prefixes will be used +# instead of the found reseller prefix. When none match, the default reseller +# prefix is used. When no default reseller prefix is configured, any request +# with an account prefix not in that list will be ignored by this middleware. 
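+# For example (illustrative only), with storage_domain = example.com,
+# path_root = v1 and the defaults below, a request for
+#   http://AUTH_test.example.com/container/obj
+# would be remapped to the path /v1/AUTH_test/container/obj.
+#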
+# reseller_prefixes = AUTH +# default_reseller_prefix = + +[filter:catch_errors] +use = egg:swift#catch_errors +# You can override the default log routing for this filter here: +# set log_name = catch_errors +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +[filter:cname_lookup] +# Note: this middleware requires python-dnspython +use = egg:swift#cname_lookup +# You can override the default log routing for this filter here: +# set log_name = cname_lookup +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# Specify the storage_domain that match your cloud, multiple domains +# can be specified separated by a comma +# storage_domain = example.com +# +# lookup_depth = 1 + +# Note: Put staticweb just after your auth filter(s) in the pipeline +[filter:staticweb] +use = egg:swift#staticweb +# You can override the default log routing for this filter here: +# set log_name = staticweb +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log +# +# At times when it's impossible for staticweb to guess the outside +# endpoint correctly, the url_base may be used to supply the URL +# scheme and/or the host name (and port number) in order to generate +# redirects. +# Example values: +# http://www.example.com - redirect to www.example.com +# https: - changes the schema only +# https:// - same, changes the schema only +# //www.example.com:8080 - redirect www.example.com on port 8080 +# (schema unchanged) +# url_base = + +# Note: Put tempurl before dlo, slo and your auth filter(s) in the pipeline +[filter:tempurl] +use = egg:swift#tempurl +# The methods allowed with Temp URLs. +# methods = GET HEAD PUT POST DELETE +# +# The headers to remove from incoming requests. Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. incoming_allow_headers is a list of exceptions to these +# removals. +# incoming_remove_headers = x-timestamp +# +# The headers allowed as exceptions to incoming_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# incoming_allow_headers = +# +# The headers to remove from outgoing responses. Simply a whitespace delimited +# list of header names and names can optionally end with '*' to indicate a +# prefix match. outgoing_allow_headers is a list of exceptions to these +# removals. +# outgoing_remove_headers = x-object-meta-* +# +# The headers allowed as exceptions to outgoing_remove_headers. Simply a +# whitespace delimited list of header names and names can optionally end with +# '*' to indicate a prefix match. +# outgoing_allow_headers = x-object-meta-public-* + +# Note: Put formpost just before your auth filter(s) in the pipeline +[filter:formpost] +use = egg:swift#formpost + +# Note: Just needs to be placed before the proxy-server in the pipeline. 
+[filter:name_check]
+use = egg:swift#name_check
+# forbidden_chars = '"`<>
+# maximum_length = 255
+# forbidden_regexp = /\./|/\.\./|/\.$|/\.\.$
+
+[filter:list-endpoints]
+use = egg:swift#list_endpoints
+# list_endpoints_path = /endpoints/
+
+[filter:proxy-logging]
+use = egg:swift#proxy_logging
+# If not set, logging directives from [DEFAULT] without "access_" will be used
+# access_log_name = swift
+# access_log_facility = LOG_LOCAL0
+# access_log_level = INFO
+# access_log_address = /dev/log
+#
+# If set, access_log_udp_host will override access_log_address
+# access_log_udp_host =
+# access_log_udp_port = 514
+#
+# You can use log_statsd_* from [DEFAULT] or override them here:
+# access_log_statsd_host =
+# access_log_statsd_port = 8125
+# access_log_statsd_default_sample_rate = 1.0
+# access_log_statsd_sample_rate_factor = 1.0
+# access_log_statsd_metric_prefix =
+# access_log_headers = false
+#
+# If access_log_headers is True and access_log_headers_only is set, only
+# these headers are logged. Multiple headers can be defined as a comma separated
+# list like this: access_log_headers_only = Host, X-Object-Meta-Mtime
+# access_log_headers_only =
+#
+# By default, the X-Auth-Token is logged. To obscure the value,
+# set reveal_sensitive_prefix to the number of characters to log.
+# For example, if set to 12, only the first 12 characters of the
+# token appear in the log. An unauthorized access of the log file
+# won't allow unauthorized usage of the token. However, the first
+# 12 or so characters are unique enough that you can trace/debug
+# token usage. Set to 0 to suppress the token completely (replaced
+# by '...' in the log).
+# Note: reveal_sensitive_prefix will not affect the value
+# logged with access_log_headers=True.
+# reveal_sensitive_prefix = 16
+#
+# What HTTP methods are allowed for StatsD logging (comma-sep); request methods
+# not in this list will have "BAD_METHOD" for the portion of the metric.
+# log_statsd_valid_http_methods = GET,HEAD,POST,PUT,DELETE,COPY,OPTIONS
+#
+# Note: The double proxy-logging in the pipeline is not a mistake. The
+# left-most proxy-logging is there to log requests that were handled in
+# middleware and never made it through to the right-most middleware (and
+# proxy server). Double logging is prevented for normal requests. See
+# proxy-logging docs.
+
+# Note: Put before both ratelimit and auth in the pipeline.
+[filter:bulk]
+use = egg:swift#bulk
+# max_containers_per_extraction = 10000
+# max_failed_extractions = 1000
+# max_deletes_per_request = 10000
+# max_failed_deletes = 1000
+#
+# In order to keep a connection active during a potentially long bulk request,
+# Swift may return whitespace prepended to the actual response body. This
+# whitespace will be yielded no more than every yield_frequency seconds.
+# yield_frequency = 10
+#
+# Note: The following parameter is used during a bulk delete of objects and
+# their container. This would frequently fail because it is very likely
+# that not all replicated objects have been deleted by the time the middleware
+# gets a successful response. The number of retries can be configured here,
+# and the number of seconds to wait between each retry will be 1.5**retry
+# delete_container_retry_count = 0
+#
+# To speed up the bulk delete process, multiple deletes may be executed in
+# parallel. Avoid setting this too high, as it gives clients a force multiplier
+# which may be used in DoS attacks. The suggested range is between 2 and 10.
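+# For example (illustrative only), a conservative value within the suggested
+# range:
+# delete_concurrency = 4
+#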
+# delete_concurrency = 2 + +# Note: Put after auth and staticweb in the pipeline. +[filter:slo] +use = egg:swift#slo +# max_manifest_segments = 1000 +# max_manifest_size = 2097152 +# +# Rate limiting applies only to segments smaller than this size (bytes). +# rate_limit_under_size = 1048576 +# +# Start rate-limiting SLO segment serving after the Nth small segment of a +# segmented object. +# rate_limit_after_segment = 10 +# +# Once segment rate-limiting kicks in for an object, limit segments served +# to N per second. 0 means no rate-limiting. +# rate_limit_segments_per_sec = 1 +# +# Time limit on GET requests (seconds) +# max_get_time = 86400 +# +# When deleting with ?multipart-manifest=delete, multiple deletes may be +# executed in parallel. Avoid setting this too high, as it gives clients a +# force multiplier which may be used in DoS attacks. The suggested range is +# between 2 and 10. +# delete_concurrency = 2 + +# Note: Put after auth and staticweb in the pipeline. +# If you don't put it in the pipeline, it will be inserted for you. +[filter:dlo] +use = egg:swift#dlo +# Start rate-limiting DLO segment serving after the Nth segment of a +# segmented object. +# rate_limit_after_segment = 10 +# +# Once segment rate-limiting kicks in for an object, limit segments served +# to N per second. 0 means no rate-limiting. +# rate_limit_segments_per_sec = 1 +# +# Time limit on GET requests (seconds) +# max_get_time = 86400 + +# Note: Put after auth in the pipeline. +[filter:container-quotas] +use = egg:swift#container_quotas + +# Note: Put after auth in the pipeline. +[filter:account-quotas] +use = egg:swift#account_quotas + +[filter:gatekeeper] +use = egg:swift#gatekeeper +# Set this to false if you want to allow clients to set arbitrary X-Timestamps +# on uploaded objects. This may be used to preserve timestamps when migrating +# from a previous storage system, but risks allowing users to upload +# difficult-to-delete data. +# shunt_inbound_x_timestamp = true +# +# You can override the default log routing for this filter here: +# set log_name = gatekeeper +# set log_facility = LOG_LOCAL0 +# set log_level = INFO +# set log_headers = false +# set log_address = /dev/log + +[filter:container_sync] +use = egg:swift#container_sync +# Set this to false if you want to disallow any full url values to be set for +# any new X-Container-Sync-To headers. This will keep any new full urls from +# coming in, but won't change any existing values already in the cluster. +# Updating those will have to be done manually, as knowing what the true realm +# endpoint should be cannot always be guessed. +# allow_full_urls = true +# Set this to specify this clusters //realm/cluster as "current" in /info +# current = //REALM/CLUSTER + +# Note: Put it at the beginning of the pipeline to profile all middleware. But +# it is safer to put this after catch_errors, gatekeeper and healthcheck. +[filter:xprofile] +use = egg:swift#xprofile +# This option enable you to switch profilers which should inherit from python +# standard profiler. Currently the supported value can be 'cProfile', +# 'eventlet.green.profile' etc. +# profile_module = eventlet.green.profile +# +# This prefix will be used to combine process ID and timestamp to name the +# profile data file. Make sure the executing user has permission to write +# into this path (missing path segments will be created, if necessary). 
+# If you enable profiling in more than one type of daemon, you must override +# it with an unique value like: /var/log/swift/profile/proxy.profile +# log_filename_prefix = /tmp/log/swift/profile/default.profile +# +# the profile data will be dumped to local disk based on above naming rule +# in this interval. +# dump_interval = 5.0 +# +# Be careful, this option will enable profiler to dump data into the file with +# time stamp which means there will be lots of files piled up in the directory. +# dump_timestamp = false +# +# This is the path of the URL to access the mini web UI. +# path = /__profile__ +# +# Clear the data when the wsgi server shutdown. +# flush_at_shutdown = false +# +# unwind the iterator of applications +# unwind = false + +# Note: Put after slo, dlo in the pipeline. +# If you don't put it in the pipeline, it will be inserted automatically. +[filter:versioned_writes] +use = egg:swift#versioned_writes +# Enables using versioned writes middleware and exposing configuration +# settings via HTTP GET /info. +# WARNING: Setting this option bypasses the "allow_versions" option +# in the container configuration file, which will be eventually +# deprecated. See documentation for more details. +# allow_versioned_writes = false + +# Note: Put after auth and before dlo and slo middlewares. +# If you don't put it in the pipeline, it will be inserted for you. +[filter:copy] +use = egg:swift#copy +# Set object_post_as_copy = false to turn on fast posts where only the metadata +# changes are stored anew and the original data file is kept in place. This +# makes for quicker posts. +# When object_post_as_copy is set to True, a POST request will be transformed +# into a COPY request where source and destination objects are the same. +# object_post_as_copy = true + +# Note: To enable encryption, add the following 2 dependent pieces of crypto +# middleware to the proxy-server pipeline. They should be to the right of all +# other middleware apart from the final proxy-logging middleware, and in the +# order shown in this example: +# keymaster encryption proxy-logging proxy-server +[filter:keymaster] +use = egg:swift#keymaster + +# Sets the root secret from which encryption keys are derived. This must be set +# before first use to a value that is a base64 encoding of at least 32 bytes. +# The security of all encrypted data critically depends on this key, therefore +# it should be set to a high-entropy value. For example, a suitable value may +# be obtained by base-64 encoding a 32 byte (or longer) value generated by a +# cryptographically secure random number generator. Changing the root secret is +# likely to result in data loss. +encryption_root_secret = changeme + +# Sets the path from which the keymaster config options should be read. This +# allows multiple processes which need to be encryption-aware (for example, +# proxy-server and container-sync) to share the same config file, ensuring +# that the encryption keys used are the same. The format expected is similar +# to other config files, with a single [keymaster] section and a single +# encryption_root_secret option. If this option is set, the root secret +# MUST NOT be set in proxy-server.conf. +# keymaster_config_path = + +[filter:encryption] +use = egg:swift#encryption + +# By default all PUT or POST'ed object data and/or metadata will be encrypted. +# Encryption of new data and/or metadata may be disabled by setting +# disable_encryption to True. 
However, all encryption middleware should remain +# in the pipeline in order for existing encrypted data to be read. +# disable_encryption = False + diff --git a/swift/templates/etc/_swift.conf.tpl b/swift/templates/etc/_swift.conf.tpl new file mode 100644 index 00000000..ba3b7ef2 --- /dev/null +++ b/swift/templates/etc/_swift.conf.tpl @@ -0,0 +1,185 @@ +[swift-hash] + +# swift_hash_path_suffix and swift_hash_path_prefix are used as part of the +# hashing algorithm when determining data placement in the cluster. +# These values should remain secret and MUST NOT change +# once a cluster has been deployed. +# Use only printable chars (python -c "import string; print(string.printable)") + +swift_hash_path_suffix = {{ .Values.swift.swift_hash_path_suffix }} +swift_hash_path_prefix = {{ .Values.swift.swift_hash_path_prefix }} + +# storage policies are defined here and determine various characteristics +# about how objects are stored and treated. Policies are specified by name on +# a per container basis. Names are case-insensitive. The policy index is +# specified in the section header and is used internally. The policy with +# index 0 is always used for legacy containers and can be given a name for use +# in metadata however the ring file name will always be 'object.ring.gz' for +# backwards compatibility. If no policies are defined a policy with index 0 +# will be automatically created for backwards compatibility and given the name +# Policy-0. A default policy is used when creating new containers when no +# policy is specified in the request. If no other policies are defined the +# policy with index 0 will be declared the default. If multiple policies are +# defined you must define a policy with index 0 and you must specify a +# default. It is recommended you always define a section for +# storage-policy:0. Aliases are not required when defining a storage policy. +# +# A 'policy_type' argument is also supported but is not mandatory. Default +# policy type 'replication' is used when 'policy_type' is unspecified. +[storage-policy:0] +name = {{ .Values.swift.storage_policy_name }} +default = {{ .Values.swift.storage_default }} +#policy_type = replication +aliases = yellow, orange + +# the following section would declare a policy called 'silver', the number of +# replicas will be determined by how the ring is built. In this example the +# 'silver' policy could have a lower or higher # of replicas than the +# 'Policy-0' policy above. The ring filename will be 'object-1.ring.gz'. You +# may only specify one storage policy section as the default. If you changed +# this section to specify 'silver' as the default, when a client created a new +# container w/o a policy specified, it will get the 'silver' policy because +# this config has specified it as the default. However if a legacy container +# (one created with a pre-policy version of swift) is accessed, it is known +# implicitly to be assigned to the policy with index 0 as opposed to the +# current default. Note that even without specifying any aliases, a policy +# always has at least the default name stored in aliases because this field is +# used to contain all human readable names for a storage policy. +# +#[storage-policy:1] +#name = silver +#policy_type = replication + +# The following declares a storage policy of type 'erasure_coding' which uses +# Erasure Coding for data reliability. Please refer to Swift documentation for +# details on how the 'erasure_coding' storage policy is implemented. 
+# +# Swift uses PyECLib, a Python Erasure coding API library, for encode/decode +# operations. Please refer to Swift documentation for details on how to +# install PyECLib. +# +# When defining an EC policy, 'policy_type' needs to be 'erasure_coding' and +# EC configuration parameters 'ec_type', 'ec_num_data_fragments' and +# 'ec_num_parity_fragments' must be specified. 'ec_type' is chosen from the +# list of EC backends supported by PyECLib. The ring configured for the +# storage policy must have it's "replica" count configured to +# 'ec_num_data_fragments' + 'ec_num_parity_fragments' - this requirement is +# validated when services start. 'ec_object_segment_size' is the amount of +# data that will be buffered up before feeding a segment into the +# encoder/decoder. More information about these configuration options and +# supported `ec_type` schemes is available in the Swift documentation. Please +# refer to Swift documentation for details on how to configure EC policies. +# +# The example 'deepfreeze10-4' policy defined below is a _sample_ +# configuration with an alias of 'df10-4' as well as 10 'data' and 4 'parity' +# fragments. 'ec_type' defines the Erasure Coding scheme. +# 'liberasurecode_rs_vand' (Reed-Solomon Vandermonde) is used as an example +# below. +# +#[storage-policy:2] +#name = deepfreeze10-4 +#aliases = df10-4 +#policy_type = erasure_coding +#ec_type = liberasurecode_rs_vand +#ec_num_data_fragments = 10 +#ec_num_parity_fragments = 4 +#ec_object_segment_size = 1048576 + + +# The swift-constraints section sets the basic constraints on data +# saved in the swift cluster. These constraints are automatically +# published by the proxy server in responses to /info requests. + +[swift-constraints] + +# max_file_size is the largest "normal" object that can be saved in +# the cluster. This is also the limit on the size of each segment of +# a "large" object when using the large object manifest support. +# This value is set in bytes. Setting it to lower than 1MiB will cause +# some tests to fail. It is STRONGLY recommended to leave this value at +# the default (5 * 2**30 + 2). + +#max_file_size = 5368709122 + + +# max_meta_name_length is the max number of bytes in the utf8 encoding +# of the name portion of a metadata header. + +#max_meta_name_length = 128 + + +# max_meta_value_length is the max number of bytes in the utf8 encoding +# of a metadata value + +#max_meta_value_length = 256 + + +# max_meta_count is the max number of metadata keys that can be stored +# on a single account, container, or object + +#max_meta_count = 90 + + +# max_meta_overall_size is the max number of bytes in the utf8 encoding +# of the metadata (keys + values) + +#max_meta_overall_size = 4096 + +# max_header_size is the max number of bytes in the utf8 encoding of each +# header. Using 8192 as default because eventlet use 8192 as max size of +# header line. This value may need to be increased when using identity +# v3 API tokens including more than 7 catalog entries. +# See also include_service_catalog in proxy-server.conf-sample +# (documented in overview_auth.rst) + +#max_header_size = 8192 + + +# By default the maximum number of allowed headers depends on the number of max +# allowed metadata settings plus a default value of 36 for swift internally +# generated headers and regular http headers. If for some reason this is not +# enough (custom middleware for example) it can be increased with the +# extra_header_count constraint. 
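+# For example (illustrative only), to allow ten extra headers beyond the
+# computed default:
+#extra_header_count = 10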
+ +#extra_header_count = 0 + + +# max_object_name_length is the max number of bytes in the utf8 encoding +# of an object name + +#max_object_name_length = 1024 + + +# container_listing_limit is the default (and max) number of items +# returned for a container listing request + +#container_listing_limit = 10000 + + +# account_listing_limit is the default (and max) number of items returned +# for an account listing request +#account_listing_limit = 10000 + + +# max_account_name_length is the max number of bytes in the utf8 encoding +# of an account name + +#max_account_name_length = 256 + + +# max_container_name_length is the max number of bytes in the utf8 encoding +# of a container name + +#max_container_name_length = 256 + + +# By default all REST API calls should use "v1" or "v1.0" as the version string, +# for example "/v1/account". This can be manually overridden to make this +# backward-compatible, in case a different version string has been used before. +# Use a comma-separated list in case of multiple allowed versions, for example +# valid_api_versions = v0,v1,v2 +# This is only enforced for account, container and object requests. The allowed +# api versions are by default excluded from /info. + +# valid_api_versions = v1,v1.0 + diff --git a/swift/templates/job-db-init.yaml b/swift/templates/job-db-init.yaml new file mode 100644 index 00000000..0493ab1e --- /dev/null +++ b/swift/templates/job-db-init.yaml @@ -0,0 +1,79 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.db_init }} +apiVersion: batch/v1 +kind: Job +metadata: + name: swift-db-init +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: swift-db-init + image: {{ .Values.images.db_init }} + imagePullPolicy: {{ .Values.images.pull_policy }} + env: + - name: ROOT_DB_CONNECTION + valueFrom: + secretKeyRef: + name: {{ .Values.database.secret.root }} + key: DB_CONNECTION + - name: OPENSTACK_CONFIG_FILE + value: /etc/swift/swift.conf + - name: OPENSTACK_CONFIG_DB_SECTION + value: database + - name: OPENSTACK_CONFIG_DB_KEY + value: connection + {{- if .Values.resources.enabled }} + resources: + limits: + cpu: {{ .Values.resources.jobs.init.limits.cpu | quote }} + memory: {{ .Values.resources.jobs.init.limits.memory | quote }} + requests: + cpu: {{ .Values.resources.jobs.init.requests.cpu | quote }} + memory: {{ .Values.resources.jobs.init.requests.memory | quote }} + {{- end }} + command: + - python + - /tmp/db-create.py + volumeMounts: + - name: configmap-bin + mountPath: /tmp/db-create.py + subPath: db-create.py + readOnly: true + - name: pod-etc-service + mountPath: /etc/swift + - name: service-config + mountPath: /etc/swift/swift.conf + subPath: swift.conf + readOnly: true + volumes: + - name: configmap-bin + configMap: + name: swift-bin + - name: pod-etc-service + emptyDir: {} + - name: service-config + configMap: + name: swift-etc diff --git a/swift/templates/job-db-sync.yaml b/swift/templates/job-db-sync.yaml new file mode 100644 index 00000000..c047adcf --- /dev/null +++ b/swift/templates/job-db-sync.yaml @@ -0,0 +1,60 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.db_sync }} +apiVersion: batch/v1 +kind: Job +metadata: + name: swift-db-sync +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: swift-db-sync + image: {{ .Values.images.db_sync }} + imagePullPolicy: {{ .Values.images.pull_policy }} + {{- if .Values.resources.enabled }} + resources: + limits: + cpu: {{ .Values.resources.jobs.db_sync.limits.cpu | quote }} + memory: {{ .Values.resources.jobs.db_sync.limits.memory | quote }} + requests: + cpu: {{ .Values.resources.jobs.db_sync.requests.cpu | quote }} + memory: {{ .Values.resources.jobs.db_sync.requests.memory | quote }} + {{- end }} + command: + - bash + - /tmp/db-sync.sh + volumeMounts: + - name: pod-etc-swift + mountPath: /etc/swift + - name: swiftconf + mountPath: /etc/swift/swift.conf + subPath: swift.conf + readOnly: true + volumes: + - name: pod-etc-swift + emptyDir: {} + - name: swiftconf + configMap: + name: swift-etc diff --git a/swift/templates/job-ks-entrypoint.yaml b/swift/templates/job-ks-entrypoint.yaml new file mode 100644 index 00000000..a5dfa279 --- /dev/null +++ b/swift/templates/job-ks-entrypoint.yaml @@ -0,0 +1,79 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default values for swift. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + + +{{- $envAll := . 
}} +{{- $ksAdminSecret := $envAll.Values.keystone.admin_secret | default "swift-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_endpoints }} +apiVersion: batch/v1 +kind: Job +metadata: + name: swift-ks-endpoints +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + {{- range $key1, $osServiceType := tuple "proxy" }} + {{- range $key2, $osServiceEndPoint := tuple "admin" "internal" "public" }} + - name: {{ $osServiceType }}-ks-endpoints-{{ $osServiceEndPoint }} + image: {{ $envAll.Values.images.ks_endpoints }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.swift_ks_endpoints.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.swift_ks_endpoints.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.swift_ks_endpoints.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.swift_ks_endpoints.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-endpoints.sh + volumeMounts: + - name: ks-endpoints-sh + mountPath: /tmp/ks-endpoints.sh + subPath: ks-endpoints.sh + readOnly: true + env: + +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SVC_ENDPOINT + value: {{ $osServiceEndPoint }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} + - name: OS_SERVICE_ENDPOINT + value: {{ tuple $osServiceType $osServiceEndPoint "api" $envAll | include "helm-toolkit.endpoint_type_lookup_addr" }} +{{- end }} +{{- end }} + volumes: + - name: ks-endpoints-sh + configMap: + name: swift-bin \ No newline at end of file diff --git a/swift/templates/job-ks-service.yaml b/swift/templates/job-ks-service.yaml new file mode 100644 index 00000000..d340ee6d --- /dev/null +++ b/swift/templates/job-ks-service.yaml @@ -0,0 +1,68 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $envAll := . 
}} +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "swift-env-keystone-admin" }} +{{- $dependencies := .Values.dependencies.ks_service }} +apiVersion: batch/v1 +kind: Job +metadata: + name: swift-ks-service +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: +{{- range $key1, $osServiceType := tuple "proxy" }} + - name: {{ $osServiceType }}-ks-service-registration + image: {{ $envAll.Values.images.ks_service }} + imagePullPolicy: {{ $envAll.Values.images.pull_policy }} + {{- if $envAll.Values.resources.enabled }} + resources: + requests: + memory: {{ $envAll.Values.resources.swift_ks_service.requests.memory | quote }} + cpu: {{ $envAll.Values.resources.swift_ks_service.requests.cpu | quote }} + limits: + memory: {{ $envAll.Values.resources.swift_ks_service.limits.memory | quote }} + cpu: {{ $envAll.Values.resources.swift_ks_service.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-service.sh + volumeMounts: + - name: ks-service-sh + mountPath: /tmp/ks-service.sh + subPath: ks-service.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: OS_SERVICE_NAME + value: {{ tuple $osServiceType $envAll | include "helm-toolkit.endpoint_name_lookup" }} + - name: OS_SERVICE_TYPE + value: {{ $osServiceType }} +{{- end }} + volumes: + - name: ks-service-sh + configMap: + name: swift-bin + diff --git a/swift/templates/job-ks-user.yaml b/swift/templates/job-ks-user.yaml new file mode 100644 index 00000000..c92cbc0b --- /dev/null +++ b/swift/templates/job-ks-user.yaml @@ -0,0 +1,69 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +{{- $ksAdminSecret := .Values.keystone.admin_secret | default "swift-env-keystone-admin" }} +{{- $ksUserSecret := .Values.keystone.user_secret | default "swift-env-keystone-user" }} +{{- $envAll := . 
}} +{{- $dependencies := .Values.dependencies.ks_user }} +apiVersion: batch/v1 +kind: Job +metadata: + name: swift-ks-user +spec: + template: + metadata: + annotations: + pod.beta.kubernetes.io/init-containers: '[ +{{ tuple $envAll $dependencies | include "helm-toolkit.kubernetes_entrypoint_init_container" | indent 10 }} + ]' + spec: + restartPolicy: OnFailure + nodeSelector: + {{ .Values.labels.node_selector_key }}: {{ .Values.labels.node_selector_value }} + containers: + - name: swift-ks-user + image: {{ .Values.images.ks_user }} + imagePullPolicy: {{ .Values.images.pull_policy }} + {{- if .Values.resources.enabled }} + resources: + requests: + memory: {{ .Values.resources.swift_ks_user.requests.memory | quote }} + cpu: {{ .Values.resources.swift_ks_user.requests.cpu | quote }} + limits: + memory: {{ .Values.resources.swift_ks_user.limits.memory | quote }} + cpu: {{ .Values.resources.swift_ks_user.limits.cpu | quote }} + {{- end }} + command: + - bash + - /tmp/ks-user.sh + volumeMounts: + - name: ks-user-sh + mountPath: /tmp/ks-user.sh + subPath: ks-user.sh + readOnly: true + env: +{{- with $env := dict "ksUserSecret" $ksAdminSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_SERVICE_NAME + value: "swift" +{{- with $env := dict "ksUserSecret" $ksUserSecret }} +{{- include "helm-toolkit.keystone_openrc_env_vars" $env | indent 12 }} +{{- end }} + - name: SERVICE_OS_ROLE + value: {{ .Values.keystone.swift_user_role | quote }} + volumes: + - name: ks-user-sh + configMap: + name: swift-bin diff --git a/swift/templates/secret-db-root.env.yaml b/swift/templates/secret-db-root.env.yaml new file mode 100644 index 00000000..ec490e69 --- /dev/null +++ b/swift/templates/secret-db-root.env.yaml @@ -0,0 +1,2 @@ +{{- $envAll := . }} +{{- include "helm-toolkit.secret_db_root" $envAll }} diff --git a/swift/templates/secret-keystone-admin.env.yaml b/swift/templates/secret-keystone-admin.env.yaml new file mode 100644 index 00000000..490af635 --- /dev/null +++ b/swift/templates/secret-keystone-admin.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
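+#
+# NOTE: the keys below come from .Values.keystone and are base64 encoded with
+# b64enc inside literal block scalars; the ks-user, ks-service and
+# ks-endpoints jobs reference this secret (swift-env-keystone-admin by
+# default) through the helm-toolkit.keystone_openrc_env_vars snippet.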
+ +apiVersion: v1 +kind: Secret +metadata: + name: swift-env-keystone-admin +type: Opaque +data: + OS_AUTH_URL: | +{{ .Values.keystone.auth_url | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.admin_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.admin_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.admin_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.admin_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.admin_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.admin_password | b64enc | indent 4 }} diff --git a/swift/templates/secret-keystone-user.env.yaml b/swift/templates/secret-keystone-user.env.yaml new file mode 100644 index 00000000..c3dd53d2 --- /dev/null +++ b/swift/templates/secret-keystone-user.env.yaml @@ -0,0 +1,34 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Secret +metadata: + name: swift-env-keystone-user +type: Opaque +data: + OS_AUTH_URL: | +{{ .Values.keystone.auth_url | b64enc | indent 4 }} + OS_REGION_NAME: | +{{ .Values.keystone.swift_region_name | b64enc | indent 4 }} + OS_PROJECT_DOMAIN_NAME: | +{{ .Values.keystone.swift_project_domain | b64enc | indent 4 }} + OS_PROJECT_NAME: | +{{ .Values.keystone.swift_project_name | b64enc | indent 4 }} + OS_USER_DOMAIN_NAME: | +{{ .Values.keystone.swift_user_domain | b64enc | indent 4 }} + OS_USERNAME: | +{{ .Values.keystone.swift_user | b64enc | indent 4 }} + OS_PASSWORD: | +{{ .Values.keystone.swift_password | b64enc | indent 4 }} diff --git a/swift/templates/service-api.yaml b/swift/templates/service-api.yaml new file mode 100644 index 00000000..8a64e58b --- /dev/null +++ b/swift/templates/service-api.yaml @@ -0,0 +1,24 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: proxy +spec: + ports: + - name: {{ .Values.endpoints.proxy.hosts.default }} + port: {{ .Values.endpoints.proxy.port.api }} + selector: + app: swift-proxy diff --git a/swift/templates/service.yaml b/swift/templates/service.yaml new file mode 100644 index 00000000..a6820622 --- /dev/null +++ b/swift/templates/service.yaml @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +apiVersion: v1 +kind: Service +metadata: + name: proxy +spec: + ports: + - name: {{ .Values.endpoints.proxy.hosts.default }} + port: {{ .Values.endpoints.proxy.port.api }} + selector: + app: swift-proxy diff --git a/swift/values.yaml b/swift/values.yaml new file mode 100644 index 00000000..a599ad11 --- /dev/null +++ b/swift/values.yaml @@ -0,0 +1,280 @@ +# Copyright 2017 The Openstack-Helm Authors. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Default values for swift. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicas: + proxy: 1 + container: 1 + account: 1 + object: 1 + +development: + enabled: false + +storage: + proxy_directory: /var/lib/openstack-helm/ceph/proxy + account_directory: /var/lib/openstack-helm/ceph/account + container_directory: /var/lib/openstack-helm/ceph/container + object_directory: /var/lib/openstack-helm/ceph/object + +labels: + node_selector_key: openstack-control-plane + node_selector_value: enabled + +images: + db_init: quay.io/stackanetes/stackanetes-kolla-toolbox:newton + db_sync: kolla/centos-source-swift-proxy-server:3.0.2 + proxy: kolla/centos-source-swift-proxy-server:3.0.2 + account: kolla/centos-source-swift-account:3.0.2 + container: kolla/centos-source-swift-container:3.0.2 + object: kolla/centos-source-swift-object:3.0.2 + dep_check: quay.io/stackanetes/kubernetes-entrypoint:v0.1.1 + pull_policy: "IfNotPresent" + +upgrades: + revision_history: 3 + pod_replacement_strategy: RollingUpdate + rolling_update: + max_unavailable: 1 + max_surge: 3 + +keystone: + auth_uri: "http://keystone-api:5000" + auth_url: "http://keystone-api:35357" + admin_user: "admin" + admin_user_domain: "default" + admin_password: "password" + admin_project_name: "admin" + admin_project_domain: "default" + admin_region_name: "RegionOne" + + swift_auth_plugin: "password" + swift_user: "swift" + swift_user_domain: "default" + swift_user_role: "admin" + swift_password: "password" + swift_project_name: "service" + swift_project_domain: "default" + swift_region_name: "RegionOne" + +database: + secret: + root: swift-db-root + address: mariadb + port: 3306 + root_user: root + root_password: password + swift_database_name: swift + swift_password: password + swift_user: swift + +ceph: + enabled: true + monitors: [] + swift_user: "admin" + # a null value for the keyring will + # attempt to use the key from + # common/secrets/ceph-client-key + swift_keyring: null + +swift: + user: swift + dir: /etc/swift + max_header_size: 32768 + swift_hash_path_suffix: changeme + swift_hash_path_prefix: changeme + storage_policy_name: Policy-0 + 
storage_default: yes + part_power: 10 + min_part_hours: 1 + account_server: + recon_cache_path: /var/cache/swift + mount_check: true + devices: /srv/node + bind_port: 6202 + container_server: + recon_cache_path: /var/cache/swift + mount_check: true + devices: /srv/node + bind_port: 6201 + object_server: + recon_cache_path: /var/cache/swift + recon_lock_path: /var/lock + mount_check: true + devices: /srv/node + bind_port: 6200 + proxy_server: + bind_port: 8080 + +resources: + enabled: true + swift_proxy: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + swift_account: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + swift_object: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + swift_container: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + swift_ks_user: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + swift_ks_endpoints: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + swift_ks_service: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + jobs: + db_sync: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + init: + limits: + memory: "128Mi" + cpu: "500m" + requests: + memory: "128Mi" + cpu: "500m" + +backends: + enabled: + - rbd1 + rbd1: + secret: null + user: "admin" + pool: "volumes" + +dependencies: + db_init: + jobs: + - mariadb-seed + service: + - mariadb + db_sync: + jobs: + - swift-db-init + service: + - mariadb + ks_user: + service: + - keystone-api + ks_service: + service: + - keystone-api + ks_endpoints: + jobs: + - swift-ks-service + service: + - keystone-api + proxy: + jobs: + - swift-db-sync + - swift-ks-user + - swift-ks-endpoints + service: + - mariadb + - rabbitmq + - keystone-api + account: + jobs: + - swift-db-sync + - swift-ks-user + - swift-ks-endpoints + service: + - keystone-api + - swift-proxy + container: + jobs: + - swift-db-sync + - swift-ks-user + - swift-ks-endpoints + service: + - keystone-api + - swift-proxy + object: + jobs: + - swift-db-sync + - swift-ks-user + - swift-ks-endpoints + service: + - keystone-api + - swift-proxy + +# We use a different layout of the endpoints here to account for versioning +# this swaps the service name and type, and should be rolled out to other +# services. +endpoints: + identity: + name: keystone + hosts: + default: keystone-api + path: /v3 + scheme: 'http' + port: + admin: 35357 + api: 5000 + proxy: + name: swift-proxy + hosts: + default: object-store + path: '/v1/AUTH_%\(tenant_id\)s' + scheme: 'http' + port: + api: 8080 + +service: + api: + name: "swift-proxy" + port: 8080 + proto: "http" \ No newline at end of file
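
Deployment sketch (editorial note, not part of the patch): assuming the chart
has been built into the local repository alongside the other charts, the
following exercises the new templates end to end; the release and namespace
names are illustrative only.

    helm install --name swift ./swift --namespace=openstack
    kubectl get jobs,pods --namespace=openstack | grep swift
    # once the swift-ks-* jobs complete, the proxy should appear in keystone
    openstack catalog list | grep -A 1 swift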