diff --git a/.gitignore b/.gitignore index 87940c57..1afae1a1 100644 --- a/.gitignore +++ b/.gitignore @@ -1,14 +1,15 @@ -# Created by .ignore support plugin (hsz.mobi) +# IDEA .idea +.editorconfig .vscode + + +# dependencies +*/node_modules/ +*/node/ +*/build +*/package-lock.json +*/build +.DS_Store +terraform-server/logs *.swp -*.swo -*.pid -*.log -*.pyc -static/.DS_Store -/.idea/* -test.py -test/ -*_beta/ -terraform-ui/node_modules \ No newline at end of file diff --git a/Dockerfile b/Dockerfile index 260569d9..e109fd51 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,24 +1,14 @@ -FROM python:2.7.18-slim -LABEL maintainer = "Webank CTB Team" +FROM ccr.ccs.tencentyun.com/webankpartners/terrafrom-base:v1.0.3 -RUN sed -i 's/deb.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list && \ - sed -i 's/security.debian.org/mirrors.aliyun.com/g' /etc/apt/sources.list && \ - mkdir /data && mkdir -p /app/wecube_plugins_terraform +ENV BASE_HOME=/app/terraform -WORKDIR /app/wecube_plugins_terraform/ +RUN mkdir -p $BASE_HOME $BASE_HOME/conf $BASE_HOME/logs -COPY . . 
+ADD build/start.sh $BASE_HOME/ +ADD build/stop.sh $BASE_HOME/ +ADD build/default.json $BASE_HOME/conf/ +ADD terraform-server/terraform-server $BASE_HOME/ +ADD ui/dist $BASE_HOME/public -RUN mkdir -p /usr/local/share/terraform/plugins && \ - tar -zxvf /app/wecube_plugins_terraform/plugins/registry.terraform.io.tar.gz -C /usr/local/share/terraform/plugins && \ - rm -rf /app/wecube_plugins_terraform/plugins/registry.terraform.io.tar.gz && \ - rm -rf /app/wecube_plugins_terraform/bin/terraform_0.15.5_linux_amd64.zip && \ - ls /app/wecube_plugins_terraform/bin && \ - \cp /app/wecube_plugins_terraform/bin/terraform /usr/bin/terraform && \ - ls -la && \ - apt update && apt-get -y install gcc python-dev && \ - pip install -i http://mirrors.aliyun.com/pypi/simple/ --trusted-host mirrors.aliyun.com -r /app/wecube_plugins_terraform/requirements.txt && \ - chmod +x /app/wecube_plugins_terraform/bin/*.sh - -EXPOSE 8999 -CMD ["/app/wecube_plugins_terraform/bin/start.sh"] \ No newline at end of file +WORKDIR $BASE_HOME +ENTRYPOINT ["/bin/sh", "start.sh"] \ No newline at end of file diff --git a/Makefile b/Makefile index 724ee167..53307669 100644 --- a/Makefile +++ b/Makefile @@ -1,39 +1,38 @@ current_dir=$(shell pwd) -project_name=$(shell basename "${current_dir}") -version=${PLUGIN_VERSION} +version=$(PLUGIN_VERSION) +project_dir=$(shell basename "${current_dir}") clean: - rm -rf package + rm -rf terraform-server/terraform-server + rm -rf ui/dist + rm -rf ui/plugin +build: clean + chmod +x ./build/*.sh + docker run --rm -v $(current_dir):/go/src/github.com/WeBankPartners/$(project_dir) --name build_$(project_dir) ccr.ccs.tencentyun.com/webankpartners/golang-ext:v1.15.6 /bin/bash /go/src/github.com/WeBankPartners/$(project_dir)/build/build-server.sh + ./build/build-ui.sh $(current_dir) -image: clean - cd bin && unzip -o terraform_0.15.5_linux_amd64.zip - docker build -t $(project_name):$(version) . +image: build + docker build -t $(project_dir):$(version) . 
package: image - rm -rf package - mkdir -p package - cd terraform-ui && npm --registry https://registry.npm.taobao.org install --unsafe-perm - cd terraform-ui && npm rebuild node-sass - cd terraform-ui && npm run plugin - cd terraform-ui/dist && zip -9 -r ui.zip . - cd package && mv ../terraform-ui/dist/ui.zip . - cp doc/init.sql package/init.sql - cat doc/init_data.sql >> package/init.sql - cat doc/update.sql >> package/init.sql - cd package && sed -i 's/{{PLUGIN_VERSION}}/$(version)/' ../register.xml - cd package && sed -i 's/{{IMAGENAME}}/$(project_name):$(version)/g' ../register.xml - cd package && sed -i 's/{{CONTAINERNAME}}/$(project_name)-$(version)/g' ../register.xml - cd package && docker save -o image.tar $(project_name):$(version) - cp register.xml package/ - cd package && zip -9 $(project_name)-$(version).zip image.tar register.xml init.sql ui.zip - cd package && rm -f image.tar - docker rmi $(project_name):$(version) + mkdir -p plugin + cp -r ui/dist/* plugin/ + zip -r ui.zip plugin + rm -rf plugin + cp build/register.xml ./ + cp wiki/init.sql ./init.sql + sed -i "s~{{PLUGIN_VERSION}}~$(version)~g" ./register.xml + sed -i "s~{{REPOSITORY}}~$(project_dir)~g" ./register.xml + docker save -o image.tar $(project_dir):$(version) + zip $(project_dir)-$(version).zip image.tar init.sql register.xml ui.zip + rm -f register.xml init.sql ui.zip + rm -rf ./*.tar + docker rmi $(project_dir):$(version) upload: package - $(eval container_id:=$(shell docker run -v $(current_dir)/package:/package -itd --entrypoint=/bin/sh minio/mc)) + $(eval container_id:=$(shell docker run -v $(current_dir):/package -itd --entrypoint=/bin/sh minio/mc)) docker exec $(container_id) mc config host add wecubeS3 $(s3_server_url) $(s3_access_key) $(s3_secret_key) wecubeS3 - docker exec $(container_id) mc cp /package/$(project_name)-$(version).zip wecubeS3/wecube-plugin-package-bucket - docker stop $(container_id) + docker exec $(container_id) mc cp /package/$(project_dir)-$(version).zip 
wecubeS3/wecube-plugin-package-bucket docker rm -f $(container_id) - rm -rf $(project_name)-$(version).zip + rm -rf $(project_dir)-$(version).zip \ No newline at end of file diff --git a/apps/api/__init__.py b/apps/api/__init__.py deleted file mode 100644 index 114a0edd..00000000 --- a/apps/api/__init__.py +++ /dev/null @@ -1,218 +0,0 @@ - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/apps/api/apibase.py b/apps/api/apibase.py deleted file mode 100644 index 7f9d85bd..00000000 --- a/apps/api/apibase.py +++ /dev/null @@ -1,801 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import base64 -import json -import traceback -from lib.logs import logger -from lib.uuid_util import get_uuid -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import convert_value -from apps.common.convert_keys import read_output -from apps.common.convert_keys import output_values -from apps.common.convert_keys import output_line -from apps.common.convert_keys import convert_extend_propertys -from apps.background.lib.drivers.terraform_operate import TerraformResource -from apps.api.configer.provider import ProviderApi -from apps.background.resource.configr.history import HistoryObject -from apps.background.resource.configr.resource import ResourceObject -from apps.background.resource.configr.value_config import ValueConfigObject -from apps.background.resource.resource_base import CrsObject -from 
apps.api.conductor.provider import ProviderConductor -from apps.api.conductor.resource import ResourceConductor -from apps.api.conductor.valueReverse import ValueResetConductor - - -def fetech_property(instance_define, define_columns): - res = {} - for key, value in define_columns.items(): - if "." in key: - _keys = key.split(".") - tmp = instance_define - for x_key in _keys: - try: - x_key = int(x_key) - tmp = tmp[x_key] - except: - tmp = tmp.get(x_key) - - res[value] = tmp - else: - res[value] = instance_define.get(key) - - return res - - -class ApiBase(TerraformResource): - def __init__(self): - super(ApiBase, self).__init__() - self.resource_name = "" - self.resource_workspace = "" - self.owner_resource = "" - self.relation_resource = "" - self.resource_object = None - self.resource_keys_config = None - - def _flush_resobj(self): - self.resource_object = CrsObject(self.resource_name) - - def create_resource_exists(self, rid): - _exists_data = self.resource_object.ora_show(rid) - if _exists_data: - remote_status = self.flush_create_resource(rid=rid, origin_data=_exists_data) - - if _exists_data.get("is_deleted") and not remote_status: - logger.info("create resource check id exists and status is deleted, clear it") - HistoryObject().create(create_data={"id": rid, "resource": self.resource_name, - "ora_data": _exists_data}) - - self.resource_object.ora_delete(rid) - return - elif _exists_data.get("is_deleted") and remote_status: - logger.info("create resource remote exists, update db status") - _, data = self.resource_object.update(rid=rid, update_data={"is_deleted": 0}) - return data - elif not _exists_data.get("is_deleted") and not remote_status: - logger.info("db data exists, create resource remote not exists, clear db info") - HistoryObject().create(create_data={"id": rid, "resource": self.resource_name, - "ora_data": _exists_data}) - self.resource_object.ora_delete(rid) - return - else: - return _exists_data - else: - return - - def 
flush_create_resource(self, rid, origin_data): - path = self.get_workpath(rid=rid, provider=origin_data.get("provider"), - region=origin_data.get("region") - ) - - is_deleted = origin_data.get("is_deleted") - return self.refresh_remote_state(path, is_deleted=is_deleted) - - def refresh_remote_state(self, path, is_deleted=None): - if not os.path.exists(path): - if is_deleted: - logger.info("refresh resource path %s not exists" % path) - return [] - else: - raise local_exceptions.ResourceValidateError(self.resource_name, - "资源不是最新状态,请检查, 并确保数据文件状态正常") - - result = self.refresh(path) - return result.get("resources") - - def resource_info(self, provider): - ''' - - :param provider: - :return: - ''' - - self.resource_keys_config = ResourceObject().query_one(where_data={"provider": provider, - "resource_type": self.resource_name}) - if not self.resource_keys_config: - raise local_exceptions.ResourceConfigError("%s 资源未初始化完成配置" % self.resource_name) - - def values_config(self, provider): - ''' - - :param provider: - :return: - ''' - - return ValueConfigObject().resource_value_configs(provider, self.resource_name) - - def workspace_controller(self, rid, provider_name, region, provider_json): - _path = self.create_workpath(rid, - provider=provider_name, - region=region) - - if provider_json: - self.write_provider_define(_path, define_json=provider_json) - self.init_workspace(_path, provider_name) - - return _path - - def resource_filter_controller(self, provider_name, label_name, create_data, extend_info): - ''' - - :param provider_name: - :param label_name: - :param create_data: - :param extend_info: - :return: - ''' - - define_json, _ = ResourceConductor().conductor_apply_resource(provider=provider_name, - resource_name=self.resource_name, - label_name=label_name, - create_data=create_data, - extend_info=extend_info) - return define_json - - def read_output_controller(self, result): - result = self.formate_result(result) - logger.info(format_json_dumps(result)) - - res = 
self._read_output_result(result) - - if not res.get("resource_id"): - res["resource_id"] = self._fetch_id(result) - - return res - - def update_db_controller(self, rid, result, output_json): - resource_id = output_json.get("resource_id") - _update_data = {"status": "ok", "resource_id": resource_id, - "output_json": output_json, - "result_json": format_json_dumps(result)} - - return self.update_data(rid, data=_update_data) - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - 校验依赖的id的合法性 - :param kwargs: - :return: - ''' - - self.resource_info(provider) - return {} - - def formate_result(self, result): - ''' - 对 result 做处理 - :param result: - :return: - ''' - - return result - - def save_data(self, rid, provider, - provider_id, region, zone, - owner_id, relation_id, extend_info, - define_json, status, result_json, - create_data, **kwargs): - - ''' - - :param rid: - :param provider: - :param provider_id: - :param region: - :param zone: - :param owner_id: - :param relation_id: - :param extend_info: - :param define_json: - :param status: - :param result_json: - :param kwargs: - :return: - ''' - - create_data.update(kwargs) - if owner_id and self.owner_resource: - owner_id = self.owner_resource + "_" + owner_id - if relation_id and self.relation_resource: - relation_id = self.relation_resource + "_" + relation_id - - self.resource_object.create(create_data={"id": rid, "provider": provider, - "provider_id": provider_id, - "region": region, "zone": zone, - "resource_name": self.resource_name, - "owner_id": owner_id, - "relation_id": relation_id, - "propertys": create_data, - "status": status, - "extend_info": extend_info, - "define_json": define_json, - "result_json": result_json}) - - def update_data(self, rid, data): - ''' - - :param rid: - :param data: - :return: - ''' - if data.get("extend_info"): - if isinstance(data["extend_info"], dict): - data["extend_info"] = format_json_dumps(data["extend_info"]) - - return 
self.resource_object.update(rid, data) - - def rollback_data(self, rid): - ''' - - :param rid: - :return: - ''' - try: - self.resource_object.ora_delete(rid) - except: - logger.info(traceback.format_exc()) - - def _fetch_id(self, result): - ''' - - :param result: - :return: - ''' - - try: - _data = result.get("resources")[0] - _instances = _data.get("instances")[0] - _attributes = _instances.get("attributes") - return _attributes.get("id")[:62] or "0000000" - except: - logger.info(traceback.format_exc()) - raise ValueError("result can not fetch id") - - def _read_output_result(self, result): - ''' - 对于设置了output的属性, 则提取output输出值 - :param result: - :return: - ''' - - models = self.resource_keys_config["resource_output"] - if models: - result_output = result.get("outputs") - - ext_result = {} - for column, res in result_output.items(): - _out_dict = read_output(key=column, define=models.get(column), - result=res.get("value")) - ext_result.update(_out_dict) - - if "resource_id" in ext_result.keys(): - if len(ext_result["resource_id"]) > 63: - ext_result["resource_id"] = ext_result["resource_id"][:62] - logger.info("resource id length more than 64, will truncated for resource_id") - - logger.info(format_json_dumps(ext_result)) - return ext_result - - return {} - - def _run_create_and_read_result(self, rid, provider, region, provider_info, define_json): - ''' - - :param rid: - :param provider: - :param region: - :param provider_info: - :param define_json: - :return: - ''' - - _path = "" - try: - _path = self.workspace_controller(rid, provider, region, provider_info) - self.write_define(rid, _path, define_json=define_json) - - result = self.run(_path) - return result - except Exception, e: - self.rollback_data(rid) - if _path: - self.rollback_workspace(_path) - raise e - - def run_create(self, rid, region, zone, - provider_object, provider_info, - owner_id, relation_id, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param region: - :param zone: - :param 
owner_id: - :param relation_id: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - extend_info = extend_info or {} - label_name = self.resource_name + "_" + rid - - define_json = self.resource_filter_controller(provider_name=provider_object["name"], - label_name=label_name, - create_data=create_data, - extend_info=extend_info) - - self.save_data(rid, provider=provider_object["name"], - provider_id=provider_object["id"], - region=region, zone=zone, - owner_id=owner_id, - relation_id=relation_id, - extend_info=extend_info, - define_json=define_json, - status="applying", - create_data=create_data, - result_json={}, **kwargs) - - result = self._run_create_and_read_result(rid, provider=provider_object["name"], - region=region, provider_info=provider_info, - define_json=define_json) - - output_json = self.read_output_controller(result) - count, res = self.update_db_controller(rid, result, output_json) - return count, self.result_return_controller(res) - - def result_return_controller(self, result): - info = {} - for key in ["id", "provider", "provider_id", "region", "zone", - "resource_name", "resource_id", "owner_id", "relation_id", - "status", "created_time", "updated_time"]: - info[key] = result.get(key) - - info.update(result.get("propertys", {})) - info.update(result.get("extend_info", {})) - info.update(result.get("output_json", {})) - return info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {} - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - return None, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - 
provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data, - provider=provider_object["name"]) - - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, **kwargs) - - return count, res - - def destroy(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.rewrite_state(_path, state_file=resource_info["result_json"]) - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - if not self.ensure_provider_file(_path): - provider_object, provider_info = ProviderApi().provider_info(resource_info.get("provider_id"), - region=resource_info.get("region")) - - self.workspace_controller(rid, provider_name=resource_info["provider"], - region=resource_info["region"], provider_json=provider_info) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - def _run_upgrade_and_read_result(self, rid, provider, region, define_json): - ''' - - :param rid: - :param provider: - :param region: - :param provider_info: - :param define_json: - :return: - ''' - - _path = "" - 
backupfile = "" - try: - _path = self.workspace_controller(rid, provider, region, provider_json={}) - backupfile = self.write_define(rid, _path, define_json=define_json) - - result = self.run(_path) - return result - except Exception, e: - logger.info("update %s %s failed, and define file updated,the origin file: %s" % (self.resource_name, - rid, - backupfile)) - raise e - - def resource_upgrade_controller(self, provider_name, label_name, update_data, extend_info, origin_data): - ''' - - :param provider_name: - :param label_name: - :param update_data: - :param extend_info: - :return: - ''' - - define_json, _ = ResourceConductor().conductor_upgrade_resource(provider=provider_name, - resource_name=self.resource_name, - label_name=label_name, - update_data=update_data, - extend_info=extend_info, - origin_data=origin_data) - return define_json - - def run_update(self, rid, region, zone, - owner_id, relation_id, origin_data, - update_data, extend_info, **kwargs): - ''' - - :param rid: - :param region: - :param zone: - :param owner_id: - :param relation_id: - :param origin_data: - :param update_data: - :param extend_info: - :param kwargs: - :return: - ''' - - extend_info = extend_info or {} - label_name = self.resource_name + "_" + rid - - define_json = self.resource_upgrade_controller(provider_name=origin_data.get("provider"), - label_name=label_name, - update_data=update_data, - extend_info=extend_info, - origin_data=origin_data.get("define_json")) - - result = self._run_upgrade_and_read_result(rid, provider=origin_data.get("provider"), - region=region, define_json=define_json) - - output_json = self.read_output_controller(result) - # count, res = self.update_db_controller(rid, result, output_json) - - _propertys = origin_data.get("propertys", {}) - _propertys.update(kwargs) - _propertys.update(update_data) - - _extend_info = origin_data.get("extend_info") - _extend_info.update(extend_info) - - count, res = self.update_metadata(rid=rid, owner_id=owner_id, - 
relation_id=relation_id, - extend_info=_extend_info, - define_json=define_json, - status="ok", update_data=_propertys, - output_json=output_json, - result_json=result) - return count, self.result_return_controller(res) - - def update_metadata(self, rid, owner_id, relation_id, extend_info, - define_json, status, update_data, - output_json, result_json, **kwargs): - - ''' - - :param rid: - :param owner_id: - :param relation_id: - :param extend_info: - :param define_json: - :param status: - :param update_data: - :param kwargs: - :return: - ''' - - save_data = {"propertys": update_data, - "status": status, - "extend_info": extend_info, - "define_json": define_json, - "output_json": output_json, - "result_json": result_json} - - update_data.update(kwargs) - if owner_id and self.owner_resource: - save_data["owner_id"] = self.owner_resource + "_" + owner_id - if relation_id and self.relation_resource: - save_data["relation_id"] = self.relation_resource + "_" + relation_id - - return self.resource_object.update(rid=rid, update_data=save_data) - - def _generate_update_data(self, rid, provider, define_json, update_data, extend_info): - self.resource_info(provider) - resource_values_config = self.values_config(provider) - - resource_name = self.resource_keys_config["resource_name"] - resource_property = self.resource_keys_config["resource_property"] - resource_extend_info = self.resource_keys_config["extend_info"] - - resource_columns = {} - for key, value in update_data.items(): - if resource_values_config.get(key): - _values_configs = resource_values_config.get(key) - value = convert_value(value, _values_configs.get(value)) - - resource_columns[key] = value - - resource_columns = convert_keys(resource_columns, defines=resource_property, is_update=True) - if extend_info: - _extend_columns = convert_extend_propertys(datas=extend_info, - extend_info=resource_extend_info, - is_update=True) - resource_columns.update(_extend_columns) - - _t = 
define_json["resource"][resource_name] - label_name = self.resource_name + "_" + rid - origin_columns = _t[label_name] - - origin_columns.update(resource_columns) - - define_json["resource"] = { - resource_name: { - label_name: origin_columns - } - } - logger.info(format_json_dumps(define_json)) - return define_json - - def generate_update_data(self, zone, update_data, **kwargs): - r_update_data = {} - return update_data, r_update_data - - def generate_owner_update_data(self, update_data, **kwargs): - return None, None - - def update(self, rid, provider, region, zone, - update_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param secret: - :param update_data: - :param extend_info: - :param kwargs: - :return: - ''' - - resource_obj = self.resource_object.show(rid) - if not resource_obj: - raise local_exceptions.ResourceNotFoundError("%s:%s 不存在" % (self.resource_name, rid)) - - extend_info = extend_info or {} - - zone = ProviderConductor().zone_info(provider=resource_obj["provider"], zone=zone) - # x_create_data, r_create_data = self.generate_create_data(zone, create_data, - # provider=provider_object["name"]) - - x_update_data, r_update_data = self.generate_update_data(zone, update_data, - provider=resource_obj["provider"]) - - _relations_id_dict = self.before_keys_checks(provider=resource_obj["provider"], - create_data=x_update_data) - - x_update_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_update_data(update_data) - count, res = self.run_update(rid=rid, region=resource_obj["region"], - zone=zone, owner_id=owner_id, - relation_id=relation_id, - origin_data=resource_obj, - update_data=x_update_data, - extend_info=extend_info, **kwargs) - - return count, res - - def source_filter_controller(self, provider_name, label_name, query_data): - ''' - - :param provider_name: - :param label_name: - :param query_data: - :return: - ''' - - define_json, resource_keys_config = 
ResourceConductor().conductor_reset_resource(provider=provider_name, - resource_name=self.resource_name, - label_name=label_name, - resource_data=query_data) - return define_json, resource_keys_config - - def read_data_output_controller(self, result): - result_output = result.get("outputs") - - ext_result = {} - for column, res in result_output.items(): - _out_dict = read_output(key=column, define=column, - result=res.get("value")) - ext_result.update(_out_dict) - - logger.info(format_json_dumps(ext_result)) - return ext_result - - def read_query_result_controller(self, provider, result, data_source_argument): - # instance_define = {} - if not data_source_argument: - raise ValueError("data_source_argument not config") - - logger.info(format_json_dumps(result)) - try: - _data = result.get("resources")[0] - _instances = _data.get("instances")[0] - _attributes = _instances.get("attributes") - - outlines = data_source_argument.split(".") - for outline in outlines: - _attributes = _attributes.get(outline) - - instance_list = _attributes - # todo dict change to list - instance_define = instance_list - except: - logger.info(traceback.format_exc()) - raise ValueError("query remote source failed, result read faild") - - define_columns = ResourceConductor().conductor_reset_filter(provider, self.resource_name) - - res = [] - for out_data in instance_define: - x_res = fetech_property(out_data, define_columns) - res.append(x_res) - - # output_json = self.read_data_output_controller(result) - - # res.update(output_json) - logger.info(format_json_dumps(res)) - return res - - def run_query(self, rid, region, zone, - provider_object, provider_info, - query_data, **kwargs): - ''' - - :param rid: - :param region: - :param zone: - :param owner_id: - :param relation_id: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - # extend_info = extend_info or {} - label_name = self.resource_name + "_q_" + rid - - define_json, resource_keys_config = 
self.source_filter_controller(provider_name=provider_object["name"], - label_name=label_name, - query_data=query_data - ) - - result = self._run_create_and_read_result(rid, provider=provider_object["name"], - region=region, provider_info=provider_info, - define_json=define_json) - - data_source_argument = resource_keys_config.get("data_source_argument") - output_json = self.read_query_result_controller(provider_object["name"], result, - data_source_argument) - - result_list = [] - for out_data in output_json: - x_json = ValueResetConductor().reset_values(provider=provider_object["name"], - resource_name=self.resource_name, - data=out_data) - result_list.append(x_json) - - return result_list - - def get_remote_source(self, rid, provider, region, zone, secret, - resource_id, **kwargs): - - rid = rid or resource_id or "rand_%s" % (get_uuid()) - - query_data = {} - if resource_id: - query_data = {"resource_id": resource_id} - - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - result = self.run_query(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - query_data=query_data, **kwargs) - - res = [] - - if resource_id: - for x_result in result: - x_result["resource_id"] = resource_id - res.append(x_result) - else: - res = result - - return res diff --git a/apps/api/apibase_backend.py b/apps/api/apibase_backend.py deleted file mode 100644 index d11939a1..00000000 --- a/apps/api/apibase_backend.py +++ /dev/null @@ -1,410 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import time -# import copy -import traceback -from lib.logs import logger -from lib.uuid_util import get_uuid -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.background.lib.drivers.terraform_operate import TerraformResource -from apps.background.resource.configr.history 
import HistoryObject -from apps.background.resource.configr.resource import ResourceObject -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.conductor.resource import ResourceConductor -from apps.api.conductor.valueConfiger import ValueConductor -from apps.api.conductor.apply_output_conductor import read_output_result -from apps.api.conductor.apply_data_conductor import apply_data_builder -from apps.api.conductor.source_data_conductor import query_data_builder -from apps.api.conductor.source_data_conductor import query_return_builder -# from apps.api.conductor.source_output_conductor import source_object_outer -from apps.api.conductor.source_output_conductor import read_source_output -from apps.api.conductor.source_output_conductor import SourceOuterReader -from apps.api.conductor.source_output_conductor import read_outer_property - - -class ApiBackendBase(TerraformResource): - def __init__(self, resource_name=None, resource_workspace=None): - super(ApiBackendBase, self).__init__() - self.resource_name = resource_name or "" - self.resource_workspace = resource_workspace or "" - self.owner_resource = "" - self.relation_resource = "" - self.resource_object = None - self.resource_keys_config = None - - def _flush_resobj(self): - self.resource_object = CrsObject(self.resource_name) - - def workspace_controller(self, rid, provider, region, provider_info): - # 创建workspace目录,写入provider信息并初始化目录 - _path = self.create_workpath(rid, provider=provider, region=region) - if provider_info: - self.write_provider_define(_path, define_json=provider_info) - - self.init_workspace(_path, provider) - return _path - - def get_resource_object(self, provider): - resource_keys_config = ResourceObject().query_one(where_data={"provider": provider, - "resource_type": self.resource_name}) - if not resource_keys_config: - raise local_exceptions.ResourceConfigError("%s 资源未初始化完成配置" % self.resource_name) - - 
return resource_keys_config - - def get_values_config(self, provider): - return ValueConductor().values_config(provider, self.resource_name) - - def get_secret(self, provider, region, secret, provider_data): - # 获取认证信息 - return ProviderConductor().producer_secret_info(provider=provider, region=region, - secret=secret, provider_data=provider_data) - - def conductor_apply_data(self, provider, data, label_name, defines, resource_values_config): - # 生成apply resource信息 - create_data = apply_data_builder(provider=provider, datas=data, - defines=defines["resource_property"], - resource_values_config=resource_values_config, - resource_name=self.resource_name) - - logger.info(format_json_dumps(create_data)) - return ResourceConductor().conductor_apply_data(label_name, create_data, - ora_resource_name=defines["resource_name"]) - - def insert_or_update(self, rid, provider, provider_id, region, zone, - resource_id, create_data, extend_info, - define_json, status, result_json, output_json, - owner_id=None, relation_id=None, **kwargs): - - create_data.update(kwargs) - if owner_id and self.owner_resource: - owner_id = self.owner_resource + "_" + owner_id - if relation_id and self.relation_resource: - relation_id = self.relation_resource + "_" + relation_id - - create_data = {"id": rid, "provider": provider, "provider_id": provider_id, - "region": region, "zone": zone, "resource_name": self.resource_name, - "owner_id": owner_id, "relation_id": relation_id, - "propertys": create_data, "extend_info": extend_info, - "status": status, "define_json": define_json, - "resource_id": resource_id, "output_json": output_json, - "result_json": format_json_dumps(result_json)} - - _exists_data = self.resource_object.ora_show(rid) - if _exists_data: - if _exists_data.get("is_deleted"): - logger.info("create id exists but status is deleted, backup and update it") - HistoryObject().create({"id": rid, "resource": self.resource_name, "ora_data": _exists_data}) - create_data.update({"is_deleted": 0, 
"deleted_time": None}) - - create_data.pop("id", None) - return self.resource_object.ora_update(rid, create_data) - else: - return self.resource_object.create(create_data=create_data) - - def rewrite_resource_state(self, rid, provider, region, provider_info, workpath, exists_data): - # 重写state文件 - logger.info("recovery state ..") - self.workspace_controller(rid, provider, region, provider_info) - self.rewrite_state(workpath, state_file=exists_data["result_json"]) - logger.info("rewrite state file complete, continue...") - - def _run_apply_and_read_result(self, rid, provider, region, provider_info, define_json, skip_backup=None): - ''' - - :param rid: - :param provider: - :param region: - :param provider_info: - :param define_json: - :return: - ''' - - _path = "" - try: - _path = self.workspace_controller(rid, provider, region, provider_info) - self.write_define(rid, _path, define_json=define_json) - return self.run(_path, skip_backup=skip_backup) - except Exception, e: - if _path: - self.rollback_workspace(_path) - raise e - - def source_filter_controller(self, label_name, query_data, resource_object): - return ResourceConductor().generate_data_source(label_name, query_data, resource_object) - - def import_filter_controller(self, provider_name, label_name, query_data): - return ResourceConductor().conductor_reset_resource(provider=provider_name, - resource_name=self.resource_name, - label_name=label_name, - resource_data=query_data) - - def _import_resource_(self, rid, provider, region, provider_info, asset_id, resource_id, label_name): - workpath = self.get_workpath(rid, provider, region) - try: - query_data = {"asset_id": resource_id} if resource_id else {} - - self.workspace_controller(rid, provider, region, provider_info) - define_json, resource_keys_config = self.import_filter_controller(provider_name=provider, - label_name="q_" + rid, - query_data=query_data - - ) - - import_define_json, _ = ResourceConductor().conductor_import_resource(provider=provider, - 
resource_name=self.resource_name, - label_name=label_name - ) - - import_define_json.update(define_json) - self.write_define(rid, workpath, define_json=import_define_json) - - dest_source = "%s.%s" % (resource_keys_config["resource_name"], label_name) - self.run_import(from_source=asset_id, dest_source=dest_source, path=workpath, state=None) - - return False - except Exception, e: - logger.info(traceback.format_exc()) - raise e - - def source_run_import(self, rid, provider, region, label_name, - provider_info, asset_id, resource_id, - skip_rewrite=None): - - workpath = self.get_workpath(rid, provider, region) - - if self.is_need_imort(workpath): - logger.info("state file not exists, try recovery ..") - exists_data = self.resource_object.show(rid) - if exists_data: - return self.rewrite_resource_state(rid=rid, provider=provider, region=region, - provider_info=provider_info, workpath=workpath, - exists_data=exists_data) - else: - logger.info("state file exists continue ...") - return False - - if asset_id and resource_id: - logger.info("asset id given, try import resource..") - self._import_resource_(rid, provider, region, provider_info, asset_id, resource_id, label_name) - - logger.info("state file not recovery, continue apply resource ... 
") - return True - - def read_output_controller(self, provider, result, resource_object, resource_values_config): - logger.info(format_json_dumps(result)) - return read_output_result(provider, result, models=resource_object.get("resource_output"), - resource_values_config=resource_values_config, - resource_name=self.resource_name) - - def run_create(self, rid, region, zone, provider_object, provider_info, - asset_id, resource_id, create_data, extend_info, - resource_object, resource_values_config, **kwargs): - ''' - - :param rid: - :param region: - :param zone: - :param provider_object: - :param provider_info: - :param asset_id: - :param resource_id: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - extend_info = extend_info or {} - label_name = self.resource_name + "_" + rid - - define_json = self.conductor_apply_data(provider=provider_object["name"], - data=create_data, label_name=label_name, - defines=resource_object, - resource_values_config=resource_values_config) - - self.source_run_import(rid=rid, provider=provider_object["name"], region=region, label_name=label_name, - provider_info=provider_info, asset_id=asset_id, resource_id=resource_id) - - output_json = ResourceConductor().apply_output(label_name=label_name, resource_object=resource_object) - - define_json.update(output_json) - result = self._run_apply_and_read_result(rid, provider=provider_object["name"], - region=region, provider_info=provider_info, - define_json=define_json, skip_backup=None) - - output_res = self.read_output_controller(provider=provider_object["name"], result=result, - resource_object=resource_object, - resource_values_config=resource_values_config) - - count, res = self.insert_or_update(rid, provider=provider_object["name"], - provider_id=provider_object["id"], - region=region, zone=zone, - extend_info=extend_info, - define_json=define_json, - resource_id=output_res.get("asset_id"), - status="ok", output_json=output_res, - create_data=create_data, - 
result_json=result, **kwargs) - - output_res["id"] = rid - return count, output_res - - def apply(self, rid, base_info, base_bodys, create_data, extend_info, asset_id=None, resource_id=None, **kwargs): - ''' - - :param rid: - :param base_info: provider, region, zone secret 基础信息 - :param base_bodys: provider, region等object 信息 - :param create_data: 资源apply参数 - :param extend_info: 其他参数 - :param asset_id: 资产id - :param resource_id: 资源id - :param kwargs: - :return: - ''' - - # for object: - extend_info = extend_info or {} - secret = base_info.get("secret") - - provider_object = base_bodys.get("provider_data") - region_object = base_bodys.get("region_info") - zone_object = base_bodys.get("zone_info") - - provider = provider_object["name"] - region = region_object["asset_id"] - zone = zone_object.get("asset_id") - - resource_object = self.get_resource_object(provider=provider) - resource_values_config = self.get_values_config(provider) - - secret_info = self.get_secret(provider=provider, region=region_object["name"], - secret=secret, provider_data=provider_object) - provider_info = ProviderConductor().conductor_provider(provider_object, region_object, secret_info) - - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - asset_id=asset_id, resource_id=resource_id, - create_data=create_data, - resource_object=resource_object, - resource_values_config=resource_values_config, - extend_info=extend_info, **kwargs) - - return count, res - - def create(self, *args, **kwargs): - return self.apply(*args, **kwargs) - - def destroy_search_object(self, rid): - resource_info = self.resource_object.show(rid) - if not resource_info: - raise ValueError("资源不存在或没被纳管 %s" % rid) - return resource_info - - def destroy_rewrite(self, rid, path, resource_info): - if not self.destroy_ensure_file(rid, path=path): - self.rewrite_state(path, state_file=resource_info["result_json"]) - self.write_define(rid, path, 
define_json=resource_info["define_json"]) - if not self.ensure_provider_file(path): - # 缺失认证文件,设置为直接异常 - raise ValueError("缺失provider文件") - - def destroy(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.destroy_search_object(rid) - _path = self.create_workpath(rid, provider=resource_info["provider"], region=resource_info["region"]) - - self.destroy_rewrite(rid, _path, resource_info) - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - def get_remote_source(self, rid, base_info, base_bodys, query_data, extend_info=None, **kwargs): - rid = rid or "rand_%s" % (get_uuid()) - - extend_info = extend_info or {} - query_data.update(extend_info) - secret = base_info.get("secret") - - provider_object = base_bodys.get("provider_data") - region_object = base_bodys.get("region_info") - zone_object = base_bodys.get("zone_info") - - provider = provider_object["name"] - resource_object = self.get_resource_object(provider=provider) - resource_values_config = self.get_values_config(provider) - - secret_info = self.get_secret(provider=provider, region=region_object["name"], - secret=secret, provider_data=provider_object) - provider_info = ProviderConductor().conductor_provider(provider_object, region_object, secret_info) - - result = self.run_query(rid=rid, region=region_object["asset_id"], - zone=zone_object.get("asset_id"), - provider_object=provider_object, - provider_info=provider_info, - query_data=query_data, - resource_object=resource_object, - resource_values_config=resource_values_config, **kwargs) - - return result - - def run_query(self, rid, region, zone, provider_object, provider_info, - query_data, resource_object, resource_values_config, **kwargs): - - label_name = self.resource_name + "_q_" + rid - build_query_data = query_data_builder(provider=provider_object["name"], 
datas=query_data, - defines=resource_object.get("data_source"), - resource_values_config=resource_values_config, - resource_name=self.resource_name) - - define_json = self.source_filter_controller(label_name=label_name, - query_data=build_query_data, - resource_object=resource_object) - - result = self._run_apply_and_read_result(rid=label_name, provider=provider_object["name"], - region=region, provider_info=provider_info, - define_json=define_json, skip_backup=None) - - output_json = self.read_query_result_controller(provider=provider_object["name"], - result=result, defines=resource_object, - resource_values_config=resource_values_config) - - result_list = query_return_builder(data=query_data, defines=resource_object["data_source"], results=output_json) - return result_list - - def read_query_result_controller(self, provider, result, defines, resource_values_config): - logger.info(format_json_dumps(result)) - - try: - data_source_argument = SourceOuterReader.format_argument("data_source_argument", - defines.get("data_source_argument")) - output_results = read_source_output(result, data_source_argument) - except: - logger.info(traceback.format_exc()) - raise ValueError("query remote source failed, result read faild") - - res = [] - for out_data in output_results: - x_res = read_outer_property(provider, out_data, defines.get("data_source_output"), - resource_values_config, resouce_name=self.resource_name) - res.append(x_res) - - logger.info(format_json_dumps(res)) - return res - diff --git a/apps/api/conductor/apply_data_conductor.py b/apps/api/conductor/apply_data_conductor.py deleted file mode 100644 index c2babbb1..00000000 --- a/apps/api/conductor/apply_data_conductor.py +++ /dev/null @@ -1,116 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from apps.api.conductor.type_format import TypeFormat -from apps.api.conductor.model_format 
import ModelFormat - -client = ModelFormat - - -def _validate_apply_data(provider, key, value, define, resource_value_config, resource_name): - ''' - 转换传入的数据的key 以及 value映射值 - - :param key: - :param value: - :param define: string or json - { - "type":"string", - "convert":"access_key", - "allow_null":1, - "default":"", - "define":{ - } - } - :return: - ''' - - if not define: - return {} - - if isinstance(define, basestring): - if define == '-' or not define.strip(): - return {} - - if value is not None: - value = client.convert_apply_value(value, resource_value_config) - key = define or key - else: - value = client.format_apply_value(key, value, define) - if value: - value = client.convert_apply_value(value, resource_value_config) - value = client.format_type(value, type=define.get("type", "string")) - if isinstance(value, list): - # for list after format, may need revert value - value = client.convert_apply_value(value, resource_value_config) - value = client.hint_apply_infos(provider, value, define, resource_name) - - if define.get("convert"): - key = define.get("convert") or key - - if value: - return {key: value} - elif isinstance(value, (int, bool)): - return {key: value} - else: - return {} - - -def apply_data_builder(provider, datas, defines, resource_values_config, resource_name): - ''' - 依据resource定义, 转换字段, 转换value值, 生成apply resource 数据 - { - "type":"string", - "convert":"access_key", - "allow_null":1, - "default":"", - "hint":"$resource.vpc/$resource", - "define":{ - } - } - :param provider: - :param datas: - :param defines: - :param resource_values_config: - :return: - ''' - result = {} - logger.info("apply_builder ... 
") - for key, define in defines.items(): - # 依据定义字段转换,只转换defines中的字段,检查必要字段的传入,未定义字段移除 - if isinstance(define, dict) and define.get("define"): - if datas.get(key): - value = TypeFormat.f_dict(datas.get(key)) - else: - value = apply_data_builder(provider=provider, - datas=datas, defines=define.get("define"), - resource_values_config=resource_values_config, - resource_name=resource_name) - if value: - result[key] = value - else: - _t = _validate_apply_data(provider=provider, key=key, - value=datas.get(key), define=define, - resource_value_config=resource_values_config.get(key, {}), - resource_name=resource_name) - if _t: - for xkey, xvalue in _t.items(): - if xkey in result.keys(): - if isinstance(xvalue, list) and isinstance(result.get(xkey), list): - xtmp = xvalue + result.get(xkey) - result[xkey] = list(set(xtmp)) - elif isinstance(xvalue, list) or isinstance(result.get(xkey), list): - raise ValueError("key -> %s 存在映射同一字段,类型不统一, 请检查" % xkey) - else: - result[xkey] = xvalue - else: - result[xkey] = xvalue - - return result - - -def apply_output_builder(datas, defines): - pass diff --git a/apps/api/conductor/apply_output_conductor.py b/apps/api/conductor/apply_output_conductor.py deleted file mode 100644 index 22a0ebe9..00000000 --- a/apps/api/conductor/apply_output_conductor.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.api.conductor.model_format import ModelFormat -from apps.api.conductor.value_conductor import ValueConfigerConductor - - -class OutputReader(object): - @staticmethod - def read_output(provider, key, define, result, resource_value_config, resource_name): - ''' - - :param key: - :param define: - example: cidr replace cidr_block - define: cidr_block - or: {"value": "cidr_block", "type": "string"} - :param result: - :return: - ''' - - add_infos = 
{} - if (define is None): - logger.info("output %s define is null" % key) - return {} - if isinstance(define, basestring): - value = result - elif isinstance(define, dict): - client = ModelFormat - value = client.format_type(result, type=define.get("type", "string")) - value, add_infos = client.hint_outer_infos(provider, value, define, resource_name) - else: - raise ValueError("转换配置错误, 类型错误") - - if value: - value = ValueConfigerConductor.outer_value(value, resource_value_config) - - add_infos[key] = value - return add_infos - - @classmethod - def fetch_id(cls, result): - ''' - - :param result: - :return: - ''' - - try: - _data = result.get("resources")[0] - _instances = _data.get("instances")[0] - _attributes = _instances.get("attributes") - return _attributes.get("id") or "0000000" - except: - logger.info(traceback.format_exc()) - raise ValueError("result can not fetch id") - - -def read_output_result(provider, result, models, resource_values_config, resource_name): - ''' - 对于设置了output的属性, 则提取output输出值 - :param result: - :return: - ''' - - if models: - result_output = result.get("outputs") - - ext_result = {} - for column, res in result_output.items(): - _out_dict = OutputReader.read_output(provider=provider, - key=column, define=models.get(column), - result=res.get("value"), - resource_value_config=resource_values_config.get(column), - resource_name=resource_name) - ext_result.update(_out_dict) - - if ext_result.get("asset_id"): - if len(ext_result["asset_id"]) > 512: - ext_result["asset_id"] = ext_result["asset_id"][:512] - logger.info("resource id length more than 512, will truncated for asset_id") - else: - ext_result["asset_id"] = OutputReader.fetch_id(result) - - logger.info(format_json_dumps(ext_result)) - return ext_result - - return {} diff --git a/apps/api/conductor/model_format.py b/apps/api/conductor/model_format.py deleted file mode 100644 index 806d71c2..00000000 --- a/apps/api/conductor/model_format.py +++ /dev/null @@ -1,528 +0,0 @@ -# coding: 
utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.uuid_util import get_uuid -from apps.api.conductor.type_format import TypeFormat -from apps.background.resource.resource_base import CrsObject -from apps.background.resource.configr.region import RegionObject -from apps.background.resource.configr.region import ZoneObject -from apps.background.resource.vm.instance_type import InstanceTypeObject - - -class ModelFormat(object): - @classmethod - def format_type(cls, value, type): - ''' - 校验数据类型, 并转换 - :param value: - :param type: - :return: - ''' - - if (type == "string") and (not isinstance(value, basestring)): - value = TypeFormat.f_string(value) - elif (type == "int") and not isinstance(value, int): - value = TypeFormat.f_int(value) - elif (type == "float") and (not isinstance(value, float)): - value = TypeFormat.f_float(value) - elif (type == "json") and (not isinstance(value, dict)): - value = TypeFormat.f_dict(value) - elif (type == "object") and (not isinstance(value, dict)): - value = TypeFormat.f_dict(value) - elif (type == "list") and (not isinstance(value, list)): - value = TypeFormat.f_list(value) - elif (type == "bool") and (not isinstance(value, bool)): - value = TypeFormat.f_bool(value) - else: - pass - - return value - - @classmethod - def not_null(cls, key, value): - ''' - - :param key: - :param value: - :return: - ''' - - if not value and not isinstance(value, (int, bool, float)): - raise ValueError("key: %s 不允许为空" % key) - - @classmethod - def fill_default(cls, value, default): - ''' - - :param value: - :param default: - :return: - ''' - if not value and not isinstance(value, (int, bool, float)): - value = value or default - - return value - - @classmethod - def format_apply_value(cls, key, value, define): - ''' - - :param key: - :param value: - :param define: - :return: - ''' - - value = cls.fill_default(value, define.get("default")) - 
allow_null = define.get("allow_null", 1) - if not int(allow_null): - cls.not_null(key, value) - - return value - - @classmethod - def format_query_value(cls, key, value, define): - ''' - - :param key: - :param value: - :param define: - :return: - ''' - - allow_null = define.get("allow_null", 1) - if not int(allow_null): - cls.not_null(key, value) - - return value - - @classmethod - def convert_value(cls, value, define): - ''' - - :param key: - :param value: - :param define: - string or json - example: cidr replace cidr_block - define: cidr_block - or: {"value": "cidr_block", "type": "string"} - :return: - ''' - - if (value is None) or (define is None): - return value - if isinstance(define, (basestring, bool, int)): - value = define or value - elif isinstance(define, dict): - value = define.get("value", value) or value - value = cls.format_type(value, define.get("type", "string")) - else: - raise ValueError("转换配置错误, 类型错误") - - return value - - @classmethod - def convert_apply_value(cls, value, define): - if not value or not define: - return value - if isinstance(value, (basestring, int, bool, float)): - value = cls.convert_value(value, define.get(value)) - elif isinstance(value, list): - res = [] - for x_value in value: - t = cls.convert_value(x_value, define.get(value)) - res.append(t) - - value = res - else: - pass - - return value - - @classmethod - def convert_output_value(cls, value, define): - # todo output update 转换output输出参数 - if not value: - return value - if isinstance(value, (basestring, int, bool, float)): - value = cls.convert_value(value, define.get(value)) - elif isinstance(value, list): - res = [] - for x_value in value: - t = cls.convert_value(x_value, define.get(value)) - res.append(t) - - value = res - else: - pass - - return value - - @classmethod - def _hint_resource_id_(cls, value, define): - ''' - - :param value: - :param define: - :return: - ''' - - def get_value(value, column, data): - if column in ["resource_id", "asset_id"] or (not 
column): - value = data.get("resource_id") or value - else: - x_res = data.get("output_json") or {} - value = x_res.get(instance_columns) or value - - return value - - # for define $resource, filter any resource - if "." in define: - resource_name = define.split(".")[1] - else: - resource_name = "" - - instance_columns = "" - try: - instance_columns = define.split(".")[2] - except: - pass - - logger.info("_hint_resource_, filter resource %s" % resource_name) - if isinstance(value, basestring): - t_data = CrsObject(resource_name).show(rid=value) - value = get_value(value=value, column=instance_columns, data=t_data) - elif isinstance(value, list): - c, t_data = CrsObject(resource_name).list(filter_in={"id": value}) - if resource_name and len(value) < c: - raise ValueError("资源存在重复注册,请检查") - - convertd = [] - for x in t_data: - convertd.append(x.get("id")) - x_value = get_value(value=x.get("resource_id"), column=instance_columns, data=x) - value.append(x_value) - - logger.info("_hint_resource_, convert resource id: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的资源id: %s" % value) - - return value - - @classmethod - def _hint_resource_id_outer_(cls, value, define): - ''' - - :param value: - :param define: - :return: - ''' - - # for define $resource, filter any resource - if "." 
in define: - resource_name = define.split(".")[1] - else: - resource_name = "" - - logger.info("_hint_resource_id_outer_, filter resource %s" % resource_name) - if isinstance(value, basestring): - value = CrsObject(resource_name).asset_object_id(value) - elif isinstance(value, list): - c, t_data = CrsObject(resource_name).list(filter_in={"resource_id": value}) - - convertd = [] - for x in t_data: - convertd.append(x.get("resource_id")) - value.append(x.get("id")) - - logger.info("_hint_resource_id_outer_, convert resource id: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - pass - # raise ValueError("不正确的资源id: %s" % value) - - return value - - @classmethod - def _hint_zone_id_(cls, value): - ''' - - :param value: - :param define: - :return: - ''' - - # for define zone - if isinstance(value, basestring): - t_data = ZoneObject().zone_object(value) - value = t_data.get("asset_id") or value - elif isinstance(value, list): - c, t_data = ZoneObject().list(filter_in={"id": value}) - if len(value) != c: - raise ValueError("zone id 列表存在未注册zone") - - convertd = [] - for x in t_data: - convertd.append(x.get("id")) - value.append(x.get("asset_id")) - - logger.info("_hint_resource_, convert resource id: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的 zone id: %s" % value) - - return value - - @classmethod - def _hint_zone_id_outer_(cls, value): - ''' - - :param value: - :param define: - :return: - ''' - - # for define zone - if isinstance(value, basestring): - t_data = ZoneObject().zone_asset(value) - value = t_data.get("id") or value - elif isinstance(value, list): - c, t_data = ZoneObject().list(filter_in={"asset_id": value}) - - convertd = [] - for x in t_data: - convertd.append(x.get("asset_id")) - value.append(x.get("id")) - - logger.info("_hint_zone_id_outer_, convert resource id: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的 zone id: 
%s" % value) - - return value - - @classmethod - def _hint_region_id_(cls, value): - ''' - - :param value: - :param define: - :return: - ''' - - # for define zone - if isinstance(value, basestring): - t_data = RegionObject().region_object(value) - value = t_data.get("asset_id") or value - elif isinstance(value, list): - c, t_data = RegionObject().list(filter_in={"id": value}) - if len(value) != c: - raise ValueError("region id 列表存在未注册region") - - convertd = [] - for x in t_data: - convertd.append(x.get("id")) - value.append(x.get("asset_id")) - - logger.info("_hint_resource_, convert resource id: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的 region id: %s" % value) - - return value - - @classmethod - def _hint_region_id_outer_(cls, value): - ''' - - :param value: - :param define: - :return: - ''' - - # for define zone - if isinstance(value, basestring): - t_data = RegionObject().region_asset(value) - value = t_data.get("id") or value - elif isinstance(value, list): - c, t_data = RegionObject().list(filter_in={"asset_id": value}) - - convertd = [] - for x in t_data: - convertd.append(x.get("asset_id")) - value.append(x.get("id")) - - logger.info("_hint_region_id_outer_, convert resource id: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的 region id: %s" % value) - - return value - - @classmethod - def _hint_instance_type_(cls, provider, value, usage_type=None): - ''' - - :param value: - :param define: - :return: - ''' - - # for define instance type - if isinstance(value, basestring): - t_data, _ = InstanceTypeObject().convert_resource_name(provider, value, usage_type) - value = t_data or value - elif isinstance(value, list): - filters = {"provider": provider} - if usage_type: - filters["type"] = usage_type - c, t_data = InstanceTypeObject().list(filters=filters, filter_in={"name": value}) - - convertd = [] - for x in t_data: - convertd.append(x.get("name")) - 
value.append(x.get("origin_name")) - - logger.info("_hint_resource_, convert resource: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的 instance type: %s" % value) - - return value - - @classmethod - def _hint_instance_type_outer_(cls, provider, value, usage_type=None): - ''' - #todo instance type 转换信息新增 cpu 内存等信息 - :param value: - :param define: - :return: - ''' - - # for define instance type - add_infos = {} - if isinstance(value, (basestring, int)): - value, add_infos = InstanceTypeObject().convert_asset(provider, value, usage_type) - elif isinstance(value, list): - # for list add info is {}, e: list may not used - filters = {"provider": provider} - if usage_type: - filters["type"] = usage_type - c, t_data = InstanceTypeObject().list(filters=filters, filter_in={"origin_name": value}) - - convertd = [] - for x in t_data: - convertd.append(x.get("origin_name")) - value.append(x.get("name")) - - logger.info("_hint_instance_type_outer_, convert resource: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的 instance type: %s" % value) - - return value, add_infos - - @classmethod - def _hint_instance_type_columns_outer_(cls, provider, value, columns, usage_type=None): - ''' - #todo instance type 转换信息新增 cpu 内存等信息 - :param provider: - :param value: - :param columns: - :param usage_type: - :return: - ''' - - # for define instance type - add_infos = {} - if isinstance(value, (basestring, int)): - value, add_infos = InstanceTypeObject().convert_asset(provider, value, usage_type) - value = add_infos.get(columns) or value - return value, add_infos - elif isinstance(value, list): - # for list add info is {}, e: list may not used - filters = {"provider": provider} - if usage_type: - filters["type"] = usage_type - c, t_data = InstanceTypeObject().list(filters=filters, filter_in={"origin_name": value}) - - convertd = [] - for x in t_data: - convertd.append(x.get(columns)) if 
x.get(columns) else None - value.append(x.get("name")) - - logger.info("_hint_instance_type_outer_, convert resource: %s" % (bytes(convertd))) - value = list(set(value) - set(convertd)) - else: - raise ValueError("不正确的 instance type: %s" % value) - - return value, add_infos - - @classmethod - def hint_apply_infos(cls, provider, value, define, resource_name): - ''' - hint info, 用于转换cmdb信息定义 - :param value: - :param define: - :return: - ''' - - if value and define.get("hint"): - define = define.get("hint") - else: - return value - - if define.startswith("$resource"): - value = cls._hint_resource_id_(value, define) - elif define == "$zone": - value = cls._hint_zone_id_(value) - elif define == "$region": - value = cls._hint_region_id_(value) - elif define == "$instance.type": - # instance type转换更新值转换 - logger.info("instance type revert used value now. skip ...") - # value = cls._hint_instance_type_(provider, value, usage_type=resource_name) - else: - logger.info("define %s not define now, skip it ..." 
% (define)) - return value - - @classmethod - def hint_outer_infos(cls, provider, value, define, resource_name): - ''' - hint info, 用于转换cmdb信息定义 - - :param value: - :param define: - :return: - ''' - - add_infos = {} - if value and define.get("hint"): - define = define.get("hint") - else: - return value, add_infos - - logger.debug("outer revert value ...") - if define.startswith("$resource"): - value = cls._hint_resource_id_outer_(value, define) - elif define == "$zone": - value = cls._hint_zone_id_outer_(value) - elif define == "$region": - value = cls._hint_region_id_outer_(value) - elif define.startswith("$instance.type"): - # instance type 转换统一使用value进行转换 - if define == "$instance.type": - logger.info("instance type used value revert now, skip value ...") - _, add_infos = cls._hint_instance_type_outer_(provider, value, usage_type=resource_name) - else: - column = define[len("$instance.type."):] - value, _ = cls._hint_instance_type_columns_outer_(provider, value, - columns=column, usage_type=resource_name) - else: - logger.info("define %s not define now, skip it ..." 
% (define)) - return value, add_infos diff --git a/apps/api/conductor/provider.py b/apps/api/conductor/provider.py deleted file mode 100644 index b1ee6e68..00000000 --- a/apps/api/conductor/provider.py +++ /dev/null @@ -1,236 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from core import local_exceptions -from lib.logs import logger -from lib.encrypt_helper import encrypt_str -from lib.encrypt_helper import decrypt_str -from wecube_plugins_terraform.settings import TERRAFORM_BASE_PATH -from apps.background.resource.configr.provider import ProviderObject -from apps.api.configer.provider_secret import SecretApi -from apps.api.configer.provider import ProviderApi -from apps.common.validation import validate_column_line -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import convert_value -from .region import RegionConductor - -if not os.path.exists(TERRAFORM_BASE_PATH): - os.makedirs(TERRAFORM_BASE_PATH) - - -class ProviderConductor(object): - def decrypt_key(self, str): - if str: - if str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - def _split_to_json(self, secret): - try: - res = {} - secret = secret.replace(" ", '') - _cols = secret.split(";") - for col in _cols: - tmp = col.split("=") - res[tmp[0]] = tmp[1] - - return res - except: - logger.info(traceback.format_exc()) - raise ValueError("格式错误, 无法解析的格式, 正确格式为: key1=value1; key2=value2 ...") - - def zone_info(self, provider, zone): - if zone: - return ProviderApi().zone_info(provider, zone) - - return zone - - def region_info(self, provider, region): - if region: - return ProviderApi().region_info(provider, region) - - return region - - def zone_reverse_info(self, provider, zone): - if zone: - return ProviderApi().zone_reverse_info(provider, zone) - - return zone - - def region_reverse_info(self, provider, region): - 
if region: - return ProviderApi().region_reverse_info(provider, region) - - return region - - def format_secret(self, secret): - ''' - - :param secret: - :return: - ''' - if not secret: - return {} - - if "{" in secret and "}" in secret: - try: - res = json.loads(secret) - except: - logger.info(secret) - raise ValueError("secret key 不能转换为json") - - elif ";" in secret or "=" in secret: - res = self._split_to_json(secret) - else: - validate_column_line(secret) - res = secret - - return res - - def _provider_secret(self, provider, region, secret): - if not secret: - return secret - - secret = self.format_secret(secret) - if isinstance(secret, dict): - logger.info("secret format json, use secret info") - return secret - else: - logger.debug("search secret info") - info = SecretApi().secret_info(provider, name=secret, region=region) - # if not info: - # raise ValueError("provider %s 提供了未知的认证信息, 请检查") - return info - - def producer_secret_info(self, provider, region, secret, provider_data): - ''' - - :param provider: - :param region: - :param secret: - :return: - ''' - - secret_info = self._provider_secret(provider, region, secret) - if not secret_info: - # 兼容provider的认证方式 - secret_info = {} - logger.info("not search secret info, try use provider define info") - provider_data["secret_id"] = self.decrypt_key(provider_data.get("secret_id")) - provider_data["secret_key"] = self.decrypt_key(provider_data.get("secret_key")) - - for key in ["secret_id", "secret_key"]: - if provider_data.get(key): - secret_info[key] = provider_data.get(key) - - provider_property = provider_data.get("provider_property", {}) - secret_info = convert_keys(secret_info, defines=provider_property, is_update=True) - - if not secret_info: - raise ValueError("获取provider 认证信息失败") - - return secret_info - - def _provider_object(self, provider): - provider_data = ProviderObject().query_one(where_data={"name": provider}) - if not provider_data: - logger.debug("provider: %s is null, try search id" % provider) 
- provider_data = ProviderObject().show(provider) - - if not provider_data: - raise local_exceptions.ResourceValidateError("provider", "provider %s 未注册" % provider) - return provider_data - - def find_provider_info(self, provider): - ''' - - :param provider: ID or name - :return: - ''' - - provider_data = self._provider_object(provider) - - if not provider_data.get("is_init"): - raise local_exceptions.ResourceConfigError("provider 未初始化,请重新初始化") - - return provider_data - - def conductor_provider_info(self, provider, region, secret): - ''' - - :param provider: name - :param region: name - :param secret: name or string dict - :return: - ''' - - provider_data = self.find_provider_info(provider) - - provider = provider_data.get("name") - ProviderApi().create_provider_workspace(provider) - - secret_info = self.producer_secret_info(provider, region, secret, provider_data) - - region = ProviderApi().region_info(provider, region) - provider_info = {"region": region} - - extend_info = provider_data.get("extend_info", {}) - provider_property = provider_data.get("provider_property", {}) - - provider_info.update(extend_info) - provider_columns = convert_keys(provider_info, defines=provider_property, is_update=True) - provider_columns.update(secret_info) - - - info = { - "provider": { - provider: provider_columns - } - } - - return provider_data, info - - def conductor_backend_provider_info(self, provider, region, secret): - ''' - - :param provider: name - :param region: name - :param secret: name or string dict - :return: - ''' - - region = RegionConductor().region_info(provider, region) - return self.conductor_provider_info(provider, region, secret) - - def conductor_provider(self, provider_object, region_object, secret_info): - ''' - - :param provider_object: - :param region_object: - :param secret_info: - :return: - ''' - - provider = provider_object.get("name") - ProviderApi().create_provider_workspace(provider) - - provider_info = {"region": region_object["asset_id"]} - 
provider_property = provider_object.get("provider_property", {}) - - provider_info.update(provider_object.get("extend_info", {})) - provider_columns = convert_keys(provider_info, defines=provider_property, is_update=True) - provider_columns.update(secret_info) - - info = { - "provider": { - provider: provider_columns - } - } - - return info - diff --git a/apps/api/conductor/region.py b/apps/api/conductor/region.py deleted file mode 100644 index 4f9de876..00000000 --- a/apps/api/conductor/region.py +++ /dev/null @@ -1,100 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.configer.region import RegionApi -from apps.api.configer.region import ZoneApi - - -class RegionConductor(object): - def region_info(self, provider, region): - ''' - - :param provider: - :param region: region id - :return: - ''' - - asset_id, data = RegionApi().region_asset(region) - if provider != data["provider"]: - raise ValueError("provider: %s 没有region:%s 注册信息,请先注册" % (provider, region)) - - return asset_id - - def provider_region_info(self, provider, region): - ''' - - :param provider: - :param region: region id - :return: - ''' - - asset_id, data = RegionApi().provider_region_asset(provider, region) - # if provider != data["provider"]: - # raise ValueError("provider: %s 没有region:%s 注册信息,请先注册" % (provider, region)) - - return asset_id, data - - def zone_info(self, provider, zone): - ''' - - :param provider: - :param zone: zone id - :return: - ''' - - asset_id, data = ZoneApi().zone_asset_data(zone) - if provider != data["provider"]: - raise ValueError("provider: %s zone:%s 注册信息,请先注册" % (provider, zone)) - - return asset_id - - def provider_zone_info(self, provider, region, zone): - ''' - - :param provider: - :param zone: zone id - :return: - ''' - - asset_id, data = ZoneApi().provider_zone_object(provider=provider, region=region, zone_id=zone) - return asset_id, data - - def region_reverse_info(self, provider, 
region): - ''' - - :param provider: - :param region: region asset id - :return: - ''' - - return RegionApi().region_objectid(asset_id=region, provider=provider) - - def zone_reverse_info(self, provider, zone): - ''' - - :param provider: - :param zone: zone asset id - :return: - ''' - - return ZoneApi().zone_objectid(asset_id=zone, provider=provider) - - def zone_asset(self, provider, zone_id): - ''' - - :param provider: - :param zone: zone asset id - :return: - ''' - - return ZoneApi().zone_asset(zone_id=zone_id) - - def zone_reverse_mapping(self, provider, region): - ''' - - :param provider: - :param region: region id - :return: - ''' - - return ZoneApi().zone_region_ids(region=region, provider=provider) diff --git a/apps/api/conductor/resource.py b/apps/api/conductor/resource.py deleted file mode 100644 index 400d4226..00000000 --- a/apps/api/conductor/resource.py +++ /dev/null @@ -1,340 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from core import local_exceptions -from lib.logs import logger -from lib.json_helper import format_json_dumps -from .resourceConfiger import ResourceConfiger -from .valueConfiger import ValueConductor - - -class ResourceConductor(object): - def __init__(self): - pass - - def _generate_resource(self, provider, resource_name, - label_name, create_data, extend_info): - ''' - - :param provider: name - :param resource_name: - :param label_name: - :param create_data: - :param extend_info: - :return: - ''' - - create_data = ValueConductor().conductor_apply_values(provider=provider, - resource_name=resource_name, - data=create_data) - - configer = ResourceConfiger() - resource_columns, resource_keys_config = configer.conductor_apply_property(provider=provider, - resource_name=resource_name, - resource_data=create_data) - - extend_json, _ = configer.conductor_apply_extend(provider=provider, - resource_name=resource_name, - 
extend_info=extend_info) - - resource_columns.update(extend_json) - property = resource_keys_config["resource_name"] - - _info = { - "resource": { - property: { - label_name: resource_columns - } - } - } - logger.info(format_json_dumps(_info)) - return _info, resource_keys_config - - def conductor_apply_resource(self, provider, resource_name, - label_name, create_data, extend_info): - ''' - - :param provider: name - :param region: - :param secret: - :param create_data: - :param extend_info: - :return: - ''' - - define_json, resource_keys_config = self._generate_resource(provider=provider, - resource_name=resource_name, - label_name=label_name, - create_data=create_data, - extend_info=extend_info) - - output_json, _ = ResourceConfiger().conductor_apply_output(provider=provider, - resource_name=resource_name, - label_name=label_name - ) - - define_json.update(output_json) - return define_json, resource_keys_config - - def conductor_apply_data(self, label_name, create_data, ora_resource_name): - ''' - - :param provider: name - :param region: - :param secret: - :param create_data: - :param extend_info: - :return: - ''' - - _info = { - "resource": { - ora_resource_name: { - label_name: create_data - } - } - } - logger.info(format_json_dumps(_info)) - return _info - - def conductor_apply_output(self, provider, resource_name, label_name): - ''' - - :param provider: name - :param region: - :param secret: - :param create_data: - :param extend_info: - :return: - ''' - - output_json, _ = ResourceConfiger().conductor_apply_output(provider=provider, - resource_name=resource_name, - label_name=label_name - ) - - return output_json - - def apply_output(self, label_name, resource_object): - return ResourceConfiger().apply_output(label_name=label_name, resource_object=resource_object) - - def _generate_import_resource(self, provider, resource_name, label_name): - ''' - # import resource define: { - "resource": { - "route_table": { - "example": { - } - } - } - } - :param provider: - 
:param resource_name: - :param label_name: - :return: - ''' - - configer = ResourceConfiger() - resource_columns, resource_keys_config = configer.conductor_import_property(provider=provider, - resource_name=resource_name) - - property = resource_keys_config["resource_name"] - - _info = { - "resource": { - property: { - label_name: resource_columns - } - } - } - logger.info(format_json_dumps(_info)) - return _info, resource_keys_config - - def conductor_import_resource(self, provider, resource_name, label_name): - ''' - - :param provider: name - :param region: - :param secret: - :param create_data: - :param extend_info: - :return: - ''' - - define_json, resource_keys_config = self._generate_import_resource(provider=provider, - resource_name=resource_name, - label_name=label_name - ) - - return define_json, resource_keys_config - - def fetch_resource_propertys(self, resource_name, label_name, define_json): - _t = define_json["resource"][resource_name] - origin_columns = _t[label_name] - return origin_columns - - def _generate_upgrade_resource(self, provider, resource_name, - label_name, update_data, - extend_info, origin_data): - ''' - - :param provider: name - :param resource_name: - :param label_name: - :param update_data: - - :param extend_info: - :return: - ''' - - update_data = ValueConductor().conductor_apply_values(provider=provider, - resource_name=resource_name, - data=update_data) - - configer = ResourceConfiger() - resource_columns, resource_keys_config = configer.conductor_upgrade_property(provider=provider, - resource_name=resource_name, - resource_data=update_data, - ) - - extend_json, _ = configer.conductor_upgrade_extend(provider=provider, - resource_name=resource_name, - extend_info=extend_info, - ) - - resource_columns.update(extend_json) - property = resource_keys_config["resource_name"] - - origin_columns = self.fetch_resource_propertys(resource_name, label_name, origin_data) - origin_columns.update(resource_columns) - - _info = { - "resource": { 
- property: { - label_name: origin_columns - } - } - } - - logger.info(format_json_dumps(_info)) - return _info, resource_keys_config - - def conductor_upgrade_resource(self, provider, resource_name, - label_name, update_data, - extend_info, origin_data): - ''' - - :param provider: - :param resource_name: - :param label_name: - :param update_data: - :param extend_info: - :param origin_data: - :return: - ''' - - update_json, resource_keys_config = self._generate_upgrade_resource(provider=provider, - resource_name=resource_name, - label_name=label_name, - update_data=update_data, - extend_info=extend_info, - origin_data=origin_data) - - origin_data.update(update_json) - return origin_data, resource_keys_config - - def _generate_data_source(self, provider, resource_name, label_name, resource_data): - ''' - - :param provider: - :param resource_name: - :param label_name: - :param resource_data: - :return: - ''' - - resource_data = ValueConductor().conductor_apply_values(provider=provider, - resource_name=resource_name, - data=resource_data) - - configer = ResourceConfiger() - configer.pre_check_source_property(provider=provider, - resource_name=resource_name, - resource_data=resource_data) - - resource_columns, resource_keys_config = configer.conductor_source_property(provider=provider, - resource_name=resource_name, - resource_data=resource_data) - - property = resource_keys_config.get("data_source_name") or resource_keys_config["resource_name"] - - _info = { - "data": { - property: { - label_name: resource_columns - } - } - } - logger.info(format_json_dumps(_info)) - return _info, resource_keys_config - - def generate_data_source(self, label_name, query_data, resource_object): - ''' - - :param provider: - :param resource_name: - :param label_name: - :param resource_data: - :return: - ''' - - property = resource_object.get("data_source_name") or resource_object["resource_name"] - - _info = { - "data": { - property: { - label_name: query_data - } - } - } - 
logger.info(format_json_dumps(_info)) - return _info - - def conductor_reset_resource(self, provider, resource_name, label_name, resource_data): - ''' - - :param provider: name - :param region: - :param secret: - :param create_data: - :param extend_info: - :return: - ''' - - define_json, resource_keys_config = self._generate_data_source(provider=provider, - resource_name=resource_name, - label_name=label_name, - resource_data=resource_data) - - output_json, _ = ResourceConfiger().conductor_data_output(provider=provider, - resource_name=resource_name, - label_name=label_name - ) - - # define_json.update(output_json) - return define_json, resource_keys_config - - def conductor_reset_filter(self, provider, resource_name): - configer = ResourceConfiger() - resource_columns, _ = configer.conductor_reset_property(provider=provider, - resource_name=resource_name) - return resource_columns - - def conductor_reset_equivalence(self, provider, resource_name): - configer = ResourceConfiger() - resource_columns, _ = configer.conductor_reset_equivalence(provider=provider, - resource_name=resource_name) - return resource_columns diff --git a/apps/api/conductor/resourceConfiger.py b/apps/api/conductor/resourceConfiger.py deleted file mode 100644 index 640d8975..00000000 --- a/apps/api/conductor/resourceConfiger.py +++ /dev/null @@ -1,374 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from core import local_exceptions -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import convert_extend_propertys -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import ext_convert_keys -from apps.common.convert_keys import convert_value -from apps.common.convert_keys import output_line -from apps.common.convert_keys import ConvertMetadata -from apps.common.reverse_convert_keys import ReverseProperty 
-from apps.background.resource.configr.resource import ResourceObject - - -class ResourceConfiger(object): - def __init__(self): - self.resource_keys_config = None - - def resource_info(self, provider, resource_name): - ''' - - :param provider: - :param resource_name: - :return: - ''' - - if self.resource_keys_config: - return self.resource_keys_config - - resource_keys_config = ResourceObject().query_one(where_data={"provider": provider, - "resource_type": resource_name}) - if not resource_keys_config: - raise local_exceptions.ResourceConfigError("%s 资源未初始化完成配置" % resource_name) - - self.resource_keys_config = resource_keys_config - return resource_keys_config - - def conductor_apply_property(self, provider, resource_name, resource_data): - ''' - - :param provider: - :param resource_name: - :param resource_data: - :return: - ''' - - self.resource_info(provider, resource_name) - - resource_property = self.resource_keys_config["resource_property"] - # resource_columns = convert_keys(resource_data, defines=resource_property) - resource_columns = ext_convert_keys(resource_data, defines=resource_property) - - resource_columns = self.reduce_key(resource_columns) - return resource_columns, self.resource_keys_config - - def conductor_import_property(self, provider, resource_name): - ''' - - :param provider: - :param resource_name: - :param resource_data: - :return: - ''' - - self.resource_info(provider, resource_name) - - return {}, self.resource_keys_config - - def pre_check_source_property(self, provider, resource_name, resource_data): - ''' - - :param provider: - :param resource_name: - :param resource_data: - :return: - ''' - - self.resource_info(provider, resource_name) - - data_source = self.resource_keys_config["data_source"] - if not data_source: - raise ValueError("source资源未定义") - - for key, value in resource_data.items(): - if key == "zone": - continue - else: - if key not in data_source.keys(): - raise ValueError("provider: %s 不支持过滤参数: %s" % (provider, key)) - 
- def conductor_source_property(self, provider, resource_name, resource_data): - ''' - - :param provider: - :param resource_name: - :param resource_data: - :return: - ''' - - self.resource_info(provider, resource_name) - - data_source = self.resource_keys_config["data_source"] - if not data_source: - raise ValueError("source资源未定义") - - # resource_columns = convert_keys(resource_data, defines=data_source) - resource_columns = ext_convert_keys(resource_data, defines=data_source) - - resource_columns = self.reduce_key(resource_columns) - return resource_columns, self.resource_keys_config - - def conductor_reset_property(self, provider, resource_name): - ''' - - :param provider: - :param resource_name: - :param resource_data: - :return: - ''' - - self.resource_info(provider, resource_name) - - origin_columns = {} - # resource_property = self.resource_keys_config["resource_property"] - # extend_info = self.resource_keys_config["extend_info"] - # resource_output = self.resource_keys_config["resource_output"] - # data_source_output = self.resource_keys_config["data_source_output"] - - resource_property = self.resource_keys_config["data_source_output"] - - extend_info = resource_property.pop("extend_info", {}) or {} - - propertys = ReverseProperty.reverse_keys(resource_property) - extend = ReverseProperty.reverse_extend_keys(extend_info) - - # resource_output.pop("resource_id", None) - # output = ReverseProperty.reverse_output_lines(resource_output) - origin_columns.update(propertys) - origin_columns.update(extend) - # origin_columns.update(output) - - # columns = {} - # for key, value in origin_columns.items(): - # if value in data_source_output.keys(): - # # todo dirct reverse ? 
- # continue - # columns[key] = value - - # data_source_output = ReverseProperty.reverse_keys(data_source_output) - # columns.update(data_source_output) - - columns = origin_columns - return columns, self.resource_keys_config - - def conductor_reset_equivalence(self, provider, resource_name): - ''' - - :param provider: - :param resource_name: - :param resource_data: - :return: - ''' - - self.resource_info(provider, resource_name) - - origin_columns = {} - resource_property = self.resource_keys_config["data_source_output"] - - extend_info = resource_property.pop("extend_info", {}) or {} - - propertys = ReverseProperty.reverse_equivalence(resource_property) - extend = ReverseProperty.reverse_extend_key_equivalence(extend_info) - - origin_columns.update(propertys) - origin_columns.update(extend) - - columns = origin_columns - return columns, self.resource_keys_config - - def conductor_upgrade_property(self, provider, resource_name, resource_data): - ''' - - :param provider: - :param resource_name: - :param resource_data: - :return: - ''' - - self.resource_info(provider, resource_name) - - resource_property = self.resource_keys_config["resource_property"] - resource_columns = ConvertMetadata.upgrade_keys(resource_data, defines=resource_property) - - resource_columns = self.reduce_key(resource_columns) - - # origin_data.update(resource_columns) - # return origin_data, self.resource_keys_config - return resource_columns, self.resource_keys_config - - def conductor_apply_extend(self, provider, resource_name, extend_info): - ''' - - :param provider: - :param resource_name: - :param extend_info: - :return: - ''' - - self.resource_info(provider, resource_name) - - resource_columns = {} - resource_property = self.resource_keys_config["resource_property"] - resource_extend_info = self.resource_keys_config["extend_info"] - - resource_property_extend = resource_property.pop("extend_info", {}) or {} - - # _extend_columns = convert_keys(datas=extend_info, 
defines=resource_property, is_extend=True) - _extend_columns = ext_convert_keys(datas=extend_info, defines=resource_property, is_extend=True) - logger.info("property extend info: %s" % (format_json_dumps(_extend_columns))) - resource_columns.update(_extend_columns) - - # _extend_columns = convert_keys(datas=extend_info, defines=resource_property_extend, is_extend=True) - _extend_columns = ext_convert_keys(datas=extend_info, defines=resource_property_extend, is_extend=True) - logger.info("resource_property extend info: %s" % (format_json_dumps(_extend_columns))) - resource_columns.update(_extend_columns) - - _extend_columns = convert_extend_propertys(datas=extend_info, extend_info=resource_extend_info) - logger.info("extend info: %s" % (format_json_dumps(_extend_columns))) - resource_columns.update(_extend_columns) - - resource_columns = self.reduce_key(resource_columns) - return resource_columns, self.resource_keys_config - - def reduce_key(self, data): - result = {} - for key, value in data.items(): - if value: - result[key] = value - elif isinstance(value, (int, bool)): - result[key] = value - else: - logger.info("key %s value: %s is null, remove it" % (key, str(value))) - - return result - - def _generate_output(self, provider, resource_name, label_name): - ''' - 转换output 输出参数,生成配置 - :param label_name: - :return: - ''' - - self.resource_info(provider, resource_name) - - output_configs = self.resource_keys_config["resource_output"] - resource_name = self.resource_keys_config["resource_name"] - - _ext_output = {} - for key, define in output_configs.items(): - _ext_output.update(output_line(key, define)) - - ext_output_config = {} - for column, ora_column in _ext_output.items(): - ext_output_config[column] = {"value": "${%s.%s.%s}" % (resource_name, label_name, ora_column)} - - result = {"output": ext_output_config} if ext_output_config else {} - return result, self.resource_keys_config - - def apply_output(self, label_name, resource_object): - ''' - 转换output 
输出参数,生成配置 - :param label_name: - :return: - ''' - - output_configs = resource_object["resource_output"] - resource_name = resource_object["resource_name"] - - _ext_output = {} - for key, define in output_configs.items(): - _ext_output.update(output_line(key, define)) - - ext_output_config = {} - for column, ora_column in _ext_output.items(): - ext_output_config[column] = {"value": "${%s.%s.%s}" % (resource_name, label_name, ora_column)} - - result = {"output": ext_output_config} if ext_output_config else {} - return result - - def _generate_source_output(self, provider, resource_name, label_name): - ''' - 转换output 输出参数,生成配置 - :param label_name: - :return: - ''' - - self.resource_info(provider, resource_name) - - output_configs = self.resource_keys_config["resource_output"] - resource_name = self.resource_keys_config["resource_type"] - - _ext_output = {} - for key, define in output_configs.items(): - _ext_output.update(output_line(key, define)) - - ext_output_config = {} - for column, ora_column in _ext_output.items(): - if column != "resource_id": - ext_output_config[column] = {"value": "${data.%s.%s.%s}" % (resource_name, label_name, ora_column)} - - result = {"output": ext_output_config} if ext_output_config else {} - return result, self.resource_keys_config - - def conductor_apply_output(self, provider, resource_name, label_name): - ''' - - :param provider: - :param resource_name: - :param label_name: - :return: - ''' - - return self._generate_output(provider, resource_name, label_name) - - def conductor_data_output(self, provider, resource_name, label_name): - ''' - - :param provider: - :param resource_name: - :param label_name: - :return: - ''' - - return self._generate_source_output(provider, resource_name, label_name) - - def conductor_upgrade_extend(self, provider, resource_name, extend_info): - ''' - - :param provider: - :param resource_name: - :param extend_info: - :return: - ''' - - self.resource_info(provider, resource_name) - - resource_columns = {} - 
resource_property = self.resource_keys_config["resource_property"] - resource_extend_info = self.resource_keys_config["extend_info"] - - _extend_columns = ConvertMetadata.upgrade_extend_keys(datas=extend_info, defines=resource_property) - logger.info("property extend info: %s" % (format_json_dumps(_extend_columns))) - resource_columns.update(_extend_columns) - - _extend_columns = ConvertMetadata.upgrade_extend_info(datas=extend_info, extend_info=resource_extend_info) - logger.info("extend info: %s" % (format_json_dumps(_extend_columns))) - resource_columns.update(_extend_columns) - - resource_columns = self.reduce_key(resource_columns) - - # origin_data.update(resource_columns) - # - # result = self.reduce_key(origin_data) - # return result, self.resource_keys_config - return resource_columns, self.resource_keys_config - -# if __name__ == '__main__': -# x = ResourceConfiger().conductor_reset_property('tencentcloud', 'vpc') -# print(x) diff --git a/apps/api/conductor/resourceReverse.py b/apps/api/conductor/resourceReverse.py deleted file mode 100644 index a69e2e89..00000000 --- a/apps/api/conductor/resourceReverse.py +++ /dev/null @@ -1,57 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from core import local_exceptions -from lib.logs import logger -from apps.common.convert_keys import convert_value -from apps.common.reverse import Reverse -from apps.common.reverse_convert_keys import ReverseProperty -from apps.background.resource.configr.value_config import ValueConfigObject -from .resourceConfiger import ResourceConfiger - - -class ResourceResetConductor(object): - def values_config(self, provider, resource_name): - ''' - - :param provider: - :param resource_name: - :return: - ''' - - return ValueConfigObject().resource_value_configs(provider, resource_name) - - def reset_propertys(self, provider, resource_name, data): - ''' - todo 特殊值/约定规则值处理 - :param 
provider: - :param resource_name: - :param data: - :return: - ''' - - configer = ResourceConfiger() - resource_columns, resource_keys_config = configer.conductor_apply_property(provider=provider, - resource_name=resource_name, - resource_data=data) - - - resource_values_config = self.values_config(provider, resource_name) - - resource_columns = {} - logger.debug("start revert value ....") - for key, value in data.items(): - if resource_values_config.get(key): - _values_configs = resource_values_config.get(key) - # value = convert_value(value, _values_configs.get(value)) - value = ReverseProperty.format_value(value, _values_configs.get(value)) - else: - logger.debug("key: %s value config is null, skip..." % key) - - resource_columns[key] = value - - return resource_columns - diff --git a/apps/api/conductor/source_data_conductor.py b/apps/api/conductor/source_data_conductor.py deleted file mode 100644 index c5a50094..00000000 --- a/apps/api/conductor/source_data_conductor.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import copy -import json -import traceback -from lib.logs import logger -from apps.api.conductor.type_format import TypeFormat -from apps.api.conductor.model_format import ModelFormat - -client = ModelFormat - - -def _validate_query_data(provider, key, value, define, resource_value_config, resource_name): - ''' - - - :param key: - :param value: - :param define: string or json - { - "type":"string", - "convert":"access_key", - "allow_null":1, - "default":"", - "define":{ - } - } - :return: - ''' - - if not define: - return {} - - if isinstance(define, basestring): - if define.strip() == '-' or not define.strip(): - return {} - - key = define or key - else: - value = client.format_query_value(key, value, define) - if value: - value = client.convert_apply_value(value, resource_value_config) - value = client.format_type(value, type=define.get("type", "string")) - 
if isinstance(value, list): - # for list after format, may need revert value - value = client.convert_apply_value(value, resource_value_config) - value = client.hint_apply_infos(provider, value, define, resource_name) - - if define.get("convert"): - key = define.get("convert") or key - - if value: - return {key: value} - elif isinstance(value, (int, bool)): - return {key: value} - else: - return {} - - -def query_data_builder(provider, datas, defines, resource_values_config, resource_name): - ''' - - { - "type":"string", - "convert":"access_key", - "allow_null":1, - "default":"", - "hint":"$resource.vpc/$resource", - "define":{ - } - } - :param provider: - :param datas: - :param defines: - :param resource_values_config: - :return: - ''' - result = {} - logger.info("query_builder ... ") - - for key, define in defines.items(): - # 依据定义字段转换,只转换defines中的字段,检查必要字段的传入,未定义字段移除 - if isinstance(define, dict) and define.get("define"): - if datas.get(key): - value = TypeFormat.f_dict(datas.get(key)) - else: - value = query_data_builder(provider=provider, - datas=datas, defines=define.get("define"), - resource_values_config=resource_values_config, - resource_name=resource_name) - if value: - result[key] = value - else: - _t = _validate_query_data(provider=provider, key=key, - value=datas.get(key), define=define, - resource_value_config=resource_values_config.get(key), - resource_name=resource_name) - if _t: - result.update(_t) - - return result - - -def query_return_builder(data, defines, results): - add_columns = {} - for key, define in defines.items(): - if isinstance(define, dict): - if define.get("return") in [True, 1, '1', 'true', 'True']: - add_columns[key] = data.get(key) - - if isinstance(results, list): - res = [] - for result in results: - if isinstance(result, dict): - x_adder_col = copy.deepcopy(add_columns) - x_adder_col.update(result) - res.append(x_adder_col) - else: - res.append(result) - return res - elif isinstance(results, dict): - 
add_columns.update(results) - return add_columns - else: - return results diff --git a/apps/api/conductor/source_output_conductor.py b/apps/api/conductor/source_output_conductor.py deleted file mode 100644 index 9f199195..00000000 --- a/apps/api/conductor/source_output_conductor.py +++ /dev/null @@ -1,304 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import re -import json -import traceback -from lib.logs import logger -from apps.api.conductor.type_format import TypeFormat -from apps.api.conductor.model_format import ModelFormat -from apps.api.conductor.value_conductor import ValueConfigerConductor - - -class SourceOuterReader(object): - @staticmethod - def format_argument(key, data): - if not data: - return "" - if isinstance(data, dict): - return data - elif isinstance(data, basestring): - data = data.strip() - if data.startswith("{"): - data = TypeFormat.f_dict(data) - - return data - else: - raise ValueError("key: %s 应为json或string" % key) - - @staticmethod - def is_null_dict(data): - count = 0 - for _, x_value in data.items(): - if not x_value: - count += 1 - if count == len(data): - logger.info("out data columns is null, skip ...") - return True - - return False - - @staticmethod - def skip_empty_dict(datas): - result = [] - for out_data in datas: - if out_data: - if isinstance(out_data, dict) and (not SourceOuterReader.is_null_dict(out_data)): - result.append(out_data) - elif isinstance(out_data, (basestring, bool, int, float)): - result.append(out_data) - - return result - - @staticmethod - def eval_line(line, column): - def line_pointer(data): - try: - data = data[len("$line"):] - nums = re.findall('\d+', data) - if nums: - num = nums[0] - split_point = data.replace(num) - return split_point, int(num) - else: - return None, None - except: - raise ValueError("$line define error, use: split + [num], example: $line / $line#2") - - if column == "$line": - return line - elif 
column.startswith("$line"): - s_split, point = line_pointer(column) - if s_split: - try: - return line.split(s_split)[point] - except: - raise ValueError("$line: %s 不能获取数据, 请检查定义" % column) - else: - return line - else: - logger.info("unknown define %s ,skip ..." % line) - return "" - - @staticmethod - def fetch_property(provider, key, data, define_column, resource_value_config, resource_name): - ''' - - 提取 output 数据字段值 - :param provider: - :param data: - :param define_columns: - string: {"desc": "$line"} - dict: {"name": "name", "dns": {"type": "list", "convert": "dns_info"}} - :param resource_value_config: - :return: - ''' - - def _f_string_property_(data, key, column, resource_value_config): - ''' - #处理data为string时字段提取 - {"desc": "$line", "port": "$line#2"} - :param data: - :param key: - :param columns: - :param resource_values_config: - :return: - ''' - - x_value = "" - if column.startswith("$"): - x_value = SourceOuterReader.eval_line(line=data, column=column) - if x_value: - x_value = resource_value_config.get(x_value) or x_value - elif column == "-" or not column: - return {} - - return {key: x_value} - - def _f_fetch_property(data, define): - if "." in define: - _keys = define.split(".") - tmp = data - for x_key in _keys: - try: - tmp = tmp[int(x_key)] - except: - tmp = tmp.get(x_key) - - x_data = tmp - else: - x_data = data.get(define) or "" - - return x_data - - def _f_dict_property_(provider, data, key, define, resource_value_config, resource_name): - ''' - for dict - :param data: - :param define_columns: - {"name": "name", "dns": {"type": "list", "convert": "dns_info"}} - :param resource_value_config: - :return: - ''' - - if not define: - logger.info("key: %s define is empty, skip it .." % key) - return {} - - if isinstance(define, basestring): - if define == '-' or not define.strip(): - logger.info("key: %s define ignore, skip it .." 
% key) - return {} - - value = _f_fetch_property(data, define) - value = ValueConfigerConductor.outer_value(value, resource_value_config) - return {key: value} - else: - to_column = define.get("convert") or key - value = _f_fetch_property(data, to_column) - value = ModelFormat.format_type(value, type=define.get("type", "string")) - value = ValueConfigerConductor.outer_value(value, resource_value_config) - - # for hint 转换为资产id等信息 - value, add_info = ModelFormat.hint_outer_infos(provider, value, define, resource_name) - add_info[key] = value - return add_info - - if isinstance(data, basestring): - try: - return _f_string_property_(data=data, key=key, - column=define_column, - resource_value_config=resource_value_config) - except Exception, e: - raise e - - return _f_dict_property_(provider=provider, data=data, - key=key, define=define_column, - resource_value_config=resource_value_config, - resource_name=resource_name) - - -def source_object_outer(datas, columns): - if len(columns) == 0: - c_data = [] - if isinstance(datas, list): - for data in datas: - if isinstance(data, list): - c_data += data - else: - c_data.append(data) - elif isinstance(datas, dict): - c_data.append(datas) - else: - logger.info("data is not list/json may error....") - c_data.append(datas) - - return SourceOuterReader.skip_empty_dict(c_data) - - column = columns.pop(0) - if isinstance(datas, list): - x_data = [] - for data in datas: - try: - x_data.append(data.get(column)) if data.get(column) else None - except: - raise ValueError("can not fetch property: %s" % column) - - return source_object_outer(x_data, columns) - elif isinstance(datas, dict): - return source_object_outer(datas.get(column), columns) - else: - logger.info("data is not dict/list, no columns %s filter, skip.." 
% column) - return datas - - -def _data_attr_(result): - _data = result.get("resources")[0] - _instances = _data.get("instances")[0] - return _instances.get("attributes") - - -def _adder_property(result, key, define): - x_tmp = [] - for t_data in result: - if isinstance(t_data, dict): - t_data[define.get("property")] = key - elif isinstance(t_data, basestring): - # 对于获取的数据是字符串类型时, 需要添加property字段, - # 则先将数据放入x_Origin_line, 由后续的字段提取进行特殊处理 - t_data = {define.get("property"): key, "x_Origin_line": t_data} - else: - logger.info("_adder_property %s is not string or dict, skip add property" % key) - - x_tmp.append(t_data) - - return x_tmp - - -def read_source_output(result, data_source_argument): - try: - result_columns = [] - _attributes = _data_attr_(result) - - if not data_source_argument: - return source_object_outer(datas=_attributes, columns=[]) - - if isinstance(data_source_argument, basestring): - result_columns = source_object_outer(datas=_attributes, columns=data_source_argument.split(".")) - elif isinstance(data_source_argument, dict): - # 多个字段提取定义 {"engree": {"property": "type", "attributes": "engress"}} - for key, define in data_source_argument.items(): - if isinstance(define, basestring): - result_columns = source_object_outer(datas=_attributes, columns=define.split(".")) - elif isinstance(define, dict): - col_defines = source_object_outer(datas=_attributes, - columns=define.get("attributes", "").split(".")) - - if define.get("property"): - col_defines = _adder_property(result=col_defines, key=key, define=define) - result_columns += col_defines - else: - raise ValueError("data source argument 配置异常应为 string或json:key-value " - "或 key - {'property': 'xxx', 'attributes': ''}") - - return result_columns - except: - logger.info(traceback.format_exc()) - raise ValueError("query remote source failed, result read faild") - - -def read_outer_property(provider, result, defines, resource_values_config, resouce_name): - logger.debug("data source output outer .... 
") - - # 处理x_Origin_line - x_Origin_line = result.pop("x_Origin_line", None) if isinstance(result, dict) else None - if x_Origin_line: - for key, define in defines.items(): - _t = SourceOuterReader.fetch_property(provider, key, result, define, - resource_values_config.get(key), - resouce_name) - result.update(_t) - - return result - - res = {} - for key, define in defines.items(): - if isinstance(define, dict) and define.get("define"): - if result.get(key): - value = TypeFormat.f_dict(result.get(key)) - else: - value = read_outer_property(provider=provider, result=result, - defines=define.get("define"), - resource_values_config=resource_values_config, - resouce_name=resouce_name) - if value: - res[key] = value - - else: - _t = SourceOuterReader.fetch_property(provider, key, result, define, - resource_values_config.get(key), - resouce_name) - if _t: - res.update(_t) - - return res diff --git a/apps/api/conductor/type_format.py b/apps/api/conductor/type_format.py deleted file mode 100644 index 2a04b565..00000000 --- a/apps/api/conductor/type_format.py +++ /dev/null @@ -1,150 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger - - -class TypeFormat(object): - @staticmethod - def f_dict(value): - ''' - - :param value: - :return: - ''' - - def str_dict(value): - try: - return json.loads(value) - except: - if value.startswith("{"): - return eval(value) - if value.startswith("["): - return eval(value) - - raise ValueError() - - if isinstance(value, dict): - return value - else: - try: - return str_dict(value) - except: - logger.info(traceback.format_exc()) - logger.info("key: %s, data: %s may is json, but format error") - raise ValueError("%s 不是json" % value) - - @staticmethod - def f_list(value): - ''' - - :param value: - :return: - ''' - if not value: - return [] - def str_list(value): - try: - value = value.replace("[", "").replace("]", "") - return 
value.split(",") - except: - pass - - try: - return json.loads(value) - except: - return eval(value) - - try: - if isinstance(value, basestring): - if value.startswith("["): - value = str_list(value) - elif "," in value: - value = value.split(",") - elif ";" in value: - value = value.split(";") - else: - _v = " ".join(value.split()) - value = _v.split() - elif isinstance(value, list): - return value - else: - raise ValueError() - except: - raise ValueError("%s 不是list类型" % value) - - # 移除空值 - result = [] - for x_value in value: - if x_value: - result.append(x_value) - return result - - @staticmethod - def f_bool(value): - ''' - - :param value: - :return: - ''' - - if isinstance(value, basestring): - if value.lower() == "true": - value = True - elif value.lower() == "false": - value = False - else: - raise ValueError() - elif isinstance(value, int): - if value: - value = True - else: - value = False - else: - raise ValueError("未知的 bool值: %s" % value) - - return value - - @staticmethod - def f_int(value): - ''' - - :param value: - :return: - ''' - - try: - value = int(value) - except: - raise ValueError("%s 不是int" % value) - return value - - @staticmethod - def f_float(value): - ''' - - :param value: - :return: - ''' - - try: - value = float(value) - except: - raise ValueError("%s 不是浮点类型" % value) - return value - - @staticmethod - def f_string(value): - ''' - - :param value: - :return: - ''' - - if isinstance(value, int): - value = str(value) - else: - raise ValueError("%s 不是string" % value) - return value diff --git a/apps/api/conductor/valueConfiger.py b/apps/api/conductor/valueConfiger.py deleted file mode 100644 index 8e1057a0..00000000 --- a/apps/api/conductor/valueConfiger.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from core import local_exceptions -from lib.logs import logger -from apps.common.convert_keys import 
convert_value -from apps.background.resource.configr.value_config import ValueConfigObject - - -class ValueConductor(object): - def values_config(self, provider, resource_name): - ''' - - :param provider: - :param resource_name: - :return: - ''' - - return ValueConfigObject().resource_value_configs(provider, resource_name) - - def conductor_apply_values(self, provider, resource_name, data): - ''' - todo 特殊值/约定规则值处理 - :param provider: - :param resource_name: - :param data: - :return: - ''' - - resource_values_config = self.values_config(provider, resource_name) - - resource_columns = {} - logger.debug("start revert value ....") - for key, value in data.items(): - if resource_values_config.get(key): - _values_configs = resource_values_config.get(key) - value = convert_value(value, _values_configs.get(value)) - else: - logger.debug("key: %s value config is null, skip..." % key) - - resource_columns[key] = value - - return resource_columns diff --git a/apps/api/conductor/valueReverse.py b/apps/api/conductor/valueReverse.py deleted file mode 100644 index feb8594e..00000000 --- a/apps/api/conductor/valueReverse.py +++ /dev/null @@ -1,77 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from core import local_exceptions -from lib.logs import logger -from apps.common.convert_keys import convert_value -from apps.common.reverse import Reverse -from apps.common.reverse_convert_keys import ReverseProperty -from apps.background.resource.configr.value_config import ValueConfigObject -from .provider import ProviderConductor -from .region import RegionConductor - - -class ValueResetConductor(object): - def values_config(self, provider, resource_name): - ''' - - :param provider: - :param resource_name: - :return: - ''' - - return ValueConfigObject().resource_value_configs(provider, resource_name) - - def reset_values(self, provider, resource_name, data): - ''' - todo 
特殊值/约定规则值处理 - :param provider: - :param resource_name: - :param data: - :return: - ''' - - resource_values_config = self.values_config(provider, resource_name) - - resource_columns = {} - logger.debug("start revert value ....") - for key, value in data.items(): - if resource_values_config.get(key): - _values_configs = resource_values_config.get(key) - # value = convert_value(value, _values_configs.get(value)) - x_value = ReverseProperty.format_value(value, _values_configs.get(value)) - - if x_value != value: - value = x_value - else: - for m_key, m_value in _values_configs.items(): - if m_value == value: - value = m_key - else: - logger.debug("key: %s value config is null, skip..." % key) - - resource_columns[key] = value - - if "zone" in data.keys(): - zone = ProviderConductor().zone_reverse_info(provider, zone=data["zone"]) - zone = RegionConductor().zone_reverse_info(provider, zone=zone) - logger.info("find zone %s" % zone) - resource_columns["zone"] = zone - resource_columns["x_ora_zone"] = zone - - if "peer_region" in data.keys(): - peer_region = ProviderConductor().region_reverse_info(provider, region=data["peer_region"]) - peer_region = RegionConductor().region_reverse_info(provider, region=peer_region) - logger.info("find region %s" % peer_region) - resource_columns["peer_region"] = peer_region - - if "region" in data.keys(): - peer_region = ProviderConductor().region_reverse_info(provider, region=data["region"]) - peer_region = RegionConductor().region_reverse_info(provider, region=peer_region) - logger.info("find region %s" % peer_region) - resource_columns["region"] = peer_region - - return resource_columns diff --git a/apps/api/conductor/value_conductor.py b/apps/api/conductor/value_conductor.py deleted file mode 100644 index b4e02452..00000000 --- a/apps/api/conductor/value_conductor.py +++ /dev/null @@ -1,39 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback 
-from lib.logs import logger -from apps.api.conductor.model_format import ModelFormat - - -class ValueConfigerConductor(object): - @staticmethod - def terraform_value(value, resource_value_config): - if value: - x_value = resource_value_config.get(value) - if isinstance(x_value, dict): - value = x_value.get("value") or value - value = ModelFormat.format_type(value, type=x_value.get("type")) - - return value - - @staticmethod - def outer_value(value, resource_value_config): - if value is None or value == "": - value = "" - if not resource_value_config: - return value - - if value or value == 0 or value is False: - for x_value, y_value in resource_value_config.items(): - if isinstance(y_value, basestring): - if str(y_value) == str(value): - value = x_value - break - else: - if str(y_value.get("value")) == str(value): - value = x_value - break - - return value diff --git a/apps/api/configer/commonkey.py b/apps/api/configer/commonkey.py deleted file mode 100644 index 7198d268..00000000 --- a/apps/api/configer/commonkey.py +++ /dev/null @@ -1,7 +0,0 @@ -# coding: utf-8 - -from apps.background.resource.configr.commonkey import CommonKeyObject - - -class CommonKeyApi(object): - pass diff --git a/apps/api/configer/provider.py b/apps/api/configer/provider.py deleted file mode 100644 index 7f3ef0c1..00000000 --- a/apps/api/configer/provider.py +++ /dev/null @@ -1,179 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -from core import local_exceptions -from lib.command import command -from lib.logs import logger -from lib.encrypt_helper import encrypt_str -from lib.encrypt_helper import decrypt_str -from wecube_plugins_terraform.settings import BASE_DIR -from wecube_plugins_terraform.settings import TERRAFORM_BASE_PATH -from wecube_plugins_terraform.settings import TERRFORM_BIN_PATH -from apps.background.lib.commander.terraform import TerraformDriver -from 
apps.background.resource.configr.provider import ProviderObject -from apps.background.resource.configr.value_config import ValueConfigObject -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import convert_value -from apps.common.reverse_convert_keys import ReverseProperty - -if not os.path.exists(TERRAFORM_BASE_PATH): - os.makedirs(TERRAFORM_BASE_PATH) - - -class ProviderApi(object): - def region_info(self, provider, region): - ''' - 转换region 信息 - :param provider: provider name - :param region: - :return: - ''' - - data = ValueConfigObject().query_one(where_data={"provider": provider, - "resource": "region"}) - # if not data: - # raise local_exceptions.RequestValidateError("config region 未进行定义") - - _config = data.get("value_config") or {} - return convert_value(region, _config.get(region)) - - def zone_info(self, provider, zone): - ''' - - :param provider: provider name - :param region: - :return: - ''' - data = ValueConfigObject().query_one(where_data={"provider": provider, - "resource": "zone"}) - # if not data: - # raise local_exceptions.RequestValidateError("zone 未进行定义") - - _config = data.get("value_config") or {} - return convert_value(zone, _config.get(zone)) - - def region_reverse_info(self, provider, region): - ''' - - :param provider: provider name - :param region: - :return: - ''' - data = ValueConfigObject().query_one(where_data={"provider": provider, - "resource": "region"}) - if not data: - return region - - _config = data["value_config"] - - return ReverseProperty.format_value(region, _config) - - def zone_reverse_info(self, provider, zone): - ''' - - :param provider: provider name - :param region: - :return: - ''' - data = ValueConfigObject().query_one(where_data={"provider": provider, - "resource": "zone"}) - if not data: - return zone - - _config = data["value_config"] - - return ReverseProperty.format_value(zone, _config) - - # return convert_value(zone, _config.get(zone)) - - def init_provider(self, provider): - 
''' - - :param provider: provider name - :return: - ''' - provider_path = os.path.join(TERRAFORM_BASE_PATH, provider) - if not os.path.exists(provider_path): - os.makedirs(provider_path) - - return TerraformDriver(terraform_path=TERRFORM_BIN_PATH, - workdir=provider_path).init(provider_path) - - def _generate_info(self, provider, region, data): - ''' - - :param provider: provider name - :param region: - :param data: other columns - :return: - ''' - - extend_info = data.get("extend_info", {}) - provider_property = data.get("provider_property", {}) - - region = self.region_info(provider, region) - - provider_info = {"region": region} - for key in ["secret_id", "secret_key"]: - if data.get(key): - provider_info[key] = data.get(key) - - provider_info.update(extend_info) - provider_columns = convert_keys(provider_info, defines=provider_property) - - provider_data = { - "provider": { - provider: provider_columns - } - } - - return provider_data - - def decrypt_key(self, str): - if str: - if str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - def provider_info(self, provider_id, region, provider_data=None): - ''' - - :param provider_id: provider id - :param region: - :param provider_data: provider object - :return: - ''' - if not provider_data: - provider_data = ProviderObject().provider_object(provider_id) - provider_data["secret_id"] = self.decrypt_key(provider_data.get("secret_id")) - provider_data["secret_key"] = self.decrypt_key(provider_data.get("secret_key")) - - if not provider_data.get("is_init"): - raise local_exceptions.ResourceConfigError("provider 未初始化,请重新初始化") - - return provider_data, self._generate_info(provider_data["name"], region, provider_data) - - def create_provider_workspace(self, provider): - ''' - - :param provider: provider name - :return: - ''' - provider_path = os.path.join(TERRAFORM_BASE_PATH, provider) - provider_version = os.path.join(provider_path, "versions.tf") - - if not 
os.path.exists(provider_path): - os.makedirs(provider_path) - - if not os.path.exists(provider_version): - _version_path = os.path.join(BASE_DIR, "plugins/%s/versions.tf" % provider) - if os.path.exists(_version_path): - command(cmd="cp %s %s" % (_version_path, provider_path)) - else: - logger.info("file: %s not found" % _version_path) - - return True diff --git a/apps/api/configer/provider_secret.py b/apps/api/configer/provider_secret.py deleted file mode 100644 index 73d3eb3a..00000000 --- a/apps/api/configer/provider_secret.py +++ /dev/null @@ -1,52 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from core import local_exceptions -from lib.logs import logger -from lib.encrypt_helper import encrypt_str -from lib.encrypt_helper import decrypt_str -from apps.background.resource.configr.provider_secret import ProviderSecretObject - - -class SecretApi(object): - def decrypt_key(self, str): - if str: - if str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - def secret_info(self, provider, name, region): - ''' - - :param provider: - :param name: - :return: - ''' - - data = ProviderSecretObject().query_one(where_data={"provider": provider, "name": name}) - if not data: - logger.info("provider %s, name %s is null, return skip ..." % (provider, name)) - return {} - else: - define_region = data.get("region", "") or "" - if define_region: - if region not in define_region.split(","): - raise ValueError("secret : %s define at %s not apply for region: %s" % (name, - define_region, region)) - try: - _info = self.decrypt_key(data.get("secret_info")) - if _info: - logger.debug("found secret: %s" % name) - return json.loads(_info) - else: - logger.info("secret name %s info is null" % name) - return {} - except: - logger.info(traceback.format_exc()) - logger.info("secret name %s decrypt secret failed..." 
% name) - return {} diff --git a/apps/api/configer/region.py b/apps/api/configer/region.py deleted file mode 100644 index 7384f0aa..00000000 --- a/apps/api/configer/region.py +++ /dev/null @@ -1,158 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import local_exceptions -from apps.background.resource.configr.region import RegionObject -from apps.background.resource.configr.region import ZoneObject - - -class RegionApi(object): - def region_asset(self, region_id): - ''' - - :param region_id: - :return: - ''' - - data = RegionObject().region_object(region_id) - return data["asset_id"], data - - def provider_region_asset(self, provider, region_id): - ''' - - :param region_id: - :return: - ''' - - client = RegionObject() - data = client.query_one(where_data={"provider": provider, "id": region_id}) - if not data: - data = client.query_one(where_data={"provider": provider, "name": region_id}) - if not data: - data = client.query_one(where_data={"provider": provider, "asset_id": region_id}) - - if not data: - raise local_exceptions.ResourceValidateError("region", "region %s 未注册" % region_id) - - return data["asset_id"], data - - def region_objectid(self, asset_id, provider=None): - ''' - - :param asset_id: - :param provider: - :return: - ''' - - data = RegionObject().region_asset_object(asset_id, provider) - if data: - return data["id"] - return asset_id - - -class ZoneApi(object): - def zone_asset(self, zone_id): - ''' - - :param region_id: - :return: - ''' - - data = ZoneObject().zone_object(zone_id) - return data["asset_id"] - - def zone_asset_data(self, zone_id): - ''' - - :param region_id: - :return: - ''' - - data = ZoneObject().zone_object(zone_id) - return data["asset_id"], data - - def provider_zone_object(self, provider, region, zone_id): - ''' - - :param provider: - :param region: - :param zone_id: - :return: - ''' - - template = {"provider": provider} - if region: - template["region"] 
= region - - where_data = {"id": zone_id} - where_data.update(template) - data = ZoneObject().query_one(where_data=where_data) - if not data: - where_data = {"name": zone_id} - where_data.update(template) - data = ZoneObject().query_one(where_data=where_data) - if not data: - where_data = {"asset_id": zone_id} - where_data.update(template) - data = ZoneObject().query_one(where_data=where_data) - - if not data: - raise local_exceptions.ResourceValidateError("zone", "zone %s 未注册" % zone_id) - - return data["asset_id"], data - - def zone_objectid(self, asset_id, provider=None): - ''' - - :param asset_id: - :param provider: - :return: - ''' - - data = ZoneObject().zone_asset(asset_id, provider) - if data: - return data["id"] - - return asset_id - - def zone_region_ids(self, region, provider=None): - ''' - - :param region: - :param provider: - :return: - ''' - - where_data = {"region": region} - if provider: - where_data["provider"] = region - - count, data = ZoneObject().list(filters=where_data) - mapping = {} - - for x_data in data: - asset_id = x_data["asset_id"] - mapping[asset_id] = x_data["id"] - - return mapping - - def region_zones(self, region, provider=None): - ''' - - :param region: - :param provider: - :return: - ''' - - where_data = {"region": region} - # if provider: - # where_data["provider"] = region - - count, data = ZoneObject().list(filters=where_data) - mapping = [] - - for x_data in data: - mapping.append(x_data["id"]) - mapping.append(x_data["asset_id"]) - - return mapping diff --git a/apps/api/configer/resource.py b/apps/api/configer/resource.py deleted file mode 100644 index 85e36804..00000000 --- a/apps/api/configer/resource.py +++ /dev/null @@ -1,13 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import datetime -import json -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import ResourceManager -from apps.background.resource.configr.resource import 
ResourceObject - - -class ResourceApi(object): - pass diff --git a/apps/api/configer/value_config.py b/apps/api/configer/value_config.py deleted file mode 100644 index 9a335141..00000000 --- a/apps/api/configer/value_config.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding: utf-8 - -import datetime -import json -from lib.uuid_util import get_uuid -from apps.background.resource.configr.value_config import ValueConfigObject - - -class ValueConfigApi(object): - pass diff --git a/apps/api/database/kvstore/kvstore.py b/apps/api/database/kvstore/kvstore.py deleted file mode 100644 index 78f20718..00000000 --- a/apps/api/database/kvstore/kvstore.py +++ /dev/null @@ -1,141 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.vm.instance_type import InstanceTypeObject -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase_backend import ApiBackendBase - - -class KvStoreApi(ApiBase): - def __init__(self): - super(KvStoreApi, self).__init__() - self.resource_name = "kvstore" - self.resource_workspace = "kvstore" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - sg_id = create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _subnet_status = define_relations_key("subnet_id", subnet_id, resource_property.get("subnet_id")) - 
_vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _sg_status = define_relations_key("security_group_id", sg_id, resource_property.get("security_group_id")) - - ext_info = {} - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if sg_id and (not _sg_status): - sg_property = resource_property.get("security_group_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": - sg_list = validate_type(sg_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("security_group").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("security_group").object_resource_id(sg_id) - - ext_info["security_group_id"] = _sg_resource_ids - else: - ext_info["security_group_id"] = CrsObject("security_group").object_resource_id(sg_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id"), - "security_group_id": create_data.get("security_group_id")} - - password = create_data.get("password") - x_create_data = {"name": create_data.get("name"), - "engine": self.resource_name, "zone": zone, - "version": create_data.get("version"), - "port": create_data.get("port"), - "charge_type": create_data.get("charge_type"), - "instance_type": create_data.get("instance_type") - } - - if password: - x_create_data["password"] = password - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - return None, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param 
zone: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data, - provider=provider_object["name"]) - - origin_type, instance_type_data = InstanceTypeObject().convert_resource_id(provider_object.get("id"), - create_data.get("instance_type")) - - x_create_data["instance_type"] = origin_type - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, **kwargs) - - return count, res - - -class KvStoreBackendApi(ApiBackendBase): - def __init__(self): - super(KvStoreBackendApi, self).__init__() - self.resource_name = "kvstore" - self.resource_workspace = "kvstore" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/kvstore/kvstore_account.py b/apps/api/database/kvstore/kvstore_account.py deleted file mode 100644 index 58c694a8..00000000 --- a/apps/api/database/kvstore/kvstore_account.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from 
apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class KvAccountApi(ApiBase): - def __init__(self): - super(KvAccountApi, self).__init__() - self.resource_name = "kvstore_account" - self.resource_workspace = "kvstore_account" - self.relation_resource = "kvstore" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - kvstore_id = create_data.get("kvstore_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _kv_status = define_relations_key("kvstore_id", kvstore_id, resource_property.get("kvstore_id")) - - ext_info = {} - if kvstore_id and (not _kv_status): - ext_info["kvstore_id"] = CrsObject("kvstore").object_resource_id(kvstore_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"kvstore_id": create_data.get("kvstore_id")} - - x_create_data = {"username": create_data.get("username"), - "password": create_data.get("password")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("kvstore_id") - return owner_id, None - - -class KvAccountBackendApi(ApiBackendBase): - def __init__(self): - super(KvAccountBackendApi, self).__init__() - self.resource_name = "kvstore_account" - self.resource_workspace = "kvstore_account" - self.relation_resource = "kvstore" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/kvstore/kvstore_backup.py b/apps/api/database/kvstore/kvstore_backup.py deleted file mode 100644 index fa358d89..00000000 --- a/apps/api/database/kvstore/kvstore_backup.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding: utf-8 - -from 
__future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class KvBackupApi(ApiBase): - def __init__(self): - super(KvBackupApi, self).__init__() - self.resource_name = "kvstore_backup" - self.resource_workspace = "kvstore_backup" - self.relation_resource = "kvstore" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - kvstore_id = create_data.get("kvstore_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _kv_status = define_relations_key("kvstore_id", kvstore_id, resource_property.get("kvstore_id")) - - ext_info = {} - if kvstore_id and (not _kv_status): - ext_info["kvstore_id"] = CrsObject("kvstore").object_resource_id(kvstore_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"kvstore_id": create_data.get("kvstore_id")} - - x_create_data = {"backup_period": create_data.get("backup_period"), - "backup_time": create_data.get("backup_time")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("kvstore_id") - return owner_id, None - - -class KvBackupBackendApi(ApiBackendBase): - def __init__(self): - super(KvBackupBackendApi, self).__init__() - self.resource_name = "kvstore_backup" - self.resource_workspace = "kvstore_backup" - self.relation_resource = "kvstore" - 
self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/kvstore/memcached.py b/apps/api/database/kvstore/memcached.py deleted file mode 100644 index 0d827d0a..00000000 --- a/apps/api/database/kvstore/memcached.py +++ /dev/null @@ -1,70 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from .kvstore import KvStoreApi -from .kvstore import KvStoreBackendApi -from .kvstore_backup import KvBackupApi -from .kvstore_backup import KvBackupBackendApi -from apps.background.resource.resource_base import CrsObject - - -class MemcachedApi(KvStoreApi): - def __init__(self): - super(MemcachedApi, self).__init__() - self.resource_name = "memcached" - self.resource_workspace = "memcached" - self._flush_resobj() - self.resource_keys_config = None - - -class MemcachedBackendApi(KvStoreBackendApi): - def __init__(self): - super(MemcachedBackendApi, self).__init__() - self.resource_name = "memcached" - self.resource_workspace = "memcached" - self._flush_resobj() - self.resource_keys_config = None - - -class MemcachedBackupApi(KvBackupApi): - def __init__(self): - super(MemcachedBackupApi, self).__init__() - self.resource_name = "memcached_backup" - self.resource_workspace = "memcached_backup" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - kvstore_id = create_data.get("memcached_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _kv_status = define_relations_key("memcached_id", kvstore_id, resource_property.get("memcached_id")) - - ext_info = {} - if kvstore_id and (not _kv_status): - ext_info["memcached_id"] = 
CrsObject("memcached").object_resource_id(kvstore_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - -class MemcachedBackupBackendApi(KvBackupBackendApi): - def __init__(self): - super(MemcachedBackupBackendApi, self).__init__() - self.resource_name = "memcached_backup" - self.resource_workspace = "memcached_backup" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/kvstore/redis.py b/apps/api/database/kvstore/redis.py deleted file mode 100644 index 1c401439..00000000 --- a/apps/api/database/kvstore/redis.py +++ /dev/null @@ -1,113 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from .kvstore import KvStoreApi -from .kvstore import KvStoreBackendApi -from .kvstore_backup import KvBackupApi -from .kvstore_backup import KvBackupBackendApi -from .kvstore_account import KvAccountApi -from .kvstore_account import KvAccountBackendApi -from apps.background.resource.resource_base import CrsObject - - -class RedisApi(KvStoreApi): - def __init__(self): - super(RedisApi, self).__init__() - self.resource_name = "redis" - self.resource_workspace = "redis" - self._flush_resobj() - self.resource_keys_config = None - - -class RedisBackendApi(KvStoreBackendApi): - def __init__(self): - super(RedisBackendApi, self).__init__() - self.resource_name = "redis" - self.resource_workspace = "redis" - self._flush_resobj() - self.resource_keys_config = None - - -class RedisBackupApi(KvBackupApi): - def __init__(self): - super(RedisBackupApi, self).__init__() - self.resource_name = "redis_backup" - self.resource_workspace = "redis_backup" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, 
is_update=None): - ''' - - :param provider: - :param create_data: - :param is_update: - :return: - ''' - - kvstore_id = create_data.get("redis_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _kv_status = define_relations_key("redis_id", kvstore_id, resource_property.get("redis_id")) - - ext_info = {} - if kvstore_id and (not _kv_status): - ext_info["redis_id"] = CrsObject("redis").object_resource_id(kvstore_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - -class RedisBackupBackendApi(KvBackupBackendApi): - def __init__(self): - super(RedisBackupBackendApi, self).__init__() - self.resource_name = "redis_backup" - self.resource_workspace = "redis_backup" - self._flush_resobj() - self.resource_keys_config = None - - -class RedisAccountApi(KvAccountApi): - def __init__(self): - super(RedisAccountApi, self).__init__() - self.resource_name = "redis_account" - self.resource_workspace = "redis_account" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param create_data: - :param is_update: - :return: - ''' - - kvstore_id = create_data.get("redis_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _kv_status = define_relations_key("redis_id", kvstore_id, resource_property.get("redis_id")) - - ext_info = {} - if kvstore_id and (not _kv_status): - ext_info["redis_id"] = CrsObject("redis").object_resource_id(kvstore_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - -class RedisAccountBackendApi(KvAccountBackendApi): - def __init__(self): - super(RedisAccountBackendApi, self).__init__() - self.resource_name = "redis_account" - self.resource_workspace = "redis_account" - self._flush_resobj() - self.resource_keys_config = None diff --git 
a/apps/api/database/mysql/account.py b/apps/api/database/mysql/account.py deleted file mode 100644 index ae37dff1..00000000 --- a/apps/api/database/mysql/account.py +++ /dev/null @@ -1,163 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import convert_value -from apps.common.convert_keys import convert_key_only -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class MysqlAccountApi(ApiBase): - def __init__(self): - super(MysqlAccountApi, self).__init__() - self.resource_name = "mysql_account" - self.resource_workspace = "mysql_account" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param mysql_id: - :return: - ''' - - mysql_id = create_data.get("mysql_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _mysql_status = define_relations_key("mysql_id", mysql_id, resource_property.get("mysql_id")) - - ext_info = {} - if mysql_id and (not _mysql_status): - ext_info["mysql_id"] = CrsObject("mysql").object_resource_id(mysql_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"mysql_id": create_data.get("mysql_id")} - - password = create_data.get("password") # or "Terraform.123" - x_create_data = {"password": password, - "name": create_data.get("name")} - - return x_create_data, r_create_data - - def generate_owner_data(self, 
create_data, **kwargs): - owner_id = create_data.get("mysql_id") - return owner_id, None - - -class MysqlAccountBackendApi(ApiBackendBase): - def __init__(self): - super(MysqlAccountBackendApi, self).__init__() - self.resource_name = "mysql_account" - self.resource_workspace = "mysql_account" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None - - -class MysqlPrivilegeApi(ApiBase): - def __init__(self): - super(MysqlPrivilegeApi, self).__init__() - self.resource_name = "mysql_privilege" - self.resource_workspace = "mysql_privilege" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param mysql_id: - :return: - ''' - mysql_id = create_data.get("mysql_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _mysql_status = define_relations_key("mysql_id", mysql_id, resource_property.get("mysql_id")) - - ext_info = {} - if mysql_id and (not _mysql_status): - ext_info["mysql_id"] = CrsObject("mysql").object_resource_id(mysql_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def format_privilege(self, provider, database, privileges): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - resource_values_config = self.values_config(provider) - - _columns_status = define_relations_key("database_columns", "00000", resource_property.get("database_columns")) - - ext_info = {} - if _columns_status: - ext_info["database"] = database - ext_info["privileges"] = privileges - else: - tmp_dict = {"database": database, "privileges": privileges} - resource_columns = {} - for key, value in tmp_dict.items(): - if resource_values_config.get(key): - _values_configs = 
resource_values_config.get(key) - value = convert_value(value, _values_configs.get(value)) - - resource_columns[key] = value - - resource_columns = convert_keys(resource_columns, defines=resource_property, is_update=True) - database_columns = convert_key_only("database_columns", - define=resource_property.get("database_columns", "database_columns")) - - ext_info = {database_columns: [resource_columns]} - - logger.info("format_privilege add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"mysql_id": create_data.get("mysql_id")} - - database = create_data.get("database") - privileges = create_data.get("privileges") - x_create_data = {"username": create_data.get("username")} - - x_create_data.update(self.format_privilege(provider=kwargs.get("provider"), - database=database, - privileges=privileges)) - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("mysql_id") - return owner_id, None - - -class MysqlPrivilegeBackendApi(ApiBackendBase): - def __init__(self): - super(MysqlPrivilegeBackendApi, self).__init__() - self.resource_name = "mysql_privilege" - self.resource_workspace = "mysql_privilege" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/mysql/backup.py b/apps/api/database/mysql/backup.py deleted file mode 100644 index 1010b70c..00000000 --- a/apps/api/database/mysql/backup.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class 
MysqlBackupApi(ApiBase): - def __init__(self): - super(MysqlBackupApi, self).__init__() - self.resource_name = "mysql_backup" - self.resource_workspace = "mysql_backup" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param mysql_id: - :return: - ''' - mysql_id = create_data.get("mysql_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _mysql_status = define_relations_key("mysql_id", mysql_id, resource_property.get("mysql_id")) - - ext_info = {} - if mysql_id and (not _mysql_status): - ext_info["mysql_id"] = CrsObject("mysql").object_resource_id(mysql_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"mysql_id": create_data.get("mysql_id")} - - x_create_data = {"backup_model": create_data.get("backup_model"), - "backup_time": create_data.get("backup_time")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("mysql_id") - return owner_id, None - - -class MysqlBackupBackendApi(ApiBackendBase): - def __init__(self): - super(MysqlBackupBackendApi, self).__init__() - self.resource_name = "mysql_backup" - self.resource_workspace = "mysql_backup" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/mysql/database.py b/apps/api/database/mysql/database.py deleted file mode 100644 index 7f2f15cd..00000000 --- a/apps/api/database/mysql/database.py +++ /dev/null @@ -1,64 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -from core import local_exceptions -from lib.logs import logger -from lib.json_helper 
import format_json_dumps -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class MysqlDatabaseApi(ApiBase): - def __init__(self): - super(MysqlDatabaseApi, self).__init__() - self.resource_name = "mysql_database" - self.resource_workspace = "mysql_database" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param mysql_id: - :return: - ''' - - mysql_id = create_data.get("mysql_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _mysql_status = define_relations_key("mysql_id", mysql_id, resource_property.get("mysql_id")) - - ext_info = {} - if mysql_id and (not _mysql_status): - ext_info["mysql_id"] = CrsObject("mysql").object_resource_id(mysql_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"mysql_id": create_data.get("mysql_id")} - - x_create_data = {"name": create_data.get("name")} - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("mysql_id") - return owner_id, None - - -class MysqlDatabaseBackendApi(ApiBackendBase): - def __init__(self): - super(MysqlDatabaseBackendApi, self).__init__() - self.resource_name = "mysql_database" - self.resource_workspace = "mysql_database" - self.owner_resource = "mysql" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/mysql/instance.py b/apps/api/database/mysql/instance.py deleted file mode 100644 index 1d92eaa4..00000000 --- a/apps/api/database/mysql/instance.py +++ /dev/null @@ -1,185 +0,0 @@ -# coding: utf-8 - -from 
__future__ import (absolute_import, division, print_function, unicode_literals) - -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import convert_key_only -from apps.api.configer.provider import ProviderApi -from apps.common.convert_keys import define_relations_key -from apps.background.resource.vm.instance_type import InstanceTypeObject -from apps.api.database.rds.rds import RdsDBApi -from apps.api.database.rds.rds import RdsDBBackendApi -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor - - -class MysqlApi(RdsDBApi): - def __init__(self): - super(MysqlApi, self).__init__() - self.resource_name = "mysql" - self.resource_workspace = "mysql" - self.relation_resource = "subnet" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - sg_id = create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _subnet_status = define_relations_key("subnet_id", subnet_id, resource_property.get("subnet_id")) - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _sg_status = define_relations_key("security_group_id", sg_id, resource_property.get("security_group_id")) - - ext_info = {} - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if sg_id and (not _sg_status): - sg_property = resource_property.get("security_group_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": 
- sg_list = validate_type(sg_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("security_group").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("security_group").object_resource_id(sg_id) - - ext_info["security_group_id"] = _sg_resource_ids - else: - ext_info["security_group_id"] = CrsObject("security_group").object_resource_id(sg_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def _generate_slave_zone(self, provider, first_slave_zone, second_slave_zone): - create_data = {} - if first_slave_zone: - create_data["first_slave_zone"] = self.zone_info(provider, first_slave_zone) - if second_slave_zone: - create_data["second_slave_zone"] = self.zone_info(provider, second_slave_zone) - - logger.info("_generate_slave_zone format json: %s" % (format_json_dumps(create_data))) - return create_data - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id"), - "security_group_id": create_data.get("security_group_id")} - - password = create_data.get("password") or "Terraform@123" - x_create_data = {"name": create_data.get("name"), - "engine": self.resource_name, "zone": zone, - "version": create_data.get("version"), - "instance_type": create_data.get("instance_type"), - "password": password, - "charge_type": create_data.get("charge_type"), - "user": create_data.get("user"), - "port": create_data.get("port"), - "disk_type": create_data.get("disk_type"), - "disk_size": create_data.get("disk_size")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("mysql_id") - return owner_id, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - - ''' - - :param rid: - :param provider: - :param region: - :param zone: - :param secret: - :param 
create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data, - provider=provider_object["name"]) - - origin_type, instance_type_data = InstanceTypeObject().convert_resource_id(provider_object.get("id"), - create_data.get("instance_type")) - - cpu = instance_type_data.get("cpu") - memory = instance_type_data.get("memory") - kwargs["cpu"] = cpu - kwargs["memory"] = memory - x_create_data["instance_type"] = origin_type - - first_slave_zone = create_data.get("first_slave_zone") - second_slave_zone = create_data.get("second_slave_zone") - - x_create_data.update(self._generate_slave_zone(provider=provider_object["name"], - first_slave_zone=first_slave_zone, - second_slave_zone=second_slave_zone)) - - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, **kwargs) - - return count, res - - -class MysqlBackendApi(RdsDBBackendApi): - def __init__(self): - super(MysqlBackendApi, self).__init__() - self.resource_name = "mysql" - self.resource_workspace = "mysql" - self.relation_resource = "" - self._flush_resobj() - self.resource_keys_config = None - - # def generate_create_data(self, zone, create_data, **kwargs): - # r_create_data = {"vpc_id": create_data.get("vpc_id"), - # "subnet_id": 
create_data.get("subnet_id"), - # "security_group_id": create_data.get("security_group_id")} - # - # password = create_data.get("password") or "Terraform@123" - # x_create_data = {"name": create_data.get("name"), - # "engine": self.resource_name, "zone": zone, - # "version": create_data.get("version"), - # "instance_type": create_data.get("instance_type"), - # "password": password, - # "charge_type": create_data.get("charge_type"), - # "user": create_data.get("user"), - # "port": create_data.get("port"), - # "disk_type": create_data.get("disk_type"), - # "disk_size": create_data.get("disk_size")} - # - # return x_create_data, r_create_data diff --git a/apps/api/database/nosql/mogodb.py b/apps/api/database/nosql/mogodb.py deleted file mode 100644 index 8c507fcf..00000000 --- a/apps/api/database/nosql/mogodb.py +++ /dev/null @@ -1,25 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -# from apps.background.resource.database.nosql import MongoDBObject -from apps.api.database.nosql.nosql import NosqlApi -from apps.api.database.nosql.nosql import NosqlBackendApi - - -class MongodbApi(NosqlApi): - def __init__(self): - super(MongodbApi, self).__init__() - self.resource_name = "mongodb" - self.resource_workspace = "mongodb" - self._flush_resobj() - self.resource_keys_config = None - - -class MongodbBackendApi(NosqlBackendApi): - def __init__(self): - super(MongodbBackendApi, self).__init__() - self.resource_name = "mongodb" - self.resource_workspace = "mongodb" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/nosql/nosql.py b/apps/api/database/nosql/nosql.py deleted file mode 100644 index a98b555b..00000000 --- a/apps/api/database/nosql/nosql.py +++ /dev/null @@ -1,143 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -import traceback -from lib.logs import logger -from 
lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import define_relations_key -from apps.background.resource.resource_base import CrsObject -from apps.background.resource.vm.instance_type import InstanceTypeObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class NosqlApi(ApiBase): - def __init__(self): - super(NosqlApi, self).__init__() - self.resource_name = "nosql" - self.resource_workspace = "nosql" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - sg_id = create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _subnet_status = define_relations_key("subnet_id", subnet_id, resource_property.get("subnet_id")) - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _sg_status = define_relations_key("security_group_id", sg_id, resource_property.get("security_group_id")) - - ext_info = {} - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if sg_id and (not _sg_status): - sg_property = resource_property.get("security_group_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": - sg_list = validate_type(sg_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("security_group").object_resource_id(_sg)) - else: - _sg_resource_ids = 
CrsObject("security_group").object_resource_id(sg_id) - - ext_info["security_group_id"] = _sg_resource_ids - else: - ext_info["security_group_id"] = CrsObject("security_group").object_resource_id(sg_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id"), - "security_group_id": create_data.get("security_group_id")} - - password = create_data.get("password") - x_create_data = {"name": create_data.get("name"), - "engine": self.resource_name, "zone": zone, - "version": create_data.get("version"), - "charge_type": create_data.get("charge_type"), - "instance_type": create_data.get("instance_type"), - "disk_size": create_data.get("disk_size") - } - - if password: - x_create_data["password"] = password - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - return None, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - - ''' - - :param rid: - :param provider: - :param region: - :param zone: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data, - provider=provider_object["name"]) - - origin_type, instance_type_data = InstanceTypeObject().convert_resource_id(provider_object.get("id"), - create_data.get("instance_type")) - - x_create_data["instance_type"] = origin_type - _relations_id_dict = 
self.before_keys_checks(provider_object["name"], r_create_data) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, **kwargs) - - return count, res - - -class NosqlBackendApi(ApiBackendBase): - def __init__(self): - super(NosqlBackendApi, self).__init__() - self.resource_name = "nosql" - self.resource_workspace = "nosql" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/rds/mariadb.py b/apps/api/database/rds/mariadb.py deleted file mode 100644 index d15ce5c1..00000000 --- a/apps/api/database/rds/mariadb.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.background.resource.database.rds import MariaDBObject -from apps.api.database.rds.rds import RdsDBApi -from apps.api.database.rds.rds import RdsDBBackendApi - - -class MariaDBApi(RdsDBApi): - def __init__(self): - super(MariaDBApi, self).__init__() - self.resource_name = "mariadb" - self.resource_workspace = "mariadb" - self.relation_resource = "subnet" - self._flush_resobj() - self.resource_keys_config = None - - -class MariaDBBackendApi(RdsDBBackendApi): - def __init__(self): - super(MariaDBBackendApi, self).__init__() - self.resource_name = "mariadb" - self.resource_workspace = "mariadb" - self.relation_resource = "subnet" - self._flush_resobj() - self.resource_keys_config = None - diff --git a/apps/api/database/rds/postgresql.py b/apps/api/database/rds/postgresql.py deleted file mode 100644 index 8a0100f5..00000000 --- a/apps/api/database/rds/postgresql.py +++ /dev/null @@ -1,27 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, 
unicode_literals) - -# from apps.background.resource.database.rds import PostgreSQLObject -from apps.api.database.rds.rds import RdsDBApi -from apps.api.database.rds.rds import RdsDBBackendApi - - -class PostgreSQLApi(RdsDBApi): - def __init__(self): - super(PostgreSQLApi, self).__init__() - self.resource_name = "postgreSQL" - self.resource_workspace = "postgreSQL" - self.relation_resource = "subnet" - self._flush_resobj() - self.resource_keys_config = None - - -class PostgreSQLBackendApi(RdsDBBackendApi): - def __init__(self): - super(PostgreSQLBackendApi, self).__init__() - self.resource_name = "postgreSQL" - self.resource_workspace = "postgreSQL" - self.relation_resource = "subnet" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/database/rds/rds.py b/apps/api/database/rds/rds.py deleted file mode 100644 index 2f919d63..00000000 --- a/apps/api/database/rds/rds.py +++ /dev/null @@ -1,187 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import define_relations_key -from apps.api.configer.provider import ProviderApi -from apps.api.apibase import ApiBase -from apps.background.resource.vm.instance_type import InstanceTypeObject -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase_backend import ApiBackendBase - - -class RdsDBApi(ApiBase): - def __init__(self): - super(RdsDBApi, self).__init__() - self.resource_name = "rds" - self.resource_workspace = "rds" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - 
- vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - sg_id = create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _subnet_status = define_relations_key("subnet_id", subnet_id, resource_property.get("subnet_id")) - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _sg_status = define_relations_key("security_group_id", sg_id, resource_property.get("security_group_id")) - - ext_info = {} - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if sg_id and (not _sg_status): - sg_property = resource_property.get("security_group_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": - sg_list = validate_type(sg_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("security_group").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("security_group").object_resource_id(sg_id) - - ext_info["security_group_id"] = _sg_resource_ids - else: - ext_info["security_group_id"] = CrsObject("security_group").object_resource_id(sg_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def zone_info(self, provider, zone): - return ProviderApi().zone_info(provider, zone) - - def _generate_slave_zone(self, provider, first_slave_zone, second_slave_zone): - create_data = {} - if first_slave_zone: - create_data["first_slave_zone"] = self.zone_info(provider, first_slave_zone) - if second_slave_zone: - create_data["second_slave_zone"] = self.zone_info(provider, second_slave_zone) - - logger.info("_generate_slave_zone format json: %s" % (format_json_dumps(create_data))) - return create_data - - def generate_create_data(self, zone, 
create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id"), - "security_group_id": create_data.get("security_group_id")} - - password = create_data.get("password") or "Terraform@123" - x_create_data = {"name": create_data.get("name"), - "engine": self.resource_name, "zone": zone, - "version": create_data.get("version"), - "instance_type": create_data.get("instance_type"), - "password": password, - "charge_type": create_data.get("charge_type"), - "user": create_data.get("user"), - "port": create_data.get("port"), - "disk_type": create_data.get("disk_type"), - "disk_size": create_data.get("disk_size")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - return None, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - - ''' - - :param rid: - :param provider: - :param region: - :param zone: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data, - provider=provider_object["name"]) - - origin_type, instance_type_data = InstanceTypeObject().convert_resource_id(provider_object.get("id"), - create_data.get("instance_type")) - - cpu = instance_type_data.get("cpu") - memory = instance_type_data.get("memory") - kwargs["cpu"] = cpu - kwargs["memory"] = memory - x_create_data["instance_type"] = origin_type - - first_slave_zone = create_data.get("first_slave_zone") - second_slave_zone = create_data.get("second_slave_zone") - - 
x_create_data.update(self._generate_slave_zone(provider=provider_object["name"], - first_slave_zone=first_slave_zone, - second_slave_zone=second_slave_zone)) - - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, **kwargs) - - return count, res - - -class RdsDBBackendApi(ApiBackendBase): - def __init__(self): - super(RdsDBBackendApi, self).__init__() - self.resource_name = "rds" - self.resource_workspace = "rds" - self._flush_resobj() - self.resource_keys_config = None - - # def generate_create_data(self, zone, create_data, **kwargs): - # r_create_data = {"vpc_id": create_data.get("vpc_id"), - # "subnet_id": create_data.get("subnet_id"), - # "security_group_id": create_data.get("security_group_id")} - # - # password = create_data.get("password") or "Terraform@123" - # x_create_data = {"name": create_data.get("name"), - # "engine": self.resource_name, "zone": zone, - # "version": create_data.get("version"), - # "instance_type": create_data.get("instance_type"), - # "password": password, - # "user": create_data.get("user"), - # "port": create_data.get("port"), - # "disk_type": create_data.get("disk_type"), - # "disk_size": create_data.get("disk_size")} - # - # return x_create_data, r_create_data diff --git a/apps/api/database/rds/subnet_group.py b/apps/api/database/rds/subnet_group.py deleted file mode 100644 index 51ecd8c6..00000000 --- a/apps/api/database/rds/subnet_group.py +++ /dev/null @@ -1,80 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -import json -from core import local_exceptions -from lib.logs import 
logger -from lib.json_helper import format_json_dumps -from apps.api.configer.provider import ProviderApi -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.common.convert_keys import validate_type -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class SubnetGroupApi(ApiBase): - def __init__(self): - super(SubnetGroupApi, self).__init__() - self.resource_name = "db_subnet_group" - self.resource_workspace = "db_subnet_group" - self.owner_resource = "rds" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param subnet_id: - :return: - ''' - - subnet_id = create_data.get("subnet_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _subnet_status = define_relations_key("subnet_id", subnet_id, resource_property.get("subnet_id")) - - ext_info = {} - if subnet_id and (not _subnet_status): - # ext_info["subnet_id"] = CrsObject("mysql").object_resource_id(subnet_id) - - sg_property = resource_property.get("subnet_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": - sg_list = validate_type(subnet_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("subnet").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("subnet").object_resource_id(subnet_id) - - ext_info["subnet_id"] = _sg_resource_ids - else: - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"subnet_id": create_data.get("subnet_id")} - - x_create_data = {"name": create_data.get("name")} - return x_create_data, r_create_data - - 
def generate_owner_data(self, create_data, **kwargs): - # owner_id = create_data.get("subnet_id") - return None, None - - -class SubnetGroupBackendApi(ApiBackendBase): - def __init__(self): - super(SubnetGroupBackendApi, self).__init__() - self.resource_name = "db_subnet_group" - self.resource_workspace = "db_subnet_group" - self.owner_resource = "rds" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/loadbalance/lb.py b/apps/api/loadbalance/lb.py deleted file mode 100644 index 780c0a01..00000000 --- a/apps/api/loadbalance/lb.py +++ /dev/null @@ -1,68 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class LBApi(ApiBase): - def __init__(self): - super(LBApi, self).__init__() - self.resource_name = "lb" - self.resource_workspace = "lb" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _subnet_status = define_relations_key("subnet", subnet_id, resource_property.get("subnet_id")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - - 
logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id")} - create_data = {"network_type": create_data.get("network_type"), - "name": create_data.get("name"), - "charge_type": create_data.get("charge_type")} - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - -class LBBackendApi(ApiBackendBase): - def __init__(self): - super(LBBackendApi, self).__init__() - self.resource_name = "lb" - self.resource_workspace = "lb" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/loadbalance/lb_attach.py b/apps/api/loadbalance/lb_attach.py deleted file mode 100644 index 080bc7a4..00000000 --- a/apps/api/loadbalance/lb_attach.py +++ /dev/null @@ -1,198 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.uuid_util import get_uuid -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import convert_value -from apps.common.convert_keys import convert_key_only -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class LBAttachApi(ApiBase): - def __init__(self): - super(LBAttachApi, self).__init__() - self.resource_name = "lb_attach" - self.resource_workspace = "lb_attach" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param lb_id: - :param listener_id: - 
:return: - ''' - lb_id = create_data.get("lb_id") - listener_id = create_data.get("listener_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _ll_status = define_relations_key("listener_id", listener_id, resource_property.get("listener_id")) - _lb_status = define_relations_key("lb_id", lb_id, resource_property.get("lb_id")) - - ext_info = {} - if listener_id and (not _ll_status): - ext_info["listener_id"] = CrsObject("lb_listener").object_resource_id(listener_id) - if lb_id and (not _lb_status): - ext_info["lb_id"] = CrsObject("lb").object_resource_id(lb_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def validate_instance(self, provider, instances): - result = [] - self.resource_info(provider) - resource_values_config = self.values_config(provider) - resource_property = self.resource_keys_config["resource_property"] - - for instance_dict in instances: - if not instance_dict.get("instance_id"): - raise ValueError("instance not permit null") - else: - instance_dict["instance_id"] = CrsObject("instance").object_resource_id( - instance_dict.get("instance_id")) - - resource_columns = {} - for key, value in instance_dict.items(): - if resource_values_config.get(key): - _values_configs = resource_values_config.get(key) - value = convert_value(value, _values_configs.get(value)) - - resource_columns[key] = value - - resource_columns = convert_keys(resource_columns, defines=resource_property, is_update=True) - result.append(resource_columns) - - return result - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"lb_id": create_data.get("lb_id"), - "listener_id": create_data.get("listener_id")} - - backend_servers = create_data.get("backend_servers") - bs = self.validate_instance(provider=kwargs.get("provider"), - instances=backend_servers) - - x_create_data = {"backend_servers": bs} - return x_create_data, r_create_data - - 
def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def destroy(self, rid): - ''' - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - def _generate_remove_instance(self, rid, provider, define_json, origin_instance_id): - self.resource_info(provider) - - resource_name = self.resource_keys_config["property"] - resource_property = self.resource_keys_config["resource_property"] - - column_server = convert_key_only("backend_servers", - define=resource_property.get("backend_servers", "backend_servers")) - column_instance = convert_key_only("instance_id", - define=resource_property.get("instance_id", "instance_id")) - - _t = define_json["resource"][resource_name] - label_name = self.resource_name + "_" + rid - origin_columns = _t[label_name] - - instances = origin_columns[column_server] - new_instances = [] - for instance in instances: - if instance.get(column_instance) != origin_instance_id: - new_instances.append(instance) - - origin_columns[column_server] = new_instances - - define_json["resource"] = { - resource_name: { - label_name: origin_columns - } - } - logger.info("_generate_remove_instance format json: %s" % (format_json_dumps(define_json))) - return define_json - - def remove_instance(self, rid, instance_id): - ''' - - :param rid: - :param instance_id: - :return: - ''' - - resource_info = self.resource_object.show(rid) - - _filter_instance = {} - if resource_info["lb_id"]: - 
_filter_instance["lb_id"] = resource_info["lb_id"] - if resource_info["listener_id"]: - _filter_instance["listener_id"] = resource_info["listener_id"] - - _filter_instance["instance_id"] = instance_id - # _attach_status = LBAttachInstanceObject().query_one(where_data=_filter_instance) - # if not _attach_status: - # raise local_exceptions.ResourceValidateError("lb attach instance", "lb %s 未关联实例 %s" % (rid, instance_id)) - _instance_data = CrsObject("instance").ora_show(rid=instance_id) - - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - define_json = self._generate_remove_instance(rid, provider=resource_info["provider"], - define_json=resource_info["define_json"], - origin_instance_id=_instance_data["resource_id"]) - - self.write_define(rid, _path, define_json=define_json) - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="detach %s %s failed" % (self.resource_name, rid)) - - count, data = self.resource_object.update(rid, update_data={"define_json": define_json}) - - return count - - -class LBAttachBackendApi(ApiBackendBase): - def __init__(self): - super(LBAttachBackendApi, self).__init__() - self.resource_name = "lb_attach" - self.resource_workspace = "lb_attach" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/loadbalance/lb_ca.py b/apps/api/loadbalance/lb_ca.py deleted file mode 100644 index 4221a11e..00000000 --- a/apps/api/loadbalance/lb_ca.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class LBCAApi(ApiBase): - def __init__(self): - super(LBCAApi, self).__init__() - 
self.resource_name = "lb_ca" - self.resource_workspace = "lb_ca" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param lb_id: - :param listener_id: - :return: - ''' - return {} - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {} - - x_create_data = {} - for key in ["ca_certificate", "name"]: - x_create_data[key] = create_data.get(key) - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - -class LBCABackendApi(ApiBackendBase): - def __init__(self): - super(LBCABackendApi, self).__init__() - self.resource_name = "lb_ca" - self.resource_workspace = "lb_ca" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/loadbalance/lb_certificate.py b/apps/api/loadbalance/lb_certificate.py deleted file mode 100644 index fa6e4c7f..00000000 --- a/apps/api/loadbalance/lb_certificate.py +++ /dev/null @@ -1,45 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class LBCertificateApi(ApiBase): - def __init__(self): - super(LBCertificateApi, self).__init__() - self.resource_name = "lb_certificate" - self.resource_workspace = "lb_certificate" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param lb_id: - :param listener_id: - :return: - ''' - return {} - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {} - - x_create_data = {} - for key in ["name", "private_key", "public_key"]: - x_create_data[key] = create_data.get(key) - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None 
- return owner_id, None - - -class LBCertificateBackendApi(ApiBackendBase): - def __init__(self): - super(LBCertificateBackendApi, self).__init__() - self.resource_name = "lb_certificate" - self.resource_workspace = "lb_certificate" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/loadbalance/lb_group.py b/apps/api/loadbalance/lb_group.py deleted file mode 100644 index d7dfb5d5..00000000 --- a/apps/api/loadbalance/lb_group.py +++ /dev/null @@ -1,83 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class LBGroupApi(ApiBase): - def __init__(self): - super(LBGroupApi, self).__init__() - self.resource_name = "lb_server_group" - self.resource_workspace = "lb_server_group" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param lb_id: - :param listener_id: - :return: - ''' - lb_id = create_data.get("lb_id") - instance_id = create_data.get("instance_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _lb_status = define_relations_key("lb_id", lb_id, resource_property.get("lb_id")) - _ins_status = define_relations_key("instance_id", instance_id, - resource_property.get("instance_id"), is_update) - - ext_info = {} - - if lb_id and (not _lb_status): - ext_info["lb_id"] = CrsObject("lb").object_resource_id(lb_id) - if instance_id and (not _ins_status): - sg_property = resource_property.get("instance_id") - if isinstance(sg_property, dict): - if 
sg_property.get("type", "string") == "list": - sg_list = validate_type(instance_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("instance").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("instance").object_resource_id(instance_id) - - ext_info["instance_id"] = _sg_resource_ids - else: - ext_info["instance_id"] = CrsObject("instance_id").object_resource_id(instance_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = { - "lb_id": create_data.get("lb_id"), - "instance_id": create_data.get("instance_id") - } - - x_create_data = {} - for key in ["name", "port"]: - x_create_data[key] = create_data.get(key) - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - -class LBGroupBackendApi(ApiBackendBase): - def __init__(self): - super(LBGroupBackendApi, self).__init__() - self.resource_name = "lb_server_group" - self.resource_workspace = "lb_server_group" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/loadbalance/lb_rule.py b/apps/api/loadbalance/lb_rule.py deleted file mode 100644 index d2357082..00000000 --- a/apps/api/loadbalance/lb_rule.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class LBRuleApi(ApiBase): - def __init__(self): - super(LBRuleApi, 
self).__init__() - self.resource_name = "lb_rule" - self.resource_workspace = "lb_rule" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param lb_id: - :param listener_id: - :return: - ''' - lb_id = create_data.get("lb_id") - listener_id = create_data.get("listener_id") - sg_id = create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _ll_status = define_relations_key("listener_id", listener_id, resource_property.get("listener_id")) - _lb_status = define_relations_key("lb_id", lb_id, resource_property.get("lb_id")) - _sg_status = define_relations_key("security_group_id", sg_id, - resource_property.get("security_group_id"), is_update) - - ext_info = {} - if listener_id and (not _ll_status): - ext_info["listener_id"] = CrsObject("lb_listener").object_resource_id(listener_id) - if lb_id and (not _lb_status): - ext_info["lb_id"] = CrsObject("lb").object_resource_id(lb_id) - if sg_id and (not _sg_status): - sg_property = resource_property.get("security_group_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": - sg_list = validate_type(sg_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("security_group").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("security_group").object_resource_id(sg_id) - - ext_info["security_group_id"] = _sg_resource_ids - else: - ext_info["security_group_id"] = CrsObject("security_group").object_resource_id(sg_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"lb_id": create_data.get("lb_id"), - "listener_id": create_data.get("listener_id"), - "security_group_id": create_data.get("security_group_id")} - - 
x_create_data = {} - for key in ["frontend_port", "name", - ]: - x_create_data[key] = create_data.get(key) - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def destroy(self, rid): - ''' - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class LBRuleBackendApi(ApiBackendBase): - def __init__(self): - super(LBRuleBackendApi, self).__init__() - self.resource_name = "lb_rule" - self.resource_workspace = "lb_rule" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/loadbalance/listener.py b/apps/api/loadbalance/listener.py deleted file mode 100644 index ec1ed20a..00000000 --- a/apps/api/loadbalance/listener.py +++ /dev/null @@ -1,96 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class LBListenerApi(ApiBase): - def __init__(self): - super(LBListenerApi, self).__init__() - self.resource_name = "lb_listener" - self.resource_workspace = "lb_listener" - self.owner_resource = "lb" - self._flush_resobj() - 
self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - lb_id = create_data.get("lb_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _lb_status = define_relations_key("lb_id", lb_id, resource_property.get("lb_id")) - - ext_info = {} - if lb_id and (not _lb_status): - ext_info["lb_id"] = CrsObject(self.owner_resource).object_resource_id(lb_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"lb_id": create_data.get("lb_id")} - - name = create_data.get("name") - protocol = create_data.get("protocol") - port = create_data.get("port") - name = name or "%s_%s" % (protocol, port) - - x_create_data = {"name": name, "port": port, "protocol": protocol, - "backend_port": create_data.get("backend_port"), - "health_check": create_data.get("health_check"), - "health_check_uri": create_data.get("health_check_uri")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("lb_id") - return owner_id, None - - def destroy(self, rid): - ''' - - :param rid: - :return: - ''' - # todo 校验lb listen是否挂载了后端应用 - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class LBListenerBackendApi(ApiBackendBase): - def __init__(self): - 
super(LBListenerBackendApi, self).__init__() - self.resource_name = "lb_listener" - self.resource_workspace = "lb_listener" - self.owner_resource = "lb" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/ccn_attach.py b/apps/api/network/ccn_attach.py deleted file mode 100644 index 1083b907..00000000 --- a/apps/api/network/ccn_attach.py +++ /dev/null @@ -1,117 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.api.configer.provider import ProviderApi -from apps.api.apibase import ApiBase -from apps.common.convert_keys import define_relations_key -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase_backend import ApiBackendBase - - -class CCNAttachApi(ApiBase): - def __init__(self): - super(CCNAttachApi, self).__init__() - self.resource_name = "ccn_attach" - self.resource_workspace = "ccn_attach" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - vpc_id = create_data.get("vpc_id") - ccn_id = create_data.get("ccn_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("instance_id", vpc_id, resource_property.get("instance_id")) - _ccn_status = define_relations_key("ccn_id", ccn_id, resource_property.get("ccn_id")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["instance_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if ccn_id and (not _ccn_status): - ext_info["ccn_id"] = CrsObject("ccn").object_resource_id(ccn_id) - - logger.info("before_keys_checks add info: %s" % 
(format_json_dumps(ext_info))) - return ext_info - - def region_name(self, provider, region): - return ProviderApi().region_info(provider, region) - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("instance_id"), - "ccn_id": create_data.get("ccn_id")} - - instance_type = create_data.get("instance_type") or "VPC" - create_data = {"instance_type": instance_type} - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - - ''' - - :param rid: - :param name: - :param provider_id: - :param ccn_id: - :param instance_id: - :param instance_type: - :param instance_region: - :param region: - :param zone: - :param extend_info: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data) - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - instance_region = create_data.get("instance_region") - if instance_region: - create_data["instance_region"] = self.region_name(provider_object["name"], instance_region) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, **kwargs) - - return count, res - - -class CCNAttachBackendApi(ApiBackendBase): - def __init__(self): - 
super(CCNAttachBackendApi, self).__init__() - self.resource_name = "ccn_attach" - self.resource_workspace = "ccn_attach" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/ccn_bandwidth.py b/apps/api/network/ccn_bandwidth.py deleted file mode 100644 index 93353b3c..00000000 --- a/apps/api/network/ccn_bandwidth.py +++ /dev/null @@ -1,115 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.api.apibase import ApiBase -from apps.api.configer.provider import ProviderApi -from apps.common.convert_keys import define_relations_key -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase_backend import ApiBackendBase - - -class CCNBandwidthApi(ApiBase): - def __init__(self): - super(CCNBandwidthApi, self).__init__() - self.resource_name = "ccn_bandwidth" - self.resource_workspace = "ccn_bandwidth" - self.owner_resource = "ccn" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - ccn_id = create_data.get("ccn_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _ccn_status = define_relations_key("ccn_id", ccn_id, resource_property.get("ccn_id")) - - ext_info = {} - if ccn_id and (not _ccn_status): - ext_info["ccn_id"] = CrsObject(self.owner_resource).object_resource_id(ccn_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def region_name(self, provider, region): - return ProviderApi().region_info(provider, region) - - def generate_create_data(self, zone, create_data, **kwargs): - 
r_create_data = {"ccn_id": create_data.get("ccn_id")} - create_data = { - "bandwidth": create_data.get("bandwidth"), - "ccn_id": create_data.get("ccn_id") - } - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data) - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - from_region = create_data.get("from_region") - dest_region = create_data.get("dest_region") - if from_region: - x_create_data["from_region"] = self.region_name(provider_object["name"], from_region) - if dest_region: - x_create_data["dest_region"] = self.region_name(provider_object["name"], dest_region) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, **kwargs) - - return count, res - - -class CCNBandwidthBackendApi(ApiBackendBase): - def __init__(self): - super(CCNBandwidthBackendApi, self).__init__() - self.resource_name = "ccn_bandwidth" - self.resource_workspace = "ccn_bandwidth" - self.owner_resource = "ccn" - self._flush_resobj() - 
self.resource_keys_config = None diff --git a/apps/api/network/connnect_network.py b/apps/api/network/connnect_network.py deleted file mode 100644 index 09954ffd..00000000 --- a/apps/api/network/connnect_network.py +++ /dev/null @@ -1,35 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class CCNApi(ApiBase): - def __init__(self): - super(CCNApi, self).__init__() - self.resource_name = "ccn" - self.resource_workspace = "ccn" - self._flush_resobj() - self.resource_keys_config = None - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {} - create_data = { - "name": create_data.get("name") - } - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - -class CCNBackendApi(ApiBackendBase): - def __init__(self): - super(CCNBackendApi, self).__init__() - self.resource_name = "ccn" - self.resource_workspace = "ccn" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/eip.py b/apps/api/network/eip.py deleted file mode 100644 index 01c01d5e..00000000 --- a/apps/api/network/eip.py +++ /dev/null @@ -1,46 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class EipApi(ApiBase): - def __init__(self): - super(EipApi, self).__init__() - self.resource_name = "eip" - self.resource_workspace = "eip" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :return: - ''' - - self.resource_info(provider) - return {} - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {} - create_data = { - 
"name": create_data.get("name"), - "charge_type": create_data.get("charge_type") - } - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - -class EipBackendApi(ApiBackendBase): - def __init__(self): - super(EipBackendApi, self).__init__() - self.resource_name = "eip" - self.resource_workspace = "eip" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/eip_association.py b/apps/api/network/eip_association.py deleted file mode 100644 index 202c2ae2..00000000 --- a/apps/api/network/eip_association.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class EipAssociationApi(ApiBase): - def __init__(self): - super(EipAssociationApi, self).__init__() - self.resource_name = "eip_association" - self.resource_workspace = "eip_association" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param eip_id: - :param instance_id: - :param eni_id: - :return: - ''' - - eip_id = create_data.get("eip_id") - instance_id = create_data.get("instance_id") - eni_id = create_data.get("eni_id") - - # todo 校验instance eni 弹性网卡 - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _eip_status = define_relations_key("eip_id", eip_id, resource_property.get("eip_id")) - _instance_status = define_relations_key("instance_id", instance_id, - resource_property.get("instance_id")) - - ext_info = {} - if eip_id and (not 
_eip_status): - ext_info["eip_id"] = CrsObject("eip").object_resource_id(eip_id) - if instance_id and (not _instance_status): - ext_info["instance_id"] = CrsObject("instance").object_resource_id(instance_id) - if eni_id: - # 统一不使用eni - pass - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"eip_id": create_data.get("eip_id"), - "instance_id": create_data.get("instance_id"), - "eni_id": create_data.get("eni_id")} - create_data = {"private_ip": create_data.get("private_ip")} - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - -class EipAssociationBackendApi(ApiBackendBase): - def __init__(self): - super(EipAssociationBackendApi, self).__init__() - self.resource_name = "eip_association" - self.resource_workspace = "eip_association" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/ip_group.py b/apps/api/network/ip_group.py deleted file mode 100644 index 066f8753..00000000 --- a/apps/api/network/ip_group.py +++ /dev/null @@ -1,14 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class IpGroupBackendApi(ApiBackendBase): - def __init__(self): - super(IpGroupBackendApi, self).__init__() - self.resource_name = "ipaddress_group" - self.resource_workspace = "ipaddress_group" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/nat_gateway.py b/apps/api/network/nat_gateway.py deleted file mode 100644 index 38445599..00000000 --- a/apps/api/network/nat_gateway.py +++ /dev/null @@ -1,79 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback 
-from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class NatGatewayApi(ApiBase): - def __init__(self): - super(NatGatewayApi, self).__init__() - self.resource_name = "nat" - self.resource_workspace = "nat" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - eip = create_data.get("eip") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _subnet_status = define_relations_key("subnet_id", subnet_id, resource_property.get("subnet_id")) - _eip_status = define_relations_key("eip", eip, resource_property.get("eip")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - if eip and (not _eip_status): - # eip 特殊处理 - _eip_ip = eip.split(",") - x_eip = [] - for _ip in _eip_ip: - x = CrsObject("eip").object_resource_id(_ip) - x_eip.append(x) - ext_info["eip"] = x_eip - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id")} - - create_data = {"eip": create_data.get("eip"), - "name": create_data.get("name"), - "bandwidth": create_data.get("bandwidth")} - - return 
create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("vpc_id") - return owner_id, None - - -class NatGatewayBackendApi(ApiBackendBase): - def __init__(self): - super(NatGatewayBackendApi, self).__init__() - self.resource_name = "nat" - self.resource_workspace = "nat" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/peer_connection.py b/apps/api/network/peer_connection.py deleted file mode 100644 index 4cb3877c..00000000 --- a/apps/api/network/peer_connection.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase_backend import ApiBackendBase - - -class PeerConnApi(ApiBase): - def __init__(self): - super(PeerConnApi, self).__init__() - self.resource_name = "peer_connection" - self.resource_workspace = "peer_connection" - self.owner_resource = "" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - peer_vpc_id = create_data.get("peer_vpc_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject(self.owner_resource).object_resource_id(vpc_id) - if peer_vpc_id and (not _vpc_status): - ext_info["peer_vpc_id"] = 
CrsObject(self.owner_resource).object_resource_id(peer_vpc_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "peer_vpc_id": create_data.get("peer_vpc_id")} - create_data = { - "name": create_data.get("name") - } - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - return None, None - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - x_create_data, r_create_data = self.generate_create_data(zone, create_data, - provider=provider_object["name"]) - peer_region = create_data.get("region") - if peer_region: - peer_region = ProviderConductor().region_info(provider=provider_object["name"], region=peer_region) - x_create_data["peer_region"] = peer_region - - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, - **kwargs) - - return count, res - - -class PeerConnBackendApi(ApiBackendBase): - def __init__(self): - super(PeerConnBackendApi, self).__init__() - self.resource_name = "peer_connection" - self.resource_workspace = "peer_connection" - 
self.owner_resource = "" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/route_entry.py b/apps/api/network/route_entry.py deleted file mode 100644 index b03f506b..00000000 --- a/apps/api/network/route_entry.py +++ /dev/null @@ -1,77 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import define_relations_key -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class RouteEntryApi(ApiBase): - def __init__(self): - super(RouteEntryApi, self).__init__() - self.resource_name = "route_entry" - self.resource_workspace = "route_entry" - self.owner_resource = "route_table" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :param route_table_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - route_table_id = create_data.get("route_table_id") - next_hub = create_data.get("next_hub") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _rt_status = define_relations_key("route_table_id", route_table_id, resource_property.get("route_table_id")) - _ne_status = define_relations_key("next_hub", next_hub, resource_property.get("next_hub")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if route_table_id and (not _rt_status): - ext_info["route_table_id"] = CrsObject(self.owner_resource).object_resource_id(route_table_id) - if next_hub and (not _ne_status): - ext_info["next_hub"] = 
CrsObject(self.owner_resource).object_resource_assetid(next_hub) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "next_hub": create_data.get("next_hub"), - "route_table_id": create_data.get("route_table_id")} - - create_data = {"name": create_data.get("name"), - "destination": create_data.get("destination"), - "next_type": create_data.get("next_type"), - } - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("route_table_id") - return owner_id, None - - -class RouteEntryBackendApi(ApiBackendBase): - def __init__(self): - super(RouteEntryBackendApi, self).__init__() - self.resource_name = "route_entry" - self.resource_workspace = "route_entry" - self.owner_resource = "route_table" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/route_table.py b/apps/api/network/route_table.py deleted file mode 100644 index 2aee98ab..00000000 --- a/apps/api/network/route_table.py +++ /dev/null @@ -1,109 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.api.configer.provider import ProviderApi -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class RouteTableApi(ApiBase): - def __init__(self): - super(RouteTableApi, self).__init__() - self.resource_name = "route_table" - self.resource_workspace = "route_table" - self.owner_resource = "vpc" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, 
create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject(self.owner_resource).object_resource_id(vpc_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id")} - create_data = { - "name": create_data.get("name"), - } - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("vpc_id") - return owner_id, None - - def update(self, rid, name, extend_info, **kwargs): - ''' - - :param rid: - :param name: - :param extend_info: - :param kwargs: - :return: - ''' - - _obj = self.resource_object.show(rid) - if not _obj: - raise local_exceptions.ResourceNotFoundError("Route Table %s 不存在" % rid) - - vpc_resource_id = _obj.get("vpc") - - provider_object, provider_info = ProviderApi().provider_info(_obj["provider_id"], - region=_obj["region"]) - _path = self.create_workpath(rid, - provider=provider_object["name"], - region=_obj["region"]) - - create_data = {"name": name, "vpc_id": vpc_resource_id} - - define_json = self._generate_resource(provider_object["name"], rid, - data=create_data, extend_info=extend_info) - define_json.update(provider_info) - - self.update_data(rid, data={"status": "updating"}) - self.write_define(rid, _path, define_json=define_json) - - try: - result = self.run(_path) - except Exception, e: - self.rollback_data(rid) - raise e - - result = self.formate_result(result) - logger.info(format_json_dumps(result)) - - return self.update_data(rid, data={"status": "ok", "name": name, - 
"define_json": json.dumps(define_json)}) - - -class RouteTableBackendApi(ApiBackendBase): - def __init__(self): - super(RouteTableBackendApi, self).__init__() - self.resource_name = "route_table" - self.resource_workspace = "route_table" - self.owner_resource = "vpc" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/security_group.py b/apps/api/network/security_group.py deleted file mode 100644 index 00b229f2..00000000 --- a/apps/api/network/security_group.py +++ /dev/null @@ -1,74 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class SecGroupApi(ApiBase): - def __init__(self): - super(SecGroupApi, self).__init__() - self.resource_name = "security_group" - self.resource_workspace = "security_group" - self.relation_resource = "vpc" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject(self.relation_resource).object_resource_id(vpc_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id")} - create_data = {"name": create_data.get("name")} - 
- return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - r_id = create_data.get("vpc_id") - return None, r_id - - def sg_vpc_relationship(self, rid, provider, region, zone, secret, - resource_id, **kwargs): - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", "0000000", resource_property.get("vpc_id")) - if _vpc_status: - return [] - else: - return self.get_remote_source(rid, provider, region, zone, secret, - resource_id, **kwargs) - - -class SecGroupBackendApi(ApiBackendBase): - def __init__(self): - super(SecGroupBackendApi, self).__init__() - self.resource_name = "security_group" - self.resource_workspace = "security_group" - self.relation_resource = "vpc" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/security_group_rule.py b/apps/api/network/security_group_rule.py deleted file mode 100644 index f868a9dc..00000000 --- a/apps/api/network/security_group_rule.py +++ /dev/null @@ -1,75 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class SecGroupRuleApi(ApiBase): - def __init__(self): - super(SecGroupRuleApi, self).__init__() - self.resource_name = "security_group_rule" - self.resource_workspace = "security_group_rule" - self.owner_resource = "security_group" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param create_data: - :return: - ''' - - security_group_id = 
create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _sg_status = define_relations_key("security_group_id", security_group_id, - resource_property.get("security_group_id")) - - ext_info = {} - if security_group_id and (not _sg_status): - ext_info["security_group_id"] = CrsObject(self.owner_resource).object_resource_id(security_group_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"security_group_id": create_data.get("security_group_id")} - - type = create_data.get("type") - ports = create_data.get("ports") - cidr_ip = create_data.get("cidr_ip") - policy = create_data.get("policy") - ip_protocol = create_data.get("ip_protocol") - description = create_data.get("description") - description = description or "%s_%s_%s" % (type, ip_protocol, ports) - - create_data = {"description": description, - "type": type, "ports": ports, - "cidr_ip": cidr_ip, - "ip_protocol": ip_protocol, - "policy": policy} - - return create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("security_group_id") - return owner_id, None - - -class SecGroupRuleBackendApi(ApiBackendBase): - def __init__(self): - super(SecGroupRuleBackendApi, self).__init__() - self.resource_name = "security_group_rule" - self.resource_workspace = "security_group_rule" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/subnet.py b/apps/api/network/subnet.py deleted file mode 100644 index 34d68363..00000000 --- a/apps/api/network/subnet.py +++ /dev/null @@ -1,65 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import 
local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class SubnetApi(ApiBase): - def __init__(self): - super(SubnetApi, self).__init__() - self.resource_name = "subnet" - self.resource_workspace = "subnet" - self.owner_resource = "vpc" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject(self.owner_resource).object_resource_id(vpc_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id")} - x_create_data = {"cidr": create_data.get("cidr"), - "name": create_data.get("name"), - "zone": zone} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("vpc_id") - return owner_id, None - - -class SubnetBackendApi(ApiBackendBase): - def __init__(self): - super(SubnetBackendApi, self).__init__() - self.resource_name = "subnet" - self.resource_workspace = "subnet" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/network/vpc.py b/apps/api/network/vpc.py deleted file mode 100644 index 394b74fd..00000000 --- a/apps/api/network/vpc.py +++ /dev/null @@ -1,28 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.apibase 
import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class VpcApi(ApiBase): - def __init__(self): - super(VpcApi, self).__init__() - self.resource_name = "vpc" - self.resource_workspace = "vpc" - self._flush_resobj() - self.resource_keys_config = None - - def generate_create_data(self, zone, create_data, **kwargs): - create_data = {"cidr": create_data.get("cidr"), - "name": create_data.get("name")} - return create_data, {} - - -class VpcBackendApi(ApiBackendBase): - def __init__(self): - super(VpcBackendApi, self).__init__() - self.resource_name = "vpc" - self.resource_workspace = "vpc" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/storage/bucket_object.py b/apps/api/storage/bucket_object.py deleted file mode 100644 index 64f443de..00000000 --- a/apps/api/storage/bucket_object.py +++ /dev/null @@ -1,142 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import convert_value -from apps.common.convert_keys import define_relations_key -from apps.common.convert_keys import convert_extend_propertys -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class BucketObjectApi(ApiBase): - def __init__(self): - super(BucketObjectApi, self).__init__() - self.resource_name = "bucket_object" - self.resource_workspace = "bucket_object" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - bucket_id = 
create_data.get("bucket_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _bucket_status = define_relations_key("bucket_id", bucket_id, resource_property.get("bucket_id")) - - ext_info = {} - if bucket_id and (not _bucket_status): - ext_info["bucket_id"] = CrsObject("object_storage").object_resource_id(bucket_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def _generate_resource(self, provider, label_name, data, extend_info): - ''' - 转换resource 资源属性, 生成配置 - :param provider: - :param label_name: 资源的标签名称 - :param data: - :param extend_info: - :return: - ''' - - self.resource_info(provider) - resource_values_config = self.values_config(provider) - - resource_name = self.resource_keys_config["resource_type"] - resource_property = self.resource_keys_config["resource_property"] - resource_extend_info = self.resource_keys_config["extend_info"] - - resource_columns = {} - for key, value in data.items(): - if resource_values_config.get(key): - _values_configs = resource_values_config.get(key) - value = convert_value(value, _values_configs.get(value)) - - resource_columns[key] = value - - resource_columns = convert_keys(resource_columns, defines=resource_property) - _extend_columns = convert_extend_propertys(datas=extend_info, extend_info=resource_extend_info) - resource_columns.update(_extend_columns) - - _info = { - "resource": { - resource_name: { - label_name: resource_columns - } - } - } - - if "content" in data.keys(): - logger.info("%s include content may too long, not print in log, please check it in file" % label_name) - else: - logger.info(format_json_dumps(_info)) - return _info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"bucket_id": create_data.get("bucket_id")} - - content = create_data.get("content") - source = create_data.get("source") - x_create_data = {"key": create_data.get("key")} - if content: - 
x_create_data["context"] = content - if source: - x_create_data["source"] = source - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def destroy(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class BucketObjectBackendApi(ApiBackendBase): - def __init__(self): - super(BucketObjectBackendApi, self).__init__() - self.resource_name = "bucket_object" - self.resource_workspace = "bucket_object" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/storage/disk.py b/apps/api/storage/disk.py deleted file mode 100644 index d9cc2316..00000000 --- a/apps/api/storage/disk.py +++ /dev/null @@ -1,69 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import local_exceptions -from apps.api.apibase import ApiBase -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase_backend import ApiBackendBase - - -class DiskApi(ApiBase): - def __init__(self): - super(DiskApi, self).__init__() - self.resource_name = "disk" - self.resource_workspace = "disk" - self._flush_resobj() - self.resource_keys_config = None - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {} - - if zone: - zone = ProviderConductor().zone_info(provider=kwargs.get("provider"), - zone=zone) - - x_create_data = {"type": 
create_data.get("type"), - "size": create_data.get("size"), - "charge_type": create_data.get("charge_type"), - "name": create_data.get("name"), - "zone": zone} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def destroy(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class DiskBackendApi(ApiBackendBase): - def __init__(self): - super(DiskBackendApi, self).__init__() - self.resource_name = "disk" - self.resource_workspace = "disk" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/storage/disk_attach.py b/apps/api/storage/disk_attach.py deleted file mode 100644 index 246f3b8e..00000000 --- a/apps/api/storage/disk_attach.py +++ /dev/null @@ -1,106 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class DiskAttachApi(ApiBase): - def __init__(self): - super(DiskAttachApi, self).__init__() - self.resource_name = "disk_attach" - self.resource_workspace = "disk_attach" - 
self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - disk_id = create_data.get("disk_id") - instance_id = create_data.get("instance_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _instance_status = define_relations_key("instance_id", instance_id, resource_property.get("instance_id")) - _disk_status = define_relations_key("disk_id", disk_id, resource_property.get("disk_id")) - - ext_info = {} - if instance_id and (not _instance_status): - ext_info["instance_id"] = CrsObject("instance").object_resource_id(instance_id) - if disk_id and (not _disk_status): - ext_info["disk_id"] = CrsObject("disk").object_resource_id(disk_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"disk_id": create_data.get("disk_id"), - "instance_id": create_data.get("instance_id")} - x_create_data = {} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def attach(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param zone: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - return self.create(rid, provider, region, zone, secret, - create_data, extend_info, **kwargs) - - def detach(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = 
self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="disk detach %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class DiskAttachBackendApi(ApiBackendBase): - def __init__(self): - super(DiskAttachBackendApi, self).__init__() - self.resource_name = "disk_attach" - self.resource_workspace = "disk_attach" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/storage/object_storage.py b/apps/api/storage/object_storage.py deleted file mode 100644 index 1a51ef6a..00000000 --- a/apps/api/storage/object_storage.py +++ /dev/null @@ -1,67 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import local_exceptions -from apps.api.apibase import ApiBase -from apps.api.apibase_backend import ApiBackendBase - - -class ObjectStorageApi(ApiBase): - def __init__(self): - super(ObjectStorageApi, self).__init__() - self.resource_name = "object_storage" - self.resource_workspace = "object_storage" - self._flush_resobj() - self.resource_keys_config = None - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {} - - name = create_data.get("name") - appid = create_data.get("appid") - if appid: - name = "%s-%s" % (name, appid) - - x_create_data = {"acl": create_data.get("acl"), - "name": name} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def destroy(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if 
not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class ObjectStorageBackendApi(ApiBackendBase): - def __init__(self): - super(ObjectStorageBackendApi, self).__init__() - self.resource_name = "object_storage" - self.resource_workspace = "object_storage" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/vm/__init__.py b/apps/api/vm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/api/vm/eni.py b/apps/api/vm/eni.py deleted file mode 100644 index e3bbf5ae..00000000 --- a/apps/api/vm/eni.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class EniApi(ApiBase): - def __init__(self): - super(EniApi, self).__init__() - self.resource_name = "network_interface" - self.resource_workspace = "network_interface" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - sg_id = create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, resource_property.get("vpc_id")) - _subnet_status = define_relations_key("subnet", subnet_id, 
resource_property.get("subnet_id")) - _sg_status = define_relations_key("security_group_id", sg_id, resource_property.get("security_group_id")) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - if sg_id and (not _sg_status): - sg_property = resource_property.get("security_group_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": - sg_list = validate_type(sg_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("security_group").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("security_group").object_resource_id(sg_id) - - ext_info["security_group_id"] = _sg_resource_ids - else: - ext_info["security_group_id"] = CrsObject("security_group").object_resource_id(sg_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id"), - "security_group_id": create_data.get("security_group_id")} - x_create_data = {"ipaddress": create_data.get("ipaddress"), - "name": create_data.get("name")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = create_data.get("vpc_id") - return owner_id, None - - def destroy(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if not status: - raise 
local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class EniBackendApi(ApiBackendBase): - def __init__(self): - super(EniBackendApi, self).__init__() - self.resource_name = "network_interface" - self.resource_workspace = "network_interface" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/vm/eni_attach.py b/apps/api/vm/eni_attach.py deleted file mode 100644 index 401f049f..00000000 --- a/apps/api/vm/eni_attach.py +++ /dev/null @@ -1,108 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.resource_base import CrsObject -from apps.api.apibase_backend import ApiBackendBase - - -class ENIAttachApi(ApiBase): - def __init__(self): - super(ENIAttachApi, self).__init__() - self.resource_name = "network_interface_attach" - self.resource_workspace = "network_interface_attach" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - instance_id = create_data.get("instance_id") - eni_id = create_data.get("network_interface_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _instance_status = define_relations_key("instance_id", instance_id, resource_property.get("instance_id")) - _eni_status = define_relations_key("disk_id", eni_id, resource_property.get("eni_id")) - - ext_info = {} - if instance_id and (not _instance_status): - ext_info["instance_id"] = CrsObject("instance_id").object_resource_id(instance_id) - 
if eni_id and (not _eni_status): - ext_info["network_interface_id"] = CrsObject("network_interface").object_resource_id(eni_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"instance_id": create_data.get("instance_id"), - "network_interface_id": create_data.get("network_interface_id")} - x_create_data = {} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - owner_id = None - return owner_id, None - - def attach(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param zone: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - return self.create(rid, provider, region, zone, secret, - create_data, extend_info, **kwargs) - - def detach(self, rid): - ''' - - :param rid: - :return: - ''' - - resource_info = self.resource_object.show(rid) - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="network interface detach %s %s failed" % ( - self.resource_name, rid)) - - return self.resource_object.delete(rid) - - -class ENIAttachBackendApi(ApiBackendBase): - def __init__(self): - super(ENIAttachBackendApi, self).__init__() - self.resource_name = "network_interface_attach" - self.resource_workspace = "network_interface_attach" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/vm/instance.py b/apps/api/vm/instance.py deleted file mode 100644 index c16554f3..00000000 --- a/apps/api/vm/instance.py +++ /dev/null @@ -1,340 
+0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import base64 -import traceback -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.common.convert_keys import convert_keys -from apps.common.convert_keys import convert_value -from apps.common.convert_keys import validate_type -from apps.common.convert_keys import convert_extend_propertys -from apps.common.convert_keys import define_relations_key -from apps.api.apibase import ApiBase -from apps.background.resource.vm.instance_type import InstanceTypeObject -from apps.background.resource.resource_base import CrsObject -from apps.api.conductor.provider import ProviderConductor -from apps.api.apibase_backend import ApiBackendBase - - -class InstanceApi(ApiBase): - def __init__(self): - super(InstanceApi, self).__init__() - self.resource_name = "instance" - self.resource_workspace = "instance" - self._flush_resobj() - self.resource_keys_config = None - - def before_keys_checks(self, provider, create_data, is_update=None): - ''' - - :param provider: - :param vpc_id: - :return: - ''' - - vpc_id = create_data.get("vpc_id") - subnet_id = create_data.get("subnet_id") - sg_id = create_data.get("security_group_id") - - self.resource_info(provider) - resource_property = self.resource_keys_config["resource_property"] - _vpc_status = define_relations_key("vpc_id", vpc_id, - resource_property.get("vpc_id"), is_update) - _subnet_status = define_relations_key("subnet", subnet_id, - resource_property.get("subnet_id"), is_update) - _sg_status = define_relations_key("security_group_id", sg_id, - resource_property.get("security_group_id"), is_update) - - ext_info = {} - if vpc_id and (not _vpc_status): - ext_info["vpc_id"] = CrsObject("vpc").object_resource_id(vpc_id) - if subnet_id and (not _subnet_status): - ext_info["subnet_id"] = CrsObject("subnet").object_resource_id(subnet_id) - if 
sg_id and (not _sg_status): - sg_property = resource_property.get("security_group_id") - if isinstance(sg_property, dict): - if sg_property.get("type", "string") == "list": - sg_list = validate_type(sg_id, "list") - _sg_resource_ids = [] - for _sg in sg_list: - _sg_resource_ids.append(CrsObject("security_group").object_resource_id(_sg)) - else: - _sg_resource_ids = CrsObject("security_group").object_resource_id(sg_id) - - ext_info["security_group_id"] = _sg_resource_ids - else: - ext_info["security_group_id"] = CrsObject("security_group").object_resource_id(sg_id) - - logger.info("before_keys_checks add info: %s" % (format_json_dumps(ext_info))) - return ext_info - - def _generate_update_data(self, rid, provider, define_json, update_data, extend_info): - self.resource_info(provider) - resource_values_config = self.values_config(provider) - - resource_name = self.resource_keys_config["resource_type"] - resource_property = self.resource_keys_config["resource_property"] - resource_extend_info = self.resource_keys_config["extend_info"] - - resource_columns = {} - for key, value in update_data.items(): - if resource_values_config.get(key): - _values_configs = resource_values_config.get(key) - value = convert_value(value, _values_configs.get(value)) - - resource_columns[key] = value - - resource_columns = convert_keys(resource_columns, defines=resource_property, is_update=True) - if extend_info: - _extend_columns = convert_extend_propertys(datas=extend_info, - extend_info=resource_extend_info, - is_update=True) - resource_columns.update(_extend_columns) - - _t = define_json["resource"][resource_name] - label_name = self.resource_name + "_" + rid - origin_columns = _t[label_name] - - origin_columns.update(resource_columns) - - define_json["resource"] = { - resource_name: { - label_name: origin_columns - } - } - logger.info(format_json_dumps(define_json)) - return define_json - - def generate_create_data(self, zone, create_data, **kwargs): - r_create_data = {"vpc_id": 
create_data.get("vpc_id"), - "subnet_id": create_data.get("subnet_id"), - "security_group_id": create_data.get("security_group_id")} - - x_create_data = {"name": create_data.get("name"), - "hostname": create_data.get("hostname"), - "disk_type": create_data.get("disk_type"), - "disk_size": create_data.get("disk_size"), - "data_disks": create_data.get("data_disks"), - "charge_type": create_data.get("charge_type"), - "zone": zone, "image": create_data.get("image")} - - return x_create_data, r_create_data - - def generate_owner_data(self, create_data, **kwargs): - r_id = {"subnet_id": create_data.get("subnet_id")} - return None, r_id - - def create(self, rid, provider, region, zone, secret, - create_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param secret: - :param create_data: - :param extend_info: - :param kwargs: - :return: - ''' - - _exists_data = self.create_resource_exists(rid) - if _exists_data: - return 1, _exists_data - - extend_info = extend_info or {} - provider_object, provider_info = ProviderConductor().conductor_provider_info(provider, region, secret) - - zone = ProviderConductor().zone_info(provider=provider_object["name"], zone=zone) - x_create_data, r_create_data = self.generate_create_data(zone, create_data, - provider=provider_object["name"]) - - password = create_data.get("password") # or "Terraform.123" - origin_type, instance_type_data = InstanceTypeObject().convert_resource_id(provider_object.get("id"), - create_data.get("instance_type")) - - cpu = instance_type_data.get("cpu") - memory = instance_type_data.get("memory") - kwargs["cpu"] = cpu - kwargs["memory"] = memory - - x_create_data["password"] = password - x_create_data["instance_type"] = origin_type - - _relations_id_dict = self.before_keys_checks(provider_object["name"], r_create_data) - - x_create_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_data(create_data) - count, res = self.run_create(rid=rid, 
region=region, zone=zone, - provider_object=provider_object, - provider_info=provider_info, - owner_id=owner_id, relation_id=relation_id, - create_data=x_create_data, - extend_info=extend_info, - power_state="running", - **kwargs) - - return count, res - - def destroy(self, rid, force_delete=False): - ''' - - :param rid: - :param force_delete: - :return: - ''' - - resource_info = self.resource_object.show(rid) - if not resource_info: - return 0 - _path = self.create_workpath(rid, - provider=resource_info["provider"], - region=resource_info["region"]) - - if not self.destroy_ensure_file(rid, path=_path): - self.write_define(rid, _path, define_json=resource_info["define_json"]) - - if force_delete: - update_data = {"force_delete": "true"} - define_json = self._generate_update_data(rid, resource_info["provider"], - define_json=resource_info["define_json"], - update_data=update_data, extend_info={}) - - self.write_define(rid, _path, define_json=define_json) - - status = self.run_destroy(_path) - if not status: - raise local_exceptions.ResourceOperateException(self.resource_name, - msg="delete %s %s failed" % (self.resource_name, rid)) - - return self.resource_object.delete(rid, update_data=None) - - # return self.resource_object.delete(rid, update_data={"status": "deleted", - # "power_state": "stop"}) - - def generate_update_data(self, zone, update_data, **kwargs): - x_update_data = {} - for key in ["name", "image"]: - if update_data.get(key): - x_update_data[key] = update_data.get(key) - - r_update_data = {} - return x_update_data, r_update_data - - def update(self, rid, provider, region, zone, - update_data, extend_info, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param secret: - :param update_data: - :param extend_info: - :param kwargs: - :return: - ''' - - resource_obj = self.resource_object.show(rid) - if not resource_obj: - raise local_exceptions.ResourceNotFoundError("%s:%s 不存在" % (self.resource_name, rid)) - - extend_info = 
extend_info or {} - - zone = ProviderConductor().zone_info(provider=resource_obj["provider"], zone=zone) - x_update_data, r_update_data = self.generate_update_data(zone, update_data, - provider=resource_obj["provider"]) - - if update_data.get("instance_type"): - origin_type, instance_type_data = InstanceTypeObject().convert_resource_id(resource_obj.get("provider_id"), - update_data.get("instance_type")) - - kwargs["cpu"] = instance_type_data.get("cpu") - kwargs["memory"] = instance_type_data.get("memory") - x_update_data["instance_type"] = origin_type - - _relations_id_dict = self.before_keys_checks(provider=resource_obj["provider"], - create_data=x_update_data, - is_update=True) - - x_update_data.update(_relations_id_dict) - - owner_id, relation_id = self.generate_owner_update_data(update_data) - count, res = self.run_update(rid=rid, region=resource_obj["region"], - zone=zone, owner_id=owner_id, - relation_id=relation_id, - origin_data=resource_obj, - update_data=x_update_data, - extend_info=extend_info, **kwargs) - - return count, res - - def start(self, rid): - ''' - power_action " start - :param rid: - :return: - ''' - - resource_obj = self.resource_object.show(rid) - if not resource_obj: - raise local_exceptions.ResourceNotFoundError("%s:%s 不存在" % (self.resource_name, rid)) - - update_data = {"power_action": "start"} - count, res = self.run_update(rid=rid, region=resource_obj["region"], - zone=resource_obj.get("zone"), - owner_id=None, - relation_id=None, - origin_data=resource_obj, - update_data=update_data, - extend_info={}, - power_state="running") - - # return self.update_data(rid, data={"status": "ok", "power_state": "start", - # "define_json": json.dumps(define_json)}) - - return count, res - - def stop(self, rid): - ''' - power_action: stop - :param rid: - :return: - ''' - - resource_obj = self.resource_object.show(rid) - if not resource_obj: - raise local_exceptions.ResourceNotFoundError("%s:%s 不存在" % (self.resource_name, rid)) - - update_data = 
{"power_action": "stop"} - - count, res = self.run_update(rid=rid, region=resource_obj["region"], - zone=resource_obj.get("zone"), - owner_id=None, - relation_id=None, - origin_data=resource_obj, - update_data=update_data, - extend_info={}, - power_state="stopped") - - # result = self.formate_result(result) - # logger.info(format_json_dumps(result)) - - # return self.update_data(rid, data={"status": "ok", "power_state": "stop", - # "define_json": json.dumps(define_json)}) - - return count, res - - -class InstanceBackendApi(ApiBackendBase): - def __init__(self): - super(InstanceBackendApi, self).__init__() - self.resource_name = "instance" - self.resource_workspace = "instance" - self._flush_resobj() - self.resource_keys_config = None diff --git a/apps/api/vm/instance_type.py b/apps/api/vm/instance_type.py deleted file mode 100644 index f55d4052..00000000 --- a/apps/api/vm/instance_type.py +++ /dev/null @@ -1,49 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from apps.background.resource.configr.provider import ProviderObject -from apps.background.resource.vm.instance_type import InstanceTypeObject - - -class InstanceTypeApi(object): - def __init__(self): - self.resource_object = InstanceTypeObject() - - def create(self, rid, name, provider, origin_name, cpu, memory, network, type, extend_info): - ''' - - :param rid: - :param name: - :param provider_id: - :param origin_name: - :param cpu: - :param memory: - :param network: - :param extend_info: - :return: - ''' - - extend_info = extend_info or {} - provider_info = ProviderObject().provider_name_object(provider) - create_data = {"id": rid, - "name": name, - "type": type, - "provider_id": provider_info.get("id"), - "provider": provider, - "origin_name": origin_name, - "cpu": cpu, "memory": memory, - "network": network, - "extend_info": json.dumps(extend_info), - } - - return self.resource_object.create(create_data) - - def update(self, 
rid, data): - if "provider_id" in data.keys(): - if not data.get("provider_id"): - raise ValueError("provider id not permit set null") - provider_info = ProviderObject().provider_object(provider_id=data.get("provider_id")) - data["provider"] = provider_info.get("name") - - return self.resource_object.update(rid, data) diff --git a/apps/background/__init__.py b/apps/background/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/lib/__init__.py b/apps/background/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/lib/commander/__init__.py b/apps/background/lib/commander/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/lib/commander/terraform.py b/apps/background/lib/commander/terraform.py deleted file mode 100644 index 295e2bdf..00000000 --- a/apps/background/lib/commander/terraform.py +++ /dev/null @@ -1,375 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from lib.logs import logger -from lib.command import command -from wecube_plugins_terraform.settings import TERRAFORM_BASE_PATH -from wecube_plugins_terraform.settings import TERRFORM_BIN_PATH -from wecube_plugins_terraform.settings import TERRAFORM_PLUGIN_CACHE_PATH - -if not os.path.exists(TERRAFORM_BASE_PATH): - os.makedirs(TERRAFORM_BASE_PATH) - - -class TerrformExecError(Exception): - pass - - -def path_dir(path): - dirs = os.listdir(path) - for xdir in dirs: - if not xdir.startswith("."): - return xdir - else: - return "" - - -class TerraformDriver(object): - def __init__(self, terraform_path=None, workdir=None): - self.terraform = terraform_path or TERRFORM_BIN_PATH - self.workdir = workdir - - def init_provider_path(self, provider): - provider_path = os.path.join(TERRAFORM_BASE_PATH, provider) - if not os.path.exists(provider_path): - os.makedirs(provider_path) - - return 
provider_path - - def upgrade(self, dir_path=None, version_file=None): - workdir = dir_path or self.workdir or '' - if version_file: - if not os.path.exists(os.path.join(workdir, "versions.tf")): - command("cp %s %s" % (version_file, workdir)) - - exec_cmd = "cd %s; terraform 0.13upgrade -yes ." % (workdir) - code, out, err = command(self._format_cmd(exec_cmd), workdir=workdir) - if code == 0: - pass - else: - pass - - def _cache_status(self, path, code): - if code == 0: - with open(path, 'wb+') as f: - f.close() - - def cache_provider_plugin(self, provider, initd_path): - path = os.path.join(TERRAFORM_BASE_PATH, ".%s.status" % provider) - if os.path.exists(path): - return - - logger.info(" try cache plugin status %s...." % path) - plugin_path = os.path.join(initd_path, ".terraform/providers/registry.terraform.io") - plugin_path2 = os.path.join(initd_path, ".terraform/plugins/registry.terraform.io/") - - if not os.path.exists(plugin_path): - if os.path.exists(plugin_path2): - plugin_path = plugin_path2 - else: - logger.info("plugin cache path not find, please check... 
skip ...") - return - - tmp_path = os.path.join(plugin_path, "hashicorp") - if os.path.exists(tmp_path): - child_dir = path_dir(tmp_path) - if child_dir: - exists_cache = os.path.join(TERRAFORM_PLUGIN_CACHE_PATH, "hashicorp", child_dir) - if os.path.exists(exists_cache): - logger.info("cache mount already, write status, path: %s" % exists_cache) - self._cache_status(path, 0) - return - else: - xcmd = "cp -r %s %s" % ( - os.path.join(tmp_path, "*"), os.path.join(TERRAFORM_PLUGIN_CACHE_PATH, "hashicorp")) - logger.info(xcmd) - code, _, _ = command(cmd=xcmd) - logger.info("cached write status, path: %s" % exists_cache) - self._cache_status(path, code) - else: - logger.info("child_dir is empty, skip cache ...") - else: - child_dir = path_dir(plugin_path) - if child_dir: - exists_cache = os.path.join(TERRAFORM_PLUGIN_CACHE_PATH, child_dir) - if os.path.exists(exists_cache): - logger.info("cache mount already, write status, path: %s" % exists_cache) - self._cache_status(path, 0) - return - else: - xcmd = "cp -r %s %s" % (os.path.join(plugin_path, "*"), TERRAFORM_PLUGIN_CACHE_PATH) - logger.info(xcmd) - code, _, _ = command(cmd=xcmd) - logger.info("cached, write status, path: %s" % exists_cache) - self._cache_status(path, code) - else: - logger.info("child_dir is empty, skip cache ...") - - def init(self, dir_path=None, - backend=None, backend_config=None, - plugin_dir=None, reconfigure=None, upgrade=None): - ''' - - :param dir_path: - :param backend: 'true' or 'flase' - :param backend_config: - :param plugin_dir: - :param reconfigure: None or '' - :param upgrade: 'true' or 'false' - :return: - ''' - - workdir = dir_path or self.workdir or '' - - input_options = {} - input_options["backend"] = backend - input_options["backend_config"] = backend_config - input_options["reconfigure"] = reconfigure - input_options["upgrade"] = upgrade - - args = self._generate_args(input_options) - if plugin_dir: - args += " -plugin-dir %s" % plugin_dir - - exec_cmd = "-chdir=%s init %s" % 
(workdir, args) - - code, out, err = command(self._format_cmd(exec_cmd), workdir=workdir) - if code == 0: - return True - else: - raise TerrformExecError("init error, msg: %s" % err) - - def init_resource_dir(self, dir_path, provider): - provider_path = self.init_provider_path(provider) - if not os.path.exists(os.path.join(dir_path, "versions.tf")): - if os.path.exists(os.path.join(provider_path, "versions.tf")): - command("cp %s %s" % (os.path.join(provider_path, "versions.tf"), dir_path)) - else: - logger.info("versions.tf file not exists") - - # todo cache plugin file - res = self.init(dir_path=dir_path) - try: - self.cache_provider_plugin(provider, dir_path) - except Exception, e: - logger.info(traceback.format_exc()) - logger.info("cache plugin error, continue ... ") - - return res - - def _format_cmd(self, cmd, args=None): - if not args: - return "%s %s" % (self.terraform, cmd) - else: - return "%s %s %s" % (self.terraform, cmd, args) - - def _generate_args(self, input_options): - args_list = [] - for k, v in input_options.items(): - if v is not None: - if "_" in k: - k = k.replace("_", "-") - if isinstance(v, bool): - v = "true" if v else "false" - - if v: - args_list.append("-%s=%s" % (k, v)) - else: - args_list.append("-%s" % (k)) - - return " ".join(args_list) - - def _generate_var_args(self, var_args): - var_args = var_args or {} - args = "" - for i, x in var_args.items(): - args += " -var %s=%s" % (i, x) - return args - - def apply(self, dir_or_plan=None, auto_approve=None, - backup=None, refresh=None, - state=None, state_out=None, - lock=None, var=None, var_file=None): - ''' - - :param dir_path: - :param auto_approve: -auto-approve None or "" - :param backup: path - :param refresh: 'true' or 'false' - :param state: path - :param state_out: path 保留以前的state文件 - :param lock: 'true' or 'false' - :param var: dict - :param var_file: path - :return: - ''' - - workdir = dir_or_plan or self.workdir or '' - - input_options = {} - input_options["lock"] = lock - 
input_options["backup"] = backup - input_options["refresh"] = refresh - input_options["state"] = state - input_options["state_out"] = state_out - input_options["var_file"] = var_file - input_options["auto_approve"] = auto_approve - args = self._generate_args(input_options) - args += self._generate_var_args(var) - exec_cmd = " -chdir=%s apply %s" % (workdir, args) - - code, out, err = command(self._format_cmd(exec_cmd), workdir=workdir) - if code == 0: - return True - else: - raise TerrformExecError("apply error, msg: %s" % err) - - def refresh(self, path=None): - ''' - - :param path: - :return: - ''' - - workdir = path or self.workdir or '' - - exec_cmd = "refresh %s" % (path) - - code, out, err = command(self._format_cmd(exec_cmd), workdir=workdir) - if code == 0: - return True - else: - raise TerrformExecError("refresh error, msg: %s" % err) - - def plan(self, dir_path=None, compact_warnings=None, - destroy=None, detailed_exitcode=None, - out=None, refresh=None, state=None, - lock=None, var=None, var_file=None): - ''' - - :param dir_path: - :param compact_warnings: None or "" - :param destroy: plan destroy None or "" - :param detailed_exitcode: None or "" - :param out: path - :param refresh: 'true' or 'false' - :param state: path - :param state_out: path 保留以前的state文件 - :param lock: 'true' or 'false' - :param var: dict - :param var_file: path - - :return: - ''' - - workdir = dir_path or self.workdir or '' - - input_options = {} - input_options["lock"] = lock - input_options["out"] = out - input_options["compact_warnings"] = compact_warnings - input_options["destroy"] = destroy - input_options["detailed_exitcode"] = detailed_exitcode - input_options["refresh"] = refresh - input_options["state"] = state - input_options["var_file"] = var_file - args = self._generate_args(input_options) - args += self._generate_var_args(var) - exec_cmd = " -chdir=%s plan %s " % (workdir, args) - - code, out, err = command(self._format_cmd(exec_cmd), workdir=workdir) - if code in [0, 
1, 2]: - # failed - pass - else: - raise TerrformExecError("plan error, msg: %s" % err) - - def read_result(self, state_path=None): - if not state_path: - workdir = self.workdir or "" - state_path = os.path.join(workdir, "terraform.tfstate") - - if not os.path.exists(state_path): - raise TerrformExecError("can not read state") - - try: - with open(state_path, "rb+") as f: - return json.load(f) - except Exception, e: - logger.info(traceback.format_exc()) - raise TerrformExecError("can not generate state file. msg: %s-%s" % (e.__class__.__name__, - e.message)) - - def resource_result(self, path): - path = path or self.workdir or "" - state_path = os.path.join(path, "terraform.tfstate") - return self.read_result(state_path) - - def destroy(self, dir_path=None, - auto_approve=None, force=None, - backup=None, refresh=None, - state=None, state_out=None, - lock=None, var=None, var_file=None): - ''' - - - :param dir_path: - :param force: 同auto_approve - :param auto_approve: -auto-approve None or "" - :param backup: path - :param refresh: 'true' or 'false' - :param state: path - :param state_out: path 保留以前的state文件 - :param lock: 'true' or 'false' - :param var: dict - :param var_file: path - :return: - ''' - - workdir = dir_path or self.workdir or '' - if not os.path.exists(workdir): - raise TerrformExecError("resource workpath not exists, not permit delete") - - input_options = {} - input_options["lock"] = lock - input_options["backup"] = backup - input_options["refresh"] = refresh - input_options["state"] = state - input_options["state_out"] = state_out - input_options["var_file"] = var_file - input_options["auto_approve"] = auto_approve - input_options["force"] = force - args = self._generate_args(input_options) - args += self._generate_var_args(var) - exec_cmd = " -chdir=%s destroy %s " % (workdir, args) - - code, out, err = command(self._format_cmd(exec_cmd), workdir=workdir) - if code == 0: - return True - else: - raise TerrformExecError("destroy error, msg: %s" % err) 
- - def import_state(self, from_source, dest_source, dir_or_plan=None, state=None): - ''' - - :param dir_path: - :param state: path - :return: - ''' - - workdir = dir_or_plan or self.workdir or '' - - if state: - exec_cmd = "import -state=%s %s %s" % (state, dest_source, from_source) - else: - exec_cmd = "import %s %s" % (dest_source, from_source) - - x_command = "cd %s; %s" % (workdir, self._format_cmd(exec_cmd)) - code, out, err = command(x_command, workdir=workdir) - if code == 0: - return True - else: - raise TerrformExecError("import error, msg: %s" % err) diff --git a/apps/background/lib/drivers/__init__.py b/apps/background/lib/drivers/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/lib/drivers/terraform_operate.py b/apps/background/lib/drivers/terraform_operate.py deleted file mode 100644 index bcd1233f..00000000 --- a/apps/background/lib/drivers/terraform_operate.py +++ /dev/null @@ -1,211 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import json -import traceback -from shutil import copyfile -from lib.logs import logger -from lib.command import command -from lib.json_helper import format_json_dumps -from lib.date_time import get_datetime_point_str -from wecube_plugins_terraform.settings import TERRAFORM_BASE_PATH -from apps.background.lib.commander.terraform import TerraformDriver - - -class TerraformResource(object): - terraformDriver = TerraformDriver() - - def __init__(self): - self.resource_name = None - self.resource_workspace = None - - def get_workpath(self, rid, provider, region, **kwargs): - return os.path.join(TERRAFORM_BASE_PATH, provider, region, self.resource_workspace, rid) - - def create_workpath(self, rid, provider, region, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param kwargs: - :return: - ''' - - # _path = os.path.join(TERRAFORM_BASE_PATH, provider, region, self.resource_workspace, rid) 
- _path = self.get_workpath(rid, provider, region, **kwargs) - if not os.path.exists(_path): - os.makedirs(_path) - - return _path - - def init_workspace(self, _path, provider): - return self.terraformDriver.init_resource_dir(dir_path=_path, provider=provider) - - def write_provider_define(self, path, define_json): - ''' - - :param rid: - :param path: - :param define_json: - :return: - ''' - - file = os.path.join(path, "provider.tf.json") - if os.path.exists(file): - backupfile = file + "_" + get_datetime_point_str() - copyfile(file, backupfile) - - with open(file, 'wb+') as f: - json.dump(define_json, f, ensure_ascii=False, indent=4) - - logger.info(format_json_dumps(define_json)) - - def write_define(self, rid, path, define_json): - ''' - - :param rid: - :param path: - :param define_json: - :return: - ''' - - backupfile = "" - file = os.path.join(path, "%s.tf.json" % rid) - if os.path.exists(file): - backupfile = file + "_" + get_datetime_point_str() - copyfile(file, backupfile) - - with open(os.path.join(path, "%s.tf.json" % rid), 'wb+') as f: - json.dump(define_json, f, ensure_ascii=False, indent=4) - - logger.info(format_json_dumps(define_json)) - return backupfile - - def rewrite_state(self, path, state_file): - ''' - - :param rid: - :param path: - :param define_json: - :return: - ''' - - _statefile = os.path.join(path, "terraform.tfstate") - if os.path.exists(_statefile): - return - - with open(_statefile, 'wb+') as f: - json.dump(state_file, f, ensure_ascii=False, indent=4) - - logger.info("rewrite state file") - logger.info(format_json_dumps(state_file)) - - def rollback_workspace(self, path): - if os.path.exists(path): - backuppath = path + "_" + get_datetime_point_str() - logger.info("try rollback workspace %s to %s" % (path, backuppath)) - command(cmd="mv %s %s" % (path, backuppath)) - - def run(self, path, skip_backup=None): - ''' - - :param path: - :return: - ''' - - _statefile = os.path.join(path, "terraform.tfstate") - if os.path.exists(_statefile): 
- backupfile = _statefile + "_" + get_datetime_point_str() - copyfile(_statefile, backupfile) - - try: - self.terraformDriver.plan(path) - except: - logger.info(traceback.format_exc()) - logger.info("terraform plan run failed, continue ... ") - - self.terraformDriver.apply(path, auto_approve="") - return self.terraformDriver.resource_result(path) - - def is_need_imort(self, path): - ''' - - :param path: - :return: - ''' - _statefile = os.path.join(path, "terraform.tfstate") - if os.path.exists(_statefile): - return False - - return True - - def run_import(self, from_source, dest_source, path, state=None): - ''' - # 非资产类型资源没有id, 使用两个字段 资产id + 标识id 进行导入 - :param from_source: - :param dest_source: - :param path: - :param state: - :return: - ''' - - self.terraformDriver.import_state(from_source, dest_source, path, state) - return self.terraformDriver.resource_result(path) - - def refresh(self, path): - ''' - - :param path: - :return: - ''' - - _statefile = os.path.join(path, "terraform.tfstate") - if os.path.exists(_statefile): - backupfile = _statefile + "_" + get_datetime_point_str() - copyfile(_statefile, backupfile) - self.terraformDriver.refresh(path) - return self.terraformDriver.resource_result(path) - - def destroy_ensure_file(self, rid, path): - ''' - - :param rid: - :param path: - :return: - ''' - - file = os.path.join(path, "%s.tf.json" % rid) - if os.path.exists(file): - return True - - return False - - def ensure_provider_file(self, path): - ''' - - :param rid: - :param path: - :return: - ''' - file = os.path.join(path, "provider.tf.json") - if os.path.exists(file): - return True - - return False - - def run_destroy(self, path): - ''' - - :param path: - :return: - ''' - - try: - self.terraformDriver.plan(path, destroy="") - except: - logger.info(traceback.format_exc()) - logger.info("terraform plan run failed, continue ... 
") - - return TerraformDriver().destroy(dir_path=path, auto_approve="") diff --git a/apps/background/models/__init__.py b/apps/background/models/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/models/dbserver.py b/apps/background/models/dbserver.py deleted file mode 100644 index 779fcb77..00000000 --- a/apps/background/models/dbserver.py +++ /dev/null @@ -1,48 +0,0 @@ -# coding: utf-8 - -import model -from objects import _BaseManager - - -class HistoryManager(_BaseManager): - obj = model.ResourceHistory - - -class ProvidersManager(_BaseManager): - obj = model.Providers - - -class ProviderSecretManager(_BaseManager): - obj = model.ProviderSecret - - -class RegionManager(_BaseManager): - obj = model.Region - - -class ZoneManager(_BaseManager): - obj = model.Zone - - -class CommonKeyManager(_BaseManager): - obj = model.CommonKeys - - -class ConfigManager(_BaseManager): - obj = model.Config - - -class ResourceManager(_BaseManager): - obj = model.Resource - - -class InstanceTypeManager(_BaseManager): - obj = model.InstanceType - - -class InstanceManager(_BaseManager): - obj = model.Instance - - -class CrsManager(_BaseManager): - obj = model.CloudResource diff --git a/apps/background/models/model.py b/apps/background/models/model.py deleted file mode 100644 index 746c2cb8..00000000 --- a/apps/background/models/model.py +++ /dev/null @@ -1,413 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import datetime -from sqlalchemy import Column, DateTime, Index, String, text, Text -from sqlalchemy import Integer -from sqlalchemy.dialects.mysql import INTEGER, TINYINT -from sqlalchemy.ext.declarative import declarative_base - -Base = declarative_base() - - -def to_dict(self): - return {c.name: getattr(self, c.name, None) for c in self.__table__.columns} - - -Base.to_dict = to_dict - - -class ResourceHistory(Base): - __tablename__ = "cloud_resource_history" - - xid = 
Column(String(64), primary_key=True) - id = Column(String(64)) - resource = Column(String(64)) - ora_data = Column(String(65535)) - created_time = Column(DateTime) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.xid = data.get("xid") - self.id = data.get("id") - self.resource = data.get("resource") - self.ora_data = data.get("ora_data") - - -class Providers(Base): - __tablename__ = "cloud_providers" - - id = Column(String(64), primary_key=True) - name = Column(String(64), nullable=False) - display_name = Column(String(64), nullable=False) - plugin_source = Column(String(64)) - secret_id = Column(String(256)) - secret_key = Column(String(256)) - region = Column(String(64)) - zone = Column(String(64)) - extend_info = Column(String(1024)) - provider_property = Column(String(1024)) - is_init = Column(TINYINT(1), server_default=text("'0'")) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.deleted_time = data.get("deleted_time") - self.enabled = data.get("enabled") - self.extend_info = data.get("extend_info") or '{}' - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.is_init = data.get("is_init") - self.region = data.get("region") - self.zone = data.get("zone") - self.name = data.get("name") - self.display_name = data.get("display_name") - self.provider_property = data.get("provider_property") or '{}' - self.secret_id = data.get("secret_id") - self.secret_key = data.get("secret_key") - self.plugin_source = data.get("plugin_source") - self.updated_time = data.get("updated_time") - - -class ProviderSecret(Base): - __tablename__ = "cloud_secret" - - id = Column(String(64), primary_key=True) - name = Column(String(64), nullable=False) - display_name = 
Column(String(64)) - provider = Column(String(64), nullable=False) - region = Column(String(64)) - server = Column(String(256)) - extend_info = Column(String(2048)) - secret_info = Column(String(2048)) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.deleted_time = data.get("deleted_time") - self.display_name = data.get("display_name") - self.enabled = data.get("enabled") - self.extend_info = data.get("extend_info") - self.id = data.get("id") - self.server = data.get("server") - self.is_deleted = data.get("is_deleted") - self.name = data.get("name") - self.provider = data.get("provider") - self.region = data.get("region") - self.secret_info = data.get("secret_info") - self.updated_time = data.get("updated_time") - - -class Region(Base): - __tablename__ = "cloud_region" - - id = Column(String(64), primary_key=True) - name = Column(String(64)) - provider = Column(String(128), nullable=False) - asset_id = Column(String(128), nullable=False) - extend_info = Column(String(1024)) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.deleted_time = data.get("deleted_time") - self.enabled = data.get("enabled") - self.extend_info = data.get("extend_info") or '{}' - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.name = data.get("name") - self.provider = data.get("provider") or '{}' - self.asset_id = data.get("asset_id") - self.updated_time = data.get("updated_time") - - -class Zone(Base): - __tablename__ = "cloud_zone" - - id = Column(String(64), 
primary_key=True) - name = Column(String(64)) - provider = Column(String(128), nullable=False) - asset_id = Column(String(128), nullable=False) - region = Column(String(128)) - extend_info = Column(String(1024)) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.deleted_time = data.get("deleted_time") - self.enabled = data.get("enabled") - self.extend_info = data.get("extend_info") or '{}' - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.name = data.get("name") - self.region = data.get("region") - self.provider = data.get("provider") or '{}' - self.asset_id = data.get("asset_id") - self.updated_time = data.get("updated_time") - - -class Resource(Base): - __tablename__ = "resource" - - id = Column(String(64), primary_key=True) - provider = Column(String(64), nullable=False) - resource_type = Column(String(64), nullable=False) - resource_name = Column(String(64), nullable=False) - extend_info = Column(String(1024)) - resource_property = Column(String(2048), nullable=False) - resource_output = Column(String(1024), nullable=False) - data_source_name = Column(String(64)) - data_source_argument = Column(String(256)) - data_source_output = Column(String(512)) - data_source = Column(String(2048)) - pre_action = Column(String(256)) - pre_action_output = Column(String(512)) - is_locked = Column(TINYINT(1), server_default=text("'0'")) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.deleted_time = data.get("deleted_time") - self.enabled = 
data.get("enabled") - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.is_locked = data.get("is_locked") - self.resource_type = data.get("resource_type") - self.provider = data.get("provider") - self.data_source_argument = data.get("data_source_argument") - self.data_source_output = data.get("data_source_output") - self.resource_name = data.get("resource_name") - self.data_source_name = data.get("data_source_name") - self.data_source = data.get("data_source") - self.pre_action = data.get("pre_action") - self.pre_action_output = data.get("pre_action_output") - self.extend_info = data.get("extend_info") or '{}' - self.resource_property = data.get("resource_property") or '{}' - self.resource_output = data.get("resource_output") or '{}' - self.updated_time = data.get("updated_time") - - -class CommonKeys(Base): - __tablename__ = "common_keys" - - id = Column(String(64), primary_key=True) - resource = Column(String(64), nullable=False) - property = Column(String(64)) - key = Column(String(64), nullable=False) - is_locked = Column(TINYINT(1), server_default=text("'0'")) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.deleted_time = data.get("deleted_time") - self.enabled = data.get("enabled") - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.is_locked = data.get("is_locked") - self.key = data.get("key") - self.resource = data.get("resource") - self.property = data.get("property") - self.updated_time = data.get("updated_time") - - -class Config(Base): - __tablename__ = "config" - - id = Column(String(64), primary_key=True) - provider = Column(String(64), nullable=False) - resource = Column(String(64)) - property = Column(String(64)) - value_config = 
Column(String(2048)) - is_locked = Column(TINYINT(1), server_default=text("'0'")) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.deleted_time = data.get("deleted_time") - self.enabled = data.get("enabled") - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.is_locked = data.get("is_locked") - self.value_config = data.get("value_config") or '{}' - self.provider = data.get("provider") - self.resource = data.get("resource") - self.property = data.get("property") - self.updated_time = data.get("updated_time") - - -class InstanceType(Base): - __tablename__ = "instance_type" - - id = Column(String(64), primary_key=True) - provider_id = Column(String(64)) - provider = Column(String(64), nullable=False) - name = Column(String(64)) - type = Column(String(64)) - origin_name = Column(String(64)) - network = Column(String(64)) - cpu = Column(Integer) - memory = Column(Integer) - extend_info = Column(String(1024)) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.cpu = data.get("cpu") - self.created_time = data.get("created_time") - self.deleted_time = data.get("deleted_time") - self.enabled = data.get("enabled") - self.extend_info = data.get("extend_info") - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.memory = data.get("memory") - self.name = data.get("name") - self.type = data.get("type") - self.origin_name = data.get("origin_name") - self.network = data.get("network") - self.provider = data.get("provider") - self.provider_id = data.get("provider_id") - 
self.updated_time = data.get("updated_time") - - -class Instance(Base): - __tablename__ = "instance" - - id = Column(String(64), primary_key=True) - provider_id = Column(String(64)) - provider = Column(String(64), nullable=False) - region = Column(String(64)) - zone = Column(String(64)) - resource_id = Column(String(64)) - name = Column(String(64)) - hostname = Column(String(64)) - instance_type = Column(String(64)) - disk_type = Column(String(64)) - disk_size = Column(String(64)) - subnet_id = Column(String(64)) - ipaddress = Column(String(64)) - image = Column(String(64)) - password = Column(String(64)) - public_ip = Column(String(64)) - cpu = Column(Integer) - memory = Column(Integer) - power_state = Column(String(64)) - extend_info = Column(String(1024)) - define_json = Column(String(1024)) - status = Column(String(64)) - result_json = Column(String(5120)) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - enabled = Column(TINYINT(1), server_default=text("'1'")) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.cpu = data.get("cpu") - self.define_json = data.get("define_json") or '{}' - self.deleted_time = data.get("deleted_time") - self.image = data.get("image") - self.disk_size = data.get("disk_size") - self.disk_type = data.get("disk_type") - self.enabled = data.get("enabled") - self.extend_info = data.get("extend_info") or '{}' - self.hostname = data.get("hostname") - self.name = data.get("name") - self.id = data.get("id") - self.password = data.get("password") - self.public_ip = data.get("public_ip") - self.subnet_id = data.get("subnet_id") - self.instance_type = data.get("instance_type") - self.ipaddress = data.get("ipaddress") - self.is_deleted = data.get("is_deleted") - self.memory = data.get("memory") - self.power_state = data.get("power_state") - self.provider = data.get("provider") - self.provider_id = 
data.get("provider_id") - self.region = data.get("region") - self.resource_id = data.get("resource_id") - self.result_json = data.get("result_json") or '{}' - self.status = data.get("status") - self.updated_time = data.get("updated_time") - self.zone = data.get("zone") - - -class CloudResource(Base): - __tablename__ = "cloud_resource" - - id = Column(String(64), primary_key=True) - provider_id = Column(String(64)) - provider = Column(String(64)) - region = Column(String(64)) - zone = Column(String(64)) - resource_id = Column(String(64)) - owner_id = Column(String(64)) - relation_id = Column(String(64)) - resource_name = Column(String(64), nullable=False) - propertys = Column(String(1024)) - extend_info = Column(String(1024)) - define_json = Column(String(4096)) - status = Column(String(64)) - output_json = Column(String(2048)) - result_json = Column(String(5120)) - created_time = Column(DateTime) - updated_time = Column(DateTime) - deleted_time = Column(DateTime) - is_deleted = Column(TINYINT(1), server_default=text("'0'")) - - def __init__(self, data): - self.created_time = datetime.datetime.now() - self.define_json = data.get("define_json") - self.deleted_time = data.get("deleted_time") - self.extend_info = data.get("extend_info") - self.id = data.get("id") - self.is_deleted = data.get("is_deleted") - self.output_json = data.get("output_json") - self.propertys = data.get("propertys") - self.provider = data.get("provider") - self.provider_id = data.get("provider_id") - self.region = data.get("region") - self.resource_id = data.get("resource_id") - self.owner_id = data.get("owner_id") - self.relation_id = data.get("relation_id") - self.resource_name = data.get("resource_name") - self.result_json = data.get("result_json") - self.status = data.get("status") - self.updated_time = data.get("updated_time") - self.zone = data.get("zone") - -# p = dir(ProviderSecret) -# for x in p: -# if not x.startswith("_") and x not in ["to_dict", "metadata"]: -# print('self.%s = 
data.get("%s")' % (x, x)) diff --git a/apps/background/models/objects.py b/apps/background/models/objects.py deleted file mode 100644 index f9608f20..00000000 --- a/apps/background/models/objects.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding: utf-8 - -import traceback - -from lib.json_helper import format_json_dumps -from lib.logs import logger -from lib.mysql_client import Database - - -class _BaseManager(object): - obj = None - - def create(self, data): - ''' - - :param data: - :return: - ''' - - try: - logger.info("try create data: %s" % (format_json_dumps(data))) - obj = self.obj(data) - Database().create(obj) - return 1, obj.id - except Exception, e: - logger.info("%s create data error, insert: %s" % (str(self.obj), format_json_dumps(data))) - logger.info(traceback.format_exc()) - raise e - - def list(self, filters=None, filter_string=None, - params=None, pageAt=0, pageSize=20000, orderby=None): - ''' - - :param filters: - :param filter_string: - :param params: - :param pageAt: - :param pageSize: - :param orderby: - :return: - ''' - pageAt = pageAt or 0 - pageSize = pageSize or 20000 - filters = filters or {} - try: - num, result = Database().query(self.obj, - filters=filters, - filter_string=filter_string, - params=params, - pageAt=pageAt, - pageSize=pageSize, - orderby=orderby) - _tmp = [] - for data in result: - _tmp.append(data.to_dict()) - return num, _tmp - except: - logger.info("%s list data error" % (str(self.obj))) - logger.info(traceback.format_exc()) - return 0, [] - - def get(self, filters=None, filter_string=None, params=None): - try: - filters = filters or {} - result = Database().get(self.obj, filters=filters, filter_string=filter_string, params=params) - if result: - return result.to_dict() - else: - return {} - except: - logger.info("%s get data error" % (str(self.obj))) - logger.info(traceback.format_exc()) - return {} - - def update(self, filters, data): - try: - ora_data = self.get(filters=filters) - if not ora_data: - return 0, {} - - 
Database().update(self.obj, filters=filters, update_data=data) - ora_data.update(data) - return 1, ora_data - except Exception, e: - logger.info("%s update data error, filter: %s, data: %s" % (str(self.obj), - str(filters), - format_json_dumps(data))) - logger.info(traceback.format_exc()) - raise e - - def delete(self, filters): - try: - ora_data = self.get(filters=filters) - if not ora_data: - return 0 - - Database().delete(self.obj, filters=filters) - return 1 - except Exception, e: - logger.info("%s delete data error" % (str(self.obj))) - logger.info(traceback.format_exc()) - raise e - - def excute(self, sql, bind=None): - ''' - - :param sql: - :return: - ''' - - return Database().excute(sql, bind) diff --git a/apps/background/resource/__init__.py b/apps/background/resource/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/resource/configr/__init__.py b/apps/background/resource/configr/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/resource/configr/commonkey.py b/apps/background/resource/configr/commonkey.py deleted file mode 100644 index 516bdb52..00000000 --- a/apps/background/resource/configr/commonkey.py +++ /dev/null @@ -1,42 +0,0 @@ -# coding: utf-8 - -import datetime -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import CommonKeyManager - - -class CommonKeyObject(object): - def __init__(self): - self.resource = CommonKeyManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - return self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - 
where_data.update({"id": rid}) - return self.resource.get(filters=where_data) - - def query_one(self, where_data): - return self.resource.get(filters=where_data) - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - update_data["updated_time"] = datetime.datetime.now() - return self.resource.update(filters=where_data, data=update_data) - - def delete(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.delete(filters=where_data) - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) diff --git a/apps/background/resource/configr/history.py b/apps/background/resource/configr/history.py deleted file mode 100644 index b741dc2c..00000000 --- a/apps/background/resource/configr/history.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from lib.json_helper import format_json_dumps -from apps.background.models.dbserver import HistoryManager - - -class HistoryObject(object): - def __init__(self): - self.resource = HistoryManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["ora_data"] = json.loads(res["ora_data"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["xid"] = create_data.get("xid") or get_uuid() - create_data["ora_data"] = format_json_dumps(create_data.get("ora_data", {})) - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"xid": rid}) - - data = self.resource.get(filters=where_data) - if data: - data["ora_data"] = 
json.loads(data["ora_data"]) - - return data - - def query_one(self, where_data): - data = self.resource.get(filters=where_data) - if data: - data["ora_data"] = json.loads(data["ora_data"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"xid": rid}) - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["ora_data"] = json.loads(data["ora_data"]) - - return count, data - - def delete(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"xid": rid}) - return self.resource.delete(filters=where_data) - - def ora_delete(self, rid): - return self.resource.delete(filters={"xid": rid}) diff --git a/apps/background/resource/configr/provider.py b/apps/background/resource/configr/provider.py deleted file mode 100644 index 4ed3a7b3..00000000 --- a/apps/background/resource/configr/provider.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from core import local_exceptions -from lib.uuid_util import get_uuid -from lib.encrypt_helper import encrypt_str -from lib.encrypt_helper import decrypt_str -from apps.background.models.dbserver import ProvidersManager - - -class ProviderObject(object): - def __init__(self): - self.resource = ProvidersManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None, filter_string=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - filter_string=filter_string, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["provider_property"] = json.loads(res["provider_property"]) - data.append(res) - - return count, data - - def create(self, create_data): - if create_data.get("secret_id"): - _key = 
create_data.get("secret_id") - if not _key.startswith("{cipher_a}"): - create_data["secret_id"] = "{cipher_a}" + encrypt_str(create_data.get("secret_id")) - - if create_data.get("secret_key"): - _key = create_data.get("secret_key") - if not _key.startswith("{cipher_a}"): - create_data["secret_key"] = "{cipher_a}" + encrypt_str(create_data.get("secret_key")) - - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["provider_property"] = json.loads(data["provider_property"]) - - return data - - def query_one(self, where_data): - where_data = where_data or {} - where_data.update({"is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["provider_property"] = json.loads(data["provider_property"]) - - return data - - def update(self, rid, update_data, where_data=None): - if update_data.get("secret_id"): - _key = update_data.get("secret_id") - if not _key.startswith("{cipher_a}"): - update_data["secret_id"] = "{cipher_a}" + encrypt_str(update_data.get("secret_id")) - - if update_data.get("secret_key"): - _key = update_data.get("secret_key") - if not _key.startswith("{cipher_a}"): - update_data["secret_key"] = "{cipher_a}" + encrypt_str(update_data.get("secret_key")) - - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["provider_property"] = 
json.loads(data["provider_property"]) - - return count, data - - def delete(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.delete(filters=where_data) - - def provider_object(self, provider_id): - data = self.show(rid=provider_id) - if not data: - raise local_exceptions.ResourceValidateError("provider", "provider %s 未注册" % provider_id) - return data - - def provider_name_object(self, provider): - if not provider: - raise local_exceptions.ResourceValidateError("provider", "provider 不允许为空") - - data = self.query_one(where_data={"name": provider}) - if not data: - raise local_exceptions.ResourceValidateError("provider", "provider %s 未注册" % provider) - return data - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - diff --git a/apps/background/resource/configr/provider_secret.py b/apps/background/resource/configr/provider_secret.py deleted file mode 100644 index 29965aa0..00000000 --- a/apps/background/resource/configr/provider_secret.py +++ /dev/null @@ -1,89 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from core import local_exceptions -from lib.uuid_util import get_uuid -from lib.encrypt_helper import encrypt_str -from lib.encrypt_helper import decrypt_str -from apps.background.models.dbserver import ProviderSecretManager - - -class ProviderSecretObject(object): - def __init__(self): - self.resource = ProviderSecretManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None, filter_string=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - filter_string=filter_string, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - data.append(res) - - return count, data - - def create(self, create_data): - 
if create_data.get("secret_info"): - _key = create_data.get("secret_info") - if not _key.startswith("{cipher_a}"): - create_data["secret_info"] = "{cipher_a}" + encrypt_str(create_data.get("secret_info")) - - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - - return data - - def query_one(self, where_data): - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - - return data - - def update(self, rid, update_data, where_data=None): - if update_data.get("secret_info"): - _key = update_data.get("secret_info") - if not _key.startswith("{cipher_a}"): - update_data["secret_info"] = "{cipher_a}" + encrypt_str(update_data.get("secret_info")) - - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - - return count, data - - def delete(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.delete(filters=where_data) - - def secret_name_object(self, name, provider): - data = self.query_one(where_data={"name": name, "provider": provider}) - if not data: - raise local_exceptions.ResourceValidateError("secret", "secret %s 未注册" % name) - return data - - def name_object(self, name, provider): - return self.query_one(where_data={"name": name, "provider": provider}) - - def ora_delete(self, rid): - return 
self.resource.delete(filters={"id": rid}) diff --git a/apps/background/resource/configr/region.py b/apps/background/resource/configr/region.py deleted file mode 100644 index 2194f1ee..00000000 --- a/apps/background/resource/configr/region.py +++ /dev/null @@ -1,178 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from core import local_exceptions -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import RegionManager -from apps.background.models.dbserver import ZoneManager - - -class _AreaObject(object): - def __init__(self): - self.resource = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None, filter_in=None, filter_string=None): - ''' - - :param filters: - :param page: - :param pagesize: - :param orderby: - :param filter_in: - :param filter_string: - :return: - ''' - - filters = filters or {} - filter_in = filter_in or {} - - filters["is_deleted"] = 0 - - for key, value in filter_in.items(): - if value: - f = '' - for x in value: - f += "'" + x + "'," - f = f[:-1] - - x = '(' + f + ')' - if filter_string: - filter_string += 'and ' + key + " in " + x + " " - else: - filter_string = key + " in " + x + " " - - count, results = self.resource.list(filters=filters, pageAt=page, - filter_string=filter_string, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) if res["extend_info"] else {} - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - data = self.resource.get(filters=where_data) - 
if data: - data["extend_info"] = json.loads(data["extend_info"]) if data["extend_info"] else {} - - return data - - def query_one(self, where_data): - where_data = where_data or {} - where_data.update({"is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) if data["extend_info"] else {} - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) if data["extend_info"] else {} - - return count, data - - def delete(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.delete(filters=where_data) - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class RegionObject(_AreaObject): - def __init__(self): - super(RegionObject, self).__init__() - self.resource = RegionManager() - - def region_object(self, region_id): - data = self.show(rid=region_id) - if not data: - data = self.query_one(where_data={"name": region_id}) - if not data: - data = self.query_one(where_data={"asset_id": region_id}) - - if not data: - raise local_exceptions.ResourceValidateError("region", "region %s 未注册" % region_id) - - return data - - def region_name_object(self, region, provider): - data = self.query_one(where_data={"name": region, "provider": provider}) - if not data: - raise local_exceptions.ResourceValidateError("region", "region %s 未注册" % region) - return data - - def region_asset_object(self, asset_id, provider=None): - where_data = {"asset_id": asset_id} - if provider: - where_data["provider"] = provider - data = self.query_one(where_data=where_data) - if not data: - raise local_exceptions.ResourceValidateError("region", "region 
asset %s 未注册" % asset_id) - return data - - def region_asset(self, asset_id, provider=None): - where_data = {"asset_id": asset_id} - if provider: - where_data["provider"] = provider - - return self.query_one(where_data=where_data) - - -class ZoneObject(_AreaObject): - def __init__(self): - super(ZoneObject, self).__init__() - self.resource = ZoneManager() - - def zone_object(self, zone_id): - data = self.show(rid=zone_id) - if not data: - raise local_exceptions.ResourceValidateError("zone", "zone %s 未注册" % zone_id) - return data - - def zone_name_object(self, zone, provider): - data = self.query_one(where_data={"name": zone, "provider": provider}) - if not data: - raise local_exceptions.ResourceValidateError("zone", "zone %s 未注册" % zone) - return data - - def zone_asset_object(self, asset_id, provider): - where_data = {"asset_id": asset_id} - if provider: - where_data["provider"] = provider - data = self.query_one(where_data=where_data) - if not data: - raise local_exceptions.ResourceValidateError("zone", "zone asset %s 未注册" % asset_id) - return data - - def zone_asset(self, asset_id, provider=None): - where_data = {"asset_id": asset_id} - if provider: - where_data["provider"] = provider - - return self.query_one(where_data=where_data) - - def zone_region_asset(self, asset_id, provider=None): - where_data = {"asset_id": asset_id} - if provider: - where_data["provider"] = provider - - return self.query_one(where_data=where_data) diff --git a/apps/background/resource/configr/resource.py b/apps/background/resource/configr/resource.py deleted file mode 100644 index 11e573db..00000000 --- a/apps/background/resource/configr/resource.py +++ /dev/null @@ -1,104 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -import traceback -from lib.logs import logger -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import ResourceManager - - -class 
ResourceObject(object): - def __init__(self): - self.resource = ResourceManager() - - def list(self, filters=None, page=None, - pagesize=None, orderby=None, - filter_string=None, filter_in=None): - ''' - - :param filters: - :param page: - :param pagesize: - :param orderby: - :param filter_string: - :param filter_in: - :return: - ''' - - filter_in = filter_in or {} - for key, value in filter_in.items(): - if value: - f = '' - for x in value: - f += "'" + x + "'," - f = f[:-1] - - x = '(' + f + ')' - if filter_string: - filter_string += 'and ' + key + " in " + x + " " - else: - filter_string = key + " in " + x + " " - - count, results = self.resource.list(filters=filters, pageAt=page, - filter_string=filter_string, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["resource_property"] = json.loads(res["resource_property"]) - res["resource_output"] = json.loads(res["resource_output"]) if res["resource_output"] else {} - res["data_source"] = json.loads(res["data_source"]) if res.get("data_source") else {} - t = json.loads(res["data_source_output"]) if res.get("data_source_output") else {} - res["data_source_output"] = t - res["pre_action_output"] = json.loads(res["pre_action_output"]) if res.get("pre_action_output") else {} - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.query_one(where_data) - - def query_one(self, where_data): - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["resource_property"] = json.loads(data["resource_property"]) - 
data["resource_output"] = json.loads(data["resource_output"]) if data["resource_output"] else {} - data["data_source"] = json.loads(data["data_source"]) if data.get("data_source") else {} - t = json.loads(data["data_source_output"]) if data.get("data_source_output") else {} - data["data_source_output"] = t - data["pre_action_output"] = json.loads(data["pre_action_output"]) if data.get("pre_action_output") else {} - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["resource_property"] = json.loads(data["resource_property"]) - data["resource_output"] = json.loads(data["resource_output"]) if data["resource_output"] else {} - data["data_source"] = json.loads(data["data_source"]) if data.get("data_source") else {} - t = json.loads(data["data_source_output"]) if data.get("data_source_output") else {} - data["data_source_output"] = t - data["pre_action_output"] = json.loads(data["pre_action_output"]) if data.get("pre_action_output") else {} - return count, data - - def delete(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.delete(filters=where_data) - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) diff --git a/apps/background/resource/configr/value_config.py b/apps/background/resource/configr/value_config.py deleted file mode 100644 index 1cedaf15..00000000 --- a/apps/background/resource/configr/value_config.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import ConfigManager - - -class 
ValueConfigObject(object): - def __init__(self): - self.resource = ConfigManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None, filter_string=None): - count, results = self.resource.list(filters=filters, pageAt=page, - filter_string=filter_string, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["value_config"] = json.loads(res["value_config"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.query_one(where_data) - - def query_one(self, where_data): - data = self.resource.get(filters=where_data) - if data: - data["value_config"] = json.loads(data["value_config"]) - return data - - def resource_value_configs(self, provider, resource): - where_data = {"provider": provider, "resource": resource} - count, datas = self.list(filters=where_data) - res = {} - for data in datas: - res[data["property"]] = data["value_config"] - - # if "zone" not in res.keys(): - # _zone = self.query_one(where_data={"provider": provider, "resource": "zone"}) - # if _zone: - # res["zone"] = _zone["value_config"] - - return res - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["value_config"] = json.loads(data["value_config"]) - return count, data - - def delete(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.delete(filters=where_data) - - def ora_delete(self, rid): - return 
self.resource.delete(filters={"id": rid}) diff --git a/apps/background/resource/database/__init__.py b/apps/background/resource/database/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/resource/database/kvstore.py b/apps/background/resource/database/kvstore.py deleted file mode 100644 index ceccc04a..00000000 --- a/apps/background/resource/database/kvstore.py +++ /dev/null @@ -1,126 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import KVStoreManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class _KVStoreBase(ResourceBaseObject): - def __init__(self): - self.resource = KVStoreManager() - self.engine = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - if self.engine: - filters["engine"] = self.engine - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - if self.engine: - create_data["engine"] = self.engine - - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - if self.engine: - where_data["engine"] = self.engine - - data = self.resource.get(filters=where_data) - if data: - 
data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - if self.engine: - where_data["engine"] = self.engine - - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid, update_data=None): - update_data = update_data or {} - update_data["is_deleted"] = 1 - update_data["deleted_time"] = datetime.datetime.now() - count, data = self.update(rid, update_data=update_data) - return count - - def object_resource_id(self, rid): - data = self.show(rid) - if not data: - engine = self.engine or "database" - raise local_exceptions.ValueValidateError(engine, "rds database %s 不存在" % rid) - return data["resource_id"] - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class _BackupBase(_KVStoreBase): - def __init__(self): - super(_BackupBase, self).__init__() - - -class RedisObject(_KVStoreBase): - def __init__(self): - super(RedisObject, self).__init__() - self.engine = "redis" - - -class MemcachedObject(_KVStoreBase): - def __init__(self): - super(MemcachedObject, self).__init__() - self.engine = "memcached" - - -class KVStoreObject(_KVStoreBase): - pass - - -class RedisBackupObject(_BackupBase): - def __init__(self): - super(RedisBackupObject, self).__init__() - - -class MemcachedBackupObject(_BackupBase): - def __init__(self): - super(MemcachedBackupObject, self).__init__() - - -class KVStoreBackupObject(_BackupBase): - pass diff --git a/apps/background/resource/database/nosql.py 
b/apps/background/resource/database/nosql.py deleted file mode 100644 index 78db1de3..00000000 --- a/apps/background/resource/database/nosql.py +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import NosqlManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class _NosqlBase(ResourceBaseObject): - def __init__(self): - self.resource = NosqlManager() - self.engine = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - if self.engine: - filters["engine"] = self.engine - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - if self.engine: - create_data["engine"] = self.engine - - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - if self.engine: - where_data["engine"] = self.engine - - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - 
where_data.update({"id": rid, "is_deleted": 0}) - - if self.engine: - where_data["engine"] = self.engine - - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid, update_data=None): - update_data = update_data or {} - update_data["is_deleted"] = 1 - update_data["deleted_time"] = datetime.datetime.now() - count, data = self.update(rid, update_data=update_data) - return count - - def object_resource_id(self, rid): - data = self.show(rid) - if not data: - engine = self.engine or "database" - raise local_exceptions.ValueValidateError(engine, "rds database %s 不存在" % rid) - return data["resource_id"] - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class MongoDBObject(_NosqlBase): - def __init__(self): - super(MongoDBObject, self).__init__() - self.engine = "mongodb" - - -class NosqlObject(_NosqlBase): - pass - diff --git a/apps/background/resource/database/rds.py b/apps/background/resource/database/rds.py deleted file mode 100644 index 69d86fe9..00000000 --- a/apps/background/resource/database/rds.py +++ /dev/null @@ -1,224 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import RdsDbManager -from apps.background.models.dbserver import RdsAccountManager -from apps.background.models.dbserver import RdsPrivilegeManager -from apps.background.models.dbserver import RdsBackupManager -from apps.background.models.dbserver import RdsDatabaseManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class 
_RdsDbBase(ResourceBaseObject): - def __init__(self): - self.resource = RdsDbManager() - self.engine = "rds" - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - if self.engine: - filters["engine"] = self.engine - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - if (not create_data.get("engine")) and self.engine: - create_data["engine"] = self.engine - - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - if self.engine: - where_data["engine"] = self.engine - - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def query_one(self, where_data=None): - where_data = where_data or {} - where_data.update({"is_deleted": 0}) - - if self.engine: - where_data["engine"] = self.engine - - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - if self.engine: - 
where_data["engine"] = self.engine - - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid, update_data=None): - update_data = update_data or {} - update_data["is_deleted"] = 1 - update_data["deleted_time"] = datetime.datetime.now() - count, data = self.update(rid, update_data=update_data) - return count - - def object_resource_id(self, rid): - data = self.show(rid) - if not data: - engine = self.engine or "database" - raise local_exceptions.ValueValidateError(engine, "rds database %s 不存在" % rid) - return data["resource_id"] - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class RdsDBObject(_RdsDbBase): - pass - - -class RdsAccountObject(_RdsDbBase): - def __init__(self): - super(RdsAccountObject, self).__init__() - self.engine = "rds" - self.resource = RdsAccountManager() - - def query_account(self, username, where_data=None): - where_data = where_data or {} - where_data["name"] = username - data = self.query_one(where_data=where_data) - if not data: - engine = self.engine or "database" - raise local_exceptions.ValueValidateError(engine, "rds database account %s 不存在" % username) - - return data - - -class RdsPrivilegeObject(_RdsDbBase): - def __init__(self): - super(RdsPrivilegeObject, self).__init__() - self.engine = "rds" - self.resource = RdsPrivilegeManager() - - -class RdsBackupObject(_RdsDbBase): - def __init__(self): - super(RdsBackupObject, self).__init__() - self.engine = "rds" - self.resource = RdsBackupManager() - - -class MysqlObject(_RdsDbBase): - def __init__(self): - super(MysqlObject, self).__init__() - self.engine = "mysql" - - -class MariaDBObject(_RdsDbBase): - def __init__(self): - super(MariaDBObject, 
self).__init__() - self.engine = "mariadb" - - -class PostgreSQLObject(_RdsDbBase): - def __init__(self): - super(PostgreSQLObject, self).__init__() - self.engine = "PostgreSQL" - - -class MysqlDatabaseObject(_RdsDbBase): - def __init__(self): - super(MysqlDatabaseObject, self).__init__() - self.engine = "mysql" - self.resource = RdsDatabaseManager() - - -class MysqlAccountObject(RdsAccountObject): - def __init__(self): - super(MysqlAccountObject, self).__init__() - self.engine = "mysql" - - -class MysqlPrivilegeObject(RdsPrivilegeObject): - def __init__(self): - super(MysqlPrivilegeObject, self).__init__() - self.engine = "mysql" - - -class MysqlBackupObject(RdsBackupObject): - def __init__(self): - super(MysqlBackupObject, self).__init__() - self.engine = "mysql" - - -class MariaDBAccountObject(RdsAccountObject): - def __init__(self): - super(MariaDBAccountObject, self).__init__() - self.engine = "mariadb" - - -class MariaDBPrivilegeObject(RdsPrivilegeObject): - def __init__(self): - super(MariaDBPrivilegeObject, self).__init__() - self.engine = "mariadb" - - -class MariaDBBackupObject(RdsBackupObject): - def __init__(self): - super(MariaDBBackupObject, self).__init__() - self.engine = "mariadb" - - -class PostgreSQLAccountObject(RdsAccountObject): - def __init__(self): - super(PostgreSQLAccountObject, self).__init__() - self.engine = "PostgreSQL" - - -class PostgreSQLPrivilegeObject(RdsPrivilegeObject): - def __init__(self): - super(PostgreSQLPrivilegeObject, self).__init__() - self.engine = "PostgreSQL" - - -class PostgreSQLBackupObject(RdsBackupObject): - def __init__(self): - super(PostgreSQLBackupObject, self).__init__() - self.engine = "PostgreSQL" diff --git a/apps/background/resource/loadbalance/__init__.py b/apps/background/resource/loadbalance/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/resource/loadbalance/lb.py b/apps/background/resource/loadbalance/lb.py deleted file mode 100644 index 8aadcbba..00000000 
--- a/apps/background/resource/loadbalance/lb.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.logs import logger -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import LBManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class LBObject(ResourceBaseObject): - def __init__(self): - self.resource = LBManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = 
json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid, update_data=None): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def lb_resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("lb", "lb %s 不存在" % rid) - return data["resource_id"] - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) diff --git a/apps/background/resource/loadbalance/lb_attach.py b/apps/background/resource/loadbalance/lb_attach.py deleted file mode 100644 index c59911b8..00000000 --- a/apps/background/resource/loadbalance/lb_attach.py +++ /dev/null @@ -1,118 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import LBAttachManager -from apps.background.models.dbserver import LBAttachInstanceManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class LBAttachObject(ResourceBaseObject): - def __init__(self): - self.resource = LBAttachManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - res["backend_servers"] = json.loads(res["backend_servers"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = 
create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - data["backend_servers"] = json.loads(data["backend_servers"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - data["backend_servers"] = json.loads(data["backend_servers"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - - -class LBAttachInstanceObject(object): - def __init__(self): - self.resource = LBAttachInstanceManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - - return count, results - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - 
where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - return data - - def query_one(self, where_data=None): - where_data = where_data or {} - where_data.update({"is_deleted": 0}) - data = self.resource.get(filters=where_data) - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - diff --git a/apps/background/resource/loadbalance/listener.py b/apps/background/resource/loadbalance/listener.py deleted file mode 100644 index 6920c83a..00000000 --- a/apps/background/resource/loadbalance/listener.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import LBListenerManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class LBListenerObject(ResourceBaseObject): - def __init__(self): - self.resource = LBListenerManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def 
create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("lb listener", "lb listener %s 不存在" % rid) - return data["resource_id"] - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - diff --git a/apps/background/resource/network/__init__.py b/apps/background/resource/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/resource/network/connnect_network.py b/apps/background/resource/network/connnect_network.py deleted file mode 100644 index e576bf50..00000000 --- a/apps/background/resource/network/connnect_network.py +++ /dev/null @@ -1,92 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, 
print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import CCNManager -from apps.background.models.dbserver import CCNAttachManager -from apps.background.models.dbserver import CCNBandwidthManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class _ConnectNetBase(ResourceBaseObject): - def __init__(self): - self.resource = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = 
json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class CCNObject(_ConnectNetBase): - def __init__(self): - super(CCNObject, self).__init__() - self.resource = CCNManager() - - def resource_id(self, rid, where_data=None): - ccn = self.show(rid, where_data) - if not ccn: - raise local_exceptions.ValueValidateError("ccn", "ccn %s 不存在 或 不在同一区域" % rid) - return ccn["resource_id"] - - -class CCNAttachObject(_ConnectNetBase): - def __init__(self): - super(CCNAttachObject, self).__init__() - self.resource = CCNAttachManager() - - -class CCNBandwidthObject(_ConnectNetBase): - def __init__(self): - super(CCNBandwidthObject, self).__init__() - self.resource = CCNBandwidthManager() diff --git a/apps/background/resource/network/eip.py b/apps/background/resource/network/eip.py deleted file mode 100644 index 2bb0b191..00000000 --- a/apps/background/resource/network/eip.py +++ /dev/null @@ -1,102 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import EipManager -from apps.background.models.dbserver import EipAssociation -from apps.background.resource.resource_base import ResourceBaseObject - - -class _eipBase(ResourceBaseObject): - def __init__(self): - self.resource = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = 
json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def query_one(self, where_data): - where_data.update({"is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class EipObject(_eipBase): - def __init__(self): - super(EipObject, self).__init__() - self.resource = EipManager() - - def eip_resource_id(self, rid): - data = self.show(rid) - if not data: - raise 
local_exceptions.ValueValidateError("eip_id", "eip %s 不存在" % rid) - return data["resource_id"] - - def eip_resource_ipaddress(self, ipaddress): - data = self.query_one(where_data={"ipaddress": ipaddress}) - if not data: - raise local_exceptions.ValueValidateError("eip", "eip ipaddress %s 不存在" % ipaddress) - - return data["resource_id"] - - -class EipAssociationObject(_eipBase): - def __init__(self): - super(EipAssociationObject, self).__init__() - self.resource = EipAssociation() diff --git a/apps/background/resource/network/nat_gateway.py b/apps/background/resource/network/nat_gateway.py deleted file mode 100644 index 9ff6d249..00000000 --- a/apps/background/resource/network/nat_gateway.py +++ /dev/null @@ -1,65 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import NatGatewayManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class NatGatewayObject(ResourceBaseObject): - def __init__(self): - self.resource = NatGatewayManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) 
- data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - diff --git a/apps/background/resource/network/route_entry.py b/apps/background/resource/network/route_entry.py deleted file mode 100644 index 240f8385..00000000 --- a/apps/background/resource/network/route_entry.py +++ /dev/null @@ -1,65 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from apps.background.models.dbserver import RouteEntryManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class RouteEntryObject(ResourceBaseObject): - def __init__(self): - self.resource = RouteEntryManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - 
res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - diff --git a/apps/background/resource/network/route_table.py b/apps/background/resource/network/route_table.py deleted file mode 100644 index 575a2b6f..00000000 --- a/apps/background/resource/network/route_table.py +++ /dev/null @@ -1,73 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import RouteTableManager -from apps.background.resource.resource_base import 
ResourceBaseObject - - -class RouteTableObject(ResourceBaseObject): - def __init__(self): - self.resource = RouteTableManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def routeTable_resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("route_table_id", "route 
table %s 不存在" % rid) - return data["resource_id"] - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - diff --git a/apps/background/resource/network/security_group.py b/apps/background/resource/network/security_group.py deleted file mode 100644 index 6eaa5e51..00000000 --- a/apps/background/resource/network/security_group.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import SecGroupManager -from apps.background.models.dbserver import SecGroupRuleManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class _SecBaseObject(ResourceBaseObject): - def __init__(self): - self.resource = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def 
update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class SecGroupObject(_SecBaseObject): - def __init__(self): - super(SecGroupObject, self).__init__() - self.resource = SecGroupManager() - - def resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("security_group_id", "security group %s 不存在" % rid) - return data["resource_id"] - - -class SecGroupRuleObject(_SecBaseObject): - def __init__(self): - super(SecGroupRuleObject, self).__init__() - self.resource = SecGroupRuleManager() diff --git a/apps/background/resource/network/subnet.py b/apps/background/resource/network/subnet.py deleted file mode 100644 index 0562b723..00000000 --- a/apps/background/resource/network/subnet.py +++ /dev/null @@ -1,72 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.models.dbserver import SubnetManager -from apps.background.resource.resource_base import ResourceBaseObject - - -class SubnetObject(ResourceBaseObject): - def __init__(self): - self.resource = SubnetManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results 
= self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def subnet_resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("subnet_id", "subnet %s 不存在" % rid) - return data["resource_id"] - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - diff --git a/apps/background/resource/network/vpc.py b/apps/background/resource/network/vpc.py deleted file mode 100644 index 715941b0..00000000 --- 
a/apps/background/resource/network/vpc.py +++ /dev/null @@ -1,75 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.resource.resource_base import ResourceBaseObject -from apps.background.models.dbserver import VpcManager - - -class VpcObject(ResourceBaseObject): - def __init__(self): - self.resource = VpcManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) if res["define_json"] else {} - res["result_json"] = json.loads(res["result_json"]) if res["result_json"] else {} - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def vpc_resource_id(self, rid, where_data=None): - vpc = self.show(rid, where_data) - if not vpc: - raise local_exceptions.ValueValidateError("vpc_id", "vpc %s 不存在 或 不在同一区域" % rid) - return vpc["resource_id"] - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, 
"is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete_obj(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.delete(filters=where_data) - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count diff --git a/apps/background/resource/resource_base.py b/apps/background/resource/resource_base.py deleted file mode 100644 index 2b99f922..00000000 --- a/apps/background/resource/resource_base.py +++ /dev/null @@ -1,250 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.encrypt_helper import encrypt_str -from lib.encrypt_helper import decrypt_str -from lib.uuid_util import get_uuid -from lib.logs import logger -from lib.json_helper import format_json_dumps -from core import local_exceptions -from apps.background.models.dbserver import CrsManager - - -class ResourceBaseObject(object): - resource = None - - def ora_show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - return self.resource.get(filters=where_data) - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class CrsObject(object): - def __init__(self, resource_name=None): - self.resource = CrsManager() - self.resource_name = resource_name - - def query_one(self, where_data=None): - where_data = where_data or {} - - if self.resource_name: - where_data["resource_name"] = self.resource_name - - data = 
self.resource.get(filters=where_data) - if data: - data["propertys"] = json.loads(data["propertys"]) - data["output_json"] = json.loads(data["output_json"]) - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def list(self, filters=None, page=None, - pagesize=None, orderby=None, - filter_in=None, filter_string=None): - ''' - - :param filters: - :param page: - :param pagesize: - :param orderby: - :param filter_in: - :param filter_string: - :return: - ''' - - filters = filters or {} - filter_in = filter_in or {} - filters["is_deleted"] = 0 - - if self.resource_name: - filters["resource_name"] = self.resource_name - - for key, value in filter_in.items(): - if value: - f = '' - for x in value: - f += "'" + x + "'," - f = f[:-1] - - x = '(' + f + ')' - if filter_string: - filter_string += 'and ' + key + " in " + x + " " - else: - filter_string = key + " in " + x + " " - - count, results = self.resource.list(filters=filters, pageAt=page, - filter_string=filter_string, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["propertys"] = json.loads(res["propertys"]) - res["output_json"] = json.loads(res["output_json"]) - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - if self.resource_name: - create_data["resource_name"] = self.resource_name - - propertys = create_data.get("propertys", {}) - if propertys.get("password"): - password = propertys.get("password") - if not password.startswith("{cipher_a}"): - propertys["password"] = "{cipher_a}" + encrypt_str(password) - - create_data["propertys"] = propertys - _after_data = {} - for key, value in create_data.items(): - if isinstance(value, dict): - value = format_json_dumps(value) - 
- _after_data[key] = value - - _after_data["id"] = create_data.get("id") or get_uuid() - _after_data["created_time"] = datetime.datetime.now() - _after_data["updated_time"] = _after_data["created_time"] - return self.resource.create(data=_after_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - if self.resource_name: - where_data["resource_name"] = self.resource_name - - data = self.resource.get(filters=where_data) - if data: - data["propertys"] = json.loads(data["propertys"]) - data["output_json"] = json.loads(data["output_json"]) - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - - if self.resource_name: - where_data["resource_name"] = self.resource_name - - propertys = update_data.get("propertys", {}) - if propertys.get("password"): - password = propertys.get("password") - if not password.startswith("{cipher_a}"): - propertys["password"] = "{cipher_a}" + encrypt_str(password) - - update_data["propertys"] = propertys - - _after_data = {} - for key, value in update_data.items(): - if isinstance(value, dict): - value = format_json_dumps(value) - - _after_data[key] = value - - _after_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=_after_data) - if data: - data["propertys"] = json.loads(data["propertys"]) - data["output_json"] = json.loads(data["output_json"]) - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def ora_update(self, rid, update_data, where_data=None): - where_data = where_data or {} - 
where_data.update({"id": rid}) - - if self.resource_name: - where_data["resource_name"] = self.resource_name - - propertys = update_data.get("propertys", {}) - if propertys.get("password"): - password = propertys.get("password") - if not password.startswith("{cipher_a}"): - propertys["password"] = "{cipher_a}" + encrypt_str(password) - - update_data["propertys"] = propertys - - _after_data = {} - for key, value in update_data.items(): - if isinstance(value, dict): - value = format_json_dumps(value) - - _after_data[key] = value - - _after_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=_after_data) - if data: - data["propertys"] = json.loads(data["propertys"]) - data["output_json"] = json.loads(data["output_json"]) - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid, update_data=None): - update_data = update_data or {} - update_data["is_deleted"] = 1 - update_data["deleted_time"] = datetime.datetime.now() - count, data = self.update(rid, update_data=update_data) - return count - - def object_resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError(self.resource_name, "资源 %s 不存在" % rid) - return data["resource_id"] - - def object_resource_assetid(self, rid): - data = self.show(rid) - if not data: - return rid - return data["resource_id"] - - def object_asset_id(self, rid): - data = self.show(rid) - if data: - return data["resource_id"] - else: - logger.info("search id: %s, asset id: null, may it's asset id, return" % rid) - - return rid - - def asset_object_id(self, asset_id): - crs_data = CrsObject().query_one(where_data={"resource_id": asset_id, "is_deleted": 0}) - if crs_data: - return crs_data.get("id") - - return asset_id - - def ora_show(self, rid, where_data=None): - where_data = where_data 
or {} - if self.resource_name: - where_data["resource_name"] = self.resource_name - - where_data.update({"id": rid}) - return self.resource.get(filters=where_data) - - def ora_delete(self, rid): - where_data = {"id": rid} - if self.resource_name: - where_data["resource_name"] = self.resource_name - - return self.resource.delete(filters=where_data) diff --git a/apps/background/resource/storage/__init__.py b/apps/background/resource/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/resource/storage/disk.py b/apps/background/resource/storage/disk.py deleted file mode 100644 index f0bfe423..00000000 --- a/apps/background/resource/storage/disk.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.resource.resource_base import ResourceBaseObject -from apps.background.models.dbserver import DiskAttachManager -from apps.background.models.dbserver import DiskManager - - -class _DiskBaseObject(ResourceBaseObject): - def __init__(self): - self.resource = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - 
where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class DiskObject(_DiskBaseObject): - def __init__(self): - super(DiskObject, self).__init__() - self.resource = DiskManager() - - def disk_resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("disk", "disk id %s 不存在" % rid) - return data["resource_id"] - - -class DiskAttachObject(_DiskBaseObject): - def __init__(self): - super(DiskAttachObject, self).__init__() - self.resource = DiskAttachManager() diff --git a/apps/background/resource/storage/object_storage.py b/apps/background/resource/storage/object_storage.py deleted file mode 100644 index 6697fb5f..00000000 --- a/apps/background/resource/storage/object_storage.py +++ /dev/null @@ -1,85 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.resource.resource_base import 
ResourceBaseObject -from apps.background.models.dbserver import ObjectStorageManager -from apps.background.models.dbserver import BucketObjectManager - - -class _StorageBaseObject(ResourceBaseObject): - def __init__(self): - self.resource = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - 
return self.resource.delete(filters={"id": rid}) - - -class ObjectStorageObject(_StorageBaseObject): - def __init__(self): - super(ObjectStorageObject, self).__init__() - self.resource = ObjectStorageManager() - - def resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("object storage", "object storage %s 不存在" % rid) - return data["resource_id"] - - -class BucketObjectObject(_StorageBaseObject): - def __init__(self): - super(BucketObjectObject, self).__init__() - self.resource = BucketObjectManager() diff --git a/apps/background/resource/vm/__init__.py b/apps/background/resource/vm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/background/resource/vm/eni.py b/apps/background/resource/vm/eni.py deleted file mode 100644 index e4845df9..00000000 --- a/apps/background/resource/vm/eni.py +++ /dev/null @@ -1,86 +0,0 @@ -# coding: utf-8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.resource.resource_base import ResourceBaseObject -from apps.background.models.dbserver import NetworkInterfaceAttachManager -from apps.background.models.dbserver import NetworkInterfaceManager - - -class _ENIBaseObject(ResourceBaseObject): - def __init__(self): - self.resource = None - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - 
create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - -class ENIObject(_ENIBaseObject): - def __init__(self): - super(ENIObject, self).__init__() - self.resource = NetworkInterfaceManager() - - def disk_resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("ENI", "network interface id %s 不存在" % rid) - return data["resource_id"] - - -class ENIAttachObject(_ENIBaseObject): - def __init__(self): - super(ENIAttachObject, self).__init__() - self.resource = NetworkInterfaceAttachManager() - diff --git a/apps/background/resource/vm/instance.py b/apps/background/resource/vm/instance.py deleted file mode 100644 index 88ae7765..00000000 --- a/apps/background/resource/vm/instance.py +++ /dev/null @@ -1,84 +0,0 @@ -# coding: utf-8 -from __future__ import 
(absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.resource.resource_base import ResourceBaseObject -from apps.background.models.dbserver import InstanceManager - - -class InstanceObject(ResourceBaseObject): - def __init__(self): - self.resource = InstanceManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None): - filters = filters or {} - filters["is_deleted"] = 0 - - count, results = self.resource.list(filters=filters, pageAt=page, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - res["define_json"] = json.loads(res["define_json"]) - res["result_json"] = json.loads(res["result_json"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def org_show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - 
update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - data["define_json"] = json.loads(data["define_json"]) - data["result_json"] = json.loads(data["result_json"]) - - return count, data - - def delete(self, rid, update_data=None): - update_data = update_data or {} - update_data.update({"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - count, data = self.update(rid, update_data=update_data) - return count - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) - - def vm_resource_id(self, rid): - data = self.show(rid) - if not data: - raise local_exceptions.ValueValidateError("instance_id", "instance id %s 不存在" % rid) - return data["resource_id"] diff --git a/apps/background/resource/vm/instance_type.py b/apps/background/resource/vm/instance_type.py deleted file mode 100644 index 62ecf130..00000000 --- a/apps/background/resource/vm/instance_type.py +++ /dev/null @@ -1,112 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import datetime -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.background.resource.resource_base import ResourceBaseObject -from apps.background.models.dbserver import InstanceTypeManager - - -class InstanceTypeObject(ResourceBaseObject): - def __init__(self): - self.resource = InstanceTypeManager() - - def list(self, filters=None, page=None, pagesize=None, orderby=None, filter_string=None, filter_in=None): - filters = filters or {} - filter_in = filter_in or {} - - filters["is_deleted"] = 0 - - for key, value in filter_in.items(): - if value: - f = '' - for x in value: - f += "'" + x + "'," - f = f[:-1] - - x = '(' + f + ')' - if filter_string: - filter_string += 'and ' + key + " in " + x + " " - else: - filter_string = key + " in " + x + " " - - count, 
results = self.resource.list(filters=filters, pageAt=page, - filter_string=filter_string, - pageSize=pagesize, orderby=orderby) - data = [] - for res in results: - res["extend_info"] = json.loads(res["extend_info"]) - data.append(res) - - return count, data - - def create(self, create_data): - create_data["id"] = create_data.get("id") or get_uuid() - create_data["created_time"] = datetime.datetime.now() - create_data["updated_time"] = create_data["created_time"] - return self.resource.create(data=create_data) - - def show(self, rid, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - data = self.resource.get(filters=where_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - - return data - - def update(self, rid, update_data, where_data=None): - where_data = where_data or {} - where_data.update({"id": rid, "is_deleted": 0}) - update_data["updated_time"] = datetime.datetime.now() - count, data = self.resource.update(filters=where_data, data=update_data) - if data: - data["extend_info"] = json.loads(data["extend_info"]) - - return count, data - - def delete(self, rid): - count, data = self.update(rid, update_data={"is_deleted": 1, "deleted_time": datetime.datetime.now()}) - return count - - def type_resource_id(self, provider_id, name): - data = self.resource.get(filters={"provider_id": provider_id, - "name": name}) - if not data: - raise local_exceptions.ValueValidateError("instance type name", "instance type name %s 不存在" % name) - return data["origin_name"], data - - def convert_resource_id(self, provider_id, name): - data = self.resource.get(filters={"provider_id": provider_id, - "name": name}) - if data: - return data["origin_name"], data - else: - return name, {"cpu": 0, "memory": 0} - - def convert_asset(self, provider, asset_name, usage_type=None): - asset_name = str(asset_name) - filters = {"provider": provider, "origin_name": asset_name} - if usage_type: - filters["type"] = usage_type - 
data = self.resource.get(filters=filters) - if data: - return data["name"], {"cpu": data.get("cpu"), "memory": data.get("memory")} - else: - return asset_name, {"cpu": 0, "memory": 0} - - def convert_resource_name(self, provider, name, usage_type=None): - filters = {"provider": provider, "name": name} - if usage_type: - filters["type"] = usage_type - - data = self.resource.get(filters=filters) - if data: - return data["origin_name"], data - else: - return name, {"cpu": 0, "memory": 0} - - def ora_delete(self, rid): - return self.resource.delete(filters={"id": rid}) diff --git a/apps/common/__init__.py b/apps/common/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/common/convert_keys.py b/apps/common/convert_keys.py deleted file mode 100644 index b1024d88..00000000 --- a/apps/common/convert_keys.py +++ /dev/null @@ -1,606 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.logs import logger -from lib.json_helper import format_json_dumps - - -def validate_convert_key(defines): - x_res = {} - for key, define in defines.items(): - if (not isinstance(define, (basestring, dict))) or isinstance(define, list): - raise ValueError("错误的定义 合法值为 string " - "或json:{'type': , 'allow_null': <0/1>," - "'convert': , 'default':, 'equivalence': }" - "'property: {}'") - - if isinstance(define, dict): - if define.get("type", "string") not in ["string", "json", "int", "float", "list", "bool", "object"]: - raise ValueError("未知的类型约束 %s" % define.get("type")) - if define.get("allow_null", 1) not in [0, 1, '0', '1']: - raise ValueError("allow_null 合法值为 0/1") - else: - define["allow_null"] = int(define.get("allow_null", 1)) - - x_res[key] = define - - return x_res - - -def validate_convert_value(defines): - for key, define in defines.items(): - if not isinstance(define, (basestring, (basestring, bool, int, dict))): - raise ValueError("错误的定义 合法值为 string/bool/int " - 
"或json:{'type': , 'value':}") - - if isinstance(define, dict): - if define.get("type", "string") not in ["string", "json", "int", "float", "list", "bool", "object"]: - raise ValueError("未知的类型约束 %s" % define.get("type")) - - -def format_is_json(data): - if isinstance(data, dict): - return data - elif isinstance(data, basestring): - try: - data = json.loads(data) - except: - if data.startswith("{"): - data = eval(data) - else: - raise ValueError("data: %s is not json " % (data)) - return data - - -def validate_type(value, type): - ''' - - :param value: - :param type: - :return: - ''' - - if (type == "string") and (not isinstance(value, basestring)): - raise ValueError("%s 不是string" % value) - elif (type == "int") and not isinstance(value, int): - try: - value = int(value) - except: - raise ValueError("%s 不是int" % value) - elif (type == "float") and (not isinstance(value, float)): - try: - value = float(value) - except: - raise ValueError("%s 不是浮点类型" % value) - elif (type == "json") and (not isinstance(value, dict)): - try: - value = json.loads(value) - except: - raise ValueError("%s 不是json" % value) - elif (type == "object") and (not isinstance(value, dict)): - try: - value = json.loads(value) - except: - raise ValueError("%s 不是object" % value) - elif (type == "list") and (not isinstance(value, list)): - try: - if isinstance(value, basestring): - if value.startswith("["): - try: - value = json.loads(value) - except: - value = eval(value) - elif "," in value: - value = value.split(",") - elif ";" in value: - value = value.split(";") - else: - _v = " ".join(value.split()) - value = _v.split() - else: - raise ValueError() - except: - raise ValueError("%s 不是list类型" % value) - elif (type == "bool") and (not isinstance(value, bool)): - if isinstance(value, basestring): - if value.lower() == "true": - value = True - else: - value = False - elif isinstance(value, int): - if value: - value = True - else: - value = False - else: - raise ValueError("未知的 bool值: %s" % value) - 
else: - pass - - return value - - -def convert_key(key, value, define, extend=False): - ''' - - :param key: - :param value: - :param define: { - "type": "string", - "convert": "access_key", - "allow_null": 1, - "default": "" - } - :return: - ''' - - if isinstance(define, basestring): - if define == '-': - return {} - key = define or key - else: - if (value is None) and (not define.get("allow_null", 1)): - if extend: - value = None - else: - raise ValueError("key %s 不允许为空" % key) - if value is None: - value = define.get("default") or value - else: - value = validate_type(value, type=define.get("type", "string")) - - if define.get("convert"): - key = define.get("convert") or key - - if value: - return {key: value} - elif isinstance(value, (int, bool)): - return {key: value} - else: - return {} - - -def convert_key_only(key, define): - ''' - - :param key: - :param define: - :return: - ''' - - if isinstance(define, basestring): - key = define or key - else: - if define.get("convert"): - key = define.get("convert") or key - - return key - - -def convert_keys(datas, defines, is_update=False, is_extend=False): - ''' - - :param datas: - :param defines: - :return: - ''' - - result = {} - if is_update: - for key, value in datas.items(): - # 依据data数据进行字段筛选 - if defines.get(key) is not None: - result.update(convert_key(key, value, define=defines[key])) - else: - logger.info("not define key %s, skip it. can use define ('key': '-') remove it" % key) - - return result - - if is_extend: - if not defines: - return result - for key, value in datas.items(): - if defines.get(key) is not None: - result.update(convert_key(key, value, define=defines[key])) - else: - logger.info("data key %s not define, skip." 
% key) - return result - - for key, define in defines.items(): - # 依据定义字段转换,只转换defines中的字段,检查必要字段的传入,未定义字段移除 - _t = convert_key(key, datas.get(key), define=define) - if _t: - result.update(_t) - - return result - - -def ext_convert_keys(datas, defines, is_update=False, is_extend=False): - ''' - - :param datas: - :param defines: - :return: - ''' - - result = {} - - if is_extend: - if not defines: - return result - - for key, define in defines.items(): - if isinstance(define, dict) and define.get("define"): - if datas.get(key): - value = format_is_json(datas.get(key)) - else: - value = ext_convert_keys(datas=datas, defines=define.get("define"), is_extend=is_extend) - if value: - result[key] = value - else: - _t = convert_key(key, datas.get(key), define=define, extend=is_extend) - if _t: - result.update(_t) - - return result - - for key, define in defines.items(): - # 依据定义字段转换,只转换defines中的字段,检查必要字段的传入,未定义字段移除 - if isinstance(define, dict) and define.get("define"): - if datas.get(key): - value = format_is_json(datas.get(key)) - else: - value = convert_keys(datas=datas, defines=define.get("define"), is_extend=is_extend) - if value: - result[key] = value - else: - _t = convert_key(key, datas.get(key), define=define) - if _t: - result.update(_t) - - return result - - -def convert_value(value, define): - ''' - - :param value: - :param define: string or json - example: cidr replace cidr_block - define: cidr_block - or: {"value": "cidr_block", "type": "string"} - :return: - ''' - - if (value is None) or (define is None): - return value - if isinstance(define, (basestring, bool, int)): - value = define or value - elif isinstance(define, dict): - value = define.get("value", value) or value - value = validate_type(value, define.get("type", "string")) - else: - raise ValueError("转换配置错误, 类型错误") - - return value - - -def convert_values(data, define): - ''' - - :param data: example: {"cidr": "cidr_8"} - :param define: {"cidr_8": {"type": "string", "vlue": "192.168.8.0/20"}} - 
:return: - ''' - - res = {} - for key, value in data.items(): - res[key] = convert_value(value, define.get(value)) - - return res - - -def convert_extend_propertys(datas, extend_info, is_update=False): - ''' - - :param datas: - :param extend_info: - :return: - ''' - - def allowed_key(keys, data): - for key in keys: - if key not in data: - raise ValueError("不合法的参数%s" % key) - - if not extend_info: - logger.info("data: %s extend info define is null, so extend keys will be removed" % (format_json_dumps(datas))) - return {} - - ora_ext_info = {} - - if is_update: - for key, define in extend_info.items(): - if isinstance(define, (int, basestring, int, float, bool)): - if isinstance(define, basestring) and define == "-": - logger.info("key: %s removed" % key) - elif key in datas.keys(): - ora_ext_info[key] = datas.get(key) if datas.get(key) is not None else define - else: - logger.info("key: %s, not set in data" % key) - elif isinstance(define, dict): - if key in datas.keys(): - if define.get("value") is not None: - ora_ext_info[key] = datas.get(key) if datas.get(key) is not None else define.get("value") - else: - if datas.get(key) is not None: - ora_ext_info[key] = datas.get(key) - else: - logger.info("key: %s, not set in data" % key) - - if define.get("type") is not None and (key in ora_ext_info.keys()): - ora_ext_info[key] = validate_type(datas.get(key), type=define.get("type", "string")) - - return ora_ext_info - - for key, define in extend_info.items(): - if isinstance(define, (int, basestring, int, float, bool)): - if isinstance(define, basestring) and define == "-": - logger.info("key: %s removed" % key) - else: - value = datas.get(key) if datas.get(key) is not None else define - if value or isinstance(value, (int, bool)): - ora_ext_info[key] = value - else: - logger.info("key %s value is null, remove it" % key) - elif isinstance(define, dict): - value = datas.get(key) - if define.get("value") is not None: - value = value if value is not None else 
define.get("value") - if define.get("type") is not None and (key in datas.keys()): - if value is not None: - value = validate_type(value, type=define.get("type", "string")) - - if value or isinstance(value, (int, bool)): - ora_ext_info[key] = value - else: - logger.info("key %s value is null, remove it" % key) - - return ora_ext_info - - -def _format_type(value, type): - if type == "string": - value = str(value) - elif type in ["json", "object"]: - if not isinstance(value, dict): - try: - value = json.loads(value) - except Exception, e: - raise ValueError("value is not json") - elif type == "list": - if not isinstance(value, list): - value = [value] - elif type == "int": - try: - value = int(value) - except Exception, e: - raise ValueError("value is not int") - - elif type == "float": - try: - value = float(value) - except Exception, e: - raise ValueError("value is not float") - else: - raise ValueError("不支持的output类型") - - return value - - -def output_value(key, define, result): - ''' - - :param value: - :param define: string or json - example: cidr replace cidr_block - define: cidr_block - or: {"value": "cidr_block", "type": "string"} - :return: - ''' - - if (define is None): - logger.info("output %s define is null" % key) - return {} - if isinstance(define, basestring): - value = result.get(define) - elif isinstance(define, dict): - value = result.get(define.get("value")) - value = _format_type(value, type=define.get("type", "string")) - else: - raise ValueError("转换配置错误, 类型错误") - - return {key: value} - - -def output_values(defines, result): - res = {} - for key, define in defines.items(): - res.update(output_value(key, define, result)) - - return res - - -def read_output(key, define, result): - ''' - - :param key: - :param define: - example: cidr replace cidr_block - define: cidr_block - or: {"value": "cidr_block", "type": "string"} - :param result: - :return: - ''' - - if (define is None): - logger.info("output %s define is null" % key) - return {} - if 
isinstance(define, basestring): - value = result - elif isinstance(define, dict): - value = _format_type(result, type=define.get("type", "string")) - else: - raise ValueError("转换配置错误, 类型错误") - - return {key: value} - - -def define_relations_key(key, value, define, is_update=None): - ''' - - :param key: - :param value: - :param define: - :return: - ''' - - if not define: - return True - - if isinstance(define, basestring): - if define == '-': - return True - else: - if (not value) and (not define.get("allow_null", 1)): - if is_update: - return True - raise ValueError("key %s 不允许为空" % key) - - return False - - -def output_line(key, define): - if isinstance(define, basestring): - if define == "-": - return {} - elif isinstance(define, dict): - define = define.get("value") - if not define: - return {} - else: - raise ValueError("output define error") - - return {key: define} - - -class ConvertMetadata(object): - @classmethod - def apply_extend_info(cls, datas, extend_info): - ''' - - :param datas: - :param extend_info: - :return: - ''' - - if not extend_info: - logger.info( - "data: %s extend info define is null, so extend keys will be removed" % (format_json_dumps(datas))) - return {} - - ora_ext_info = {} - for key, define in extend_info.items(): - if isinstance(define, (int, basestring, int, float, bool)): - if isinstance(define, basestring) and define == "-": - logger.info("key: %s removed" % key) - else: - value = datas.get(key) if datas.get(key) is not None else define - if value or isinstance(value, (int, bool)): - ora_ext_info[key] = value - else: - logger.info("key %s value is null, remove it" % key) - elif isinstance(define, dict): - value = datas.get(key) - if define.get("value") is not None: - value = value if value is not None else define.get("value") - if define.get("type") is not None and (key in datas.keys()): - if value is not None: - value = validate_type(value, type=define.get("type", "string")) - - if value or isinstance(value, (int, bool)): - 
ora_ext_info[key] = value - else: - logger.info("key %s value is null, remove it" % key) - - return ora_ext_info - - @classmethod - def upgrade_extend_info(cls, datas, extend_info): - ''' - - :param datas: - :param extend_info: - :return: - ''' - - datas = datas or {} - if not extend_info: - logger.info( - "data: %s extend info define is null, so extend keys will be removed" % (format_json_dumps(datas))) - return {} - - ora_ext_info = {} - for key, define in extend_info.items(): - if isinstance(define, (int, basestring, int, float, bool)): - if isinstance(define, basestring) and define == "-": - logger.info("key: %s removed" % key) - elif key in datas.keys(): - ora_ext_info[key] = datas.get(key) # if datas.get(key) is not None else define - else: - logger.info("key: %s, not set in data" % key) - elif isinstance(define, dict): - if key in datas.keys(): - if define.get("value") is not None: - ora_ext_info[key] = datas.get(key) # if datas.get(key) is not None else define.get("value") - else: - if datas.get(key) is not None: - ora_ext_info[key] = datas.get(key) - else: - logger.info("key: %s, not set in data" % key) - - if define.get("type") is not None and (key in ora_ext_info.keys()): - ora_ext_info[key] = validate_type(ora_ext_info.get(key), type=define.get("type", "string")) - - return ora_ext_info - - @classmethod - def upgrade_keys(cls, datas, defines): - ''' - - :param datas: - :param defines: - :return: - ''' - - result = {} - for key, value in datas.items(): - if defines.get(key) is not None: - result.update(convert_key(key, value, define=defines[key])) - else: - logger.info("not define key %s, skip it. 
can use define ('key': '-') remove it" % key) - - return result - - @classmethod - def upgrade_extend_keys(cls, datas, defines): - ''' - - :param datas: - :param defines: - :return: - ''' - - return cls.upgrade_keys(datas, defines) diff --git a/apps/common/generate.py b/apps/common/generate.py deleted file mode 100644 index 0928124c..00000000 --- a/apps/common/generate.py +++ /dev/null @@ -1,34 +0,0 @@ -# -*- coding: utf-8 -*- - -import json - - -def generate_data(value, model): - if not model: - return value - type = model.get("type", "string").lower() - value = model.get("value", value) - if type == "string": - return str(value) - elif type == "int": - return int(value) - elif type == "float": - return float(value) - elif type == "list": - return [value] - elif type == "bool": - if isinstance(value, bool): - return value - elif value == 0 or value.lower() == "false": - return False - elif value == 1 or value.lower() == "true": - return True - else: - raise ValueError("不合法的bool类型值 %s" % value) - elif type in ["json", "object"]: - if isinstance(value, dict): - return value - else: - return json.loads(value) - - raise ValueError("不支持的类型 %s" % type) diff --git a/apps/common/reverse.py b/apps/common/reverse.py deleted file mode 100644 index d25a3bfc..00000000 --- a/apps/common/reverse.py +++ /dev/null @@ -1,14 +0,0 @@ -# -*- coding: utf-8 -*- - -import json - - -class Reverse(object): - @classmethod - def reverse_json(cls, data): - res = {} - for key, value in data.items(): - res[value] = key - - return res - diff --git a/apps/common/reverse_convert_keys.py b/apps/common/reverse_convert_keys.py deleted file mode 100644 index 27db36b2..00000000 --- a/apps/common/reverse_convert_keys.py +++ /dev/null @@ -1,205 +0,0 @@ -# -*- coding: utf-8 -*- - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.logs import logger -from lib.json_helper import format_json_dumps -from apps.common.reverse import Reverse - - -class 
ReverseProperty(object): - @classmethod - def reverse_key(cls, key, define): - ''' - - :param key: - :param define: - { - "type": "string", - "convert": "access_key", - "allow_null": 1, - "default": "" - } - :return: - ''' - - if isinstance(define, basestring): - if define == '-': - return {} - newkey = define or key - else: - newkey = key - if define.get("convert"): - newkey = define.get("convert") or key - - if newkey: - return {newkey: key} - else: - logger.info("key %s define is null, skip" % key) - return {} - - @classmethod - def reverse_key_equivalence(cls, key, define): - ''' - - :param key: - :param define: - { - "type": "string", - "convert": "access_key", - "allow_null": 1, - "default": "" - } - :return: - ''' - - if isinstance(define, basestring): - if define == '-': - return {} - else: - if define.get("equivalence"): - return {key: define.get("equivalence")} - - return {} - - @classmethod - def reverse_keys(cls, defines): - info = {} - for key, value in defines.items(): - info.update(cls.reverse_key(key, value)) - - return info - - @classmethod - def reverse_equivalence(cls, defines): - info = {} - for key, value in defines.items(): - info.update(cls.reverse_key_equivalence(key, value)) - - return info - - @classmethod - def reverse_output_line(cls, key, define): - if isinstance(define, basestring): - if define == "-": - return {} - newkey = define - elif isinstance(define, dict): - newkey = define.get("value") or key - else: - raise ValueError("output define error") - - return {newkey: key} - - @classmethod - def reverse_output_lines(cls, defines): - info = {} - for key, value in defines.items(): - info.update(cls.reverse_output_line(key, value)) - - return info - - @classmethod - def reverse_extend_key(cls, key, define): - ''' - - :param key: - :param define: - { - "type": "string", - "convert": "access_key", - "allow_null": 1, - "default": "" - } - :return: - ''' - - newkey = key - if isinstance(define, (basestring, int, float, bool)): - if define 
== '-': - return {} - # newkey = define or key - elif isinstance(define, dict): - newkey = key - if define.get("convert"): - newkey = define.get("convert") or key - - if newkey: - return {newkey: key} - else: - logger.info("extend key %s define is null, skip" % key) - return {} - - @classmethod - def reverse_extend_equivalence(cls, key, define): - ''' - - :param key: - :param define: - { - "type": "string", - "convert": "access_key", - "allow_null": 1, - "default": "" - } - :return: - ''' - - if isinstance(define, (basestring, int, float, bool)): - if define == '-': - return {} - elif isinstance(define, dict): - if define.get("equivalence"): - return {key: define.get("equivalence")} - - return {} - - @classmethod - def reverse_extend_keys(cls, defines): - info = {} - for key, value in defines.items(): - info.update(cls.reverse_extend_key(key, value)) - - return info - - @classmethod - def reverse_extend_key_equivalence(cls, defines): - info = {} - for key, value in defines.items(): - info.update(cls.reverse_extend_equivalence(key, value)) - - return info - - @classmethod - def format_keys(cls, defines): - result = {} - for key, define in defines.items(): - result.update(cls.reverse_key(key, define)) - - return result - - @classmethod - def format_value(cls, value, defines): - if not defines: - return value - - if isinstance(defines, (basestring, bool, int, float)): - return defines - - for key, define in defines.items(): - if isinstance(define, (basestring, bool, int, float)): - if value == define: - return key - elif isinstance(define, dict): - t = define.get("value", value) - if isinstance(t, (bool, int, float, basestring)): - if t == value: - return key - elif isinstance(t, dict): - t = json.dumps(t) - logger.info("warn: %s config is json, not apply now" % key) - else: - logger.info("warn: %s config is invalidate, not apply" % key) - - return value diff --git a/apps/common/toyaml.py b/apps/common/toyaml.py deleted file mode 100644 index 46d762d3..00000000 --- 
a/apps/common/toyaml.py +++ /dev/null @@ -1,13 +0,0 @@ -# -*- coding: utf-8 -*- - -import json -import yaml - - -def dict_to_yamlfile(data, filepath): - if not isinstance(data, dict): - raise ValueError("转换yaml的数据不为json") - - yaml.safe_dump(data=yaml.load(json.dumps(data)), - stream=open(filepath, 'w'), - default_flow_style=False) diff --git a/apps/common/validation.py b/apps/common/validation.py deleted file mode 100644 index 996845c1..00000000 --- a/apps/common/validation.py +++ /dev/null @@ -1,13 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import re - - -def validate_column_line(column): - if re.match(r'^[0-9a-zA-Z_.]{1,63}$', column): - return True - else: - raise ValueError("不合法的值, 需满足: [0-9a-zA-Z_.]") - diff --git a/apps/controller/__init__.py b/apps/controller/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/backend_controller.py b/apps/controller/backend_controller.py deleted file mode 100644 index dfa66e7f..00000000 --- a/apps/controller/backend_controller.py +++ /dev/null @@ -1,459 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import copy -import json -import traceback -from lib.logs import logger -from core import validation -from core.controller import BaseController -from lib.uuid_util import get_uuid -from core import local_exceptions -from apps.api.configer.resource import ResourceObject -from apps.controller.source_controller import BaseSourceController -from apps.api.conductor.provider import ProviderConductor -from apps.api.conductor.region import RegionConductor -from apps.api.conductor.type_format import TypeFormat -# from apps.api.conductor.apply_data_conductor import apply_data_builder -# from apps.api.conductor.source_data_conductor import query_data_builder -from apps.api.apibase_backend import ApiBackendBase -from apps.api.configer.region import 
ZoneApi -from apps.controller.configer.model_args import source_columns_outputs - - -def not_null(key, data): - if not data: - raise ValueError("%s 不能为空" % key) - - -def filter_action_output(out_datas, filters): - res = [] - for out_data in out_datas: - for key, value in filters.items(): - s_value = out_data.get(key) - if isinstance(s_value, (basestring, int, float)): - res.append({value: s_value}) - elif isinstance(s_value, list): - for s in s_value: - res.append({value: s}) - else: - logger.info("output value not string/int/list, skip ...") - - return res - - -class BackendClient(object): - @classmethod - def get_id(cls, data): - return data.get("id") or get_uuid() - - @classmethod - def get_provider(cls, data): - ''' - - :param data: - :return: - ''' - - provider = data.get("provider") - not_null("provider", provider) - - provider_data = ProviderConductor().find_provider_info(provider) - provider = provider_data.get("name") - return provider, provider_data - - @classmethod - def get_region(cls, provider, data): - ''' - - :param provider: - :param data: - :return: - ''' - - region = data.get("region_id") or data.get("region") - not_null("region_id", region) - _, region_info = RegionConductor().provider_region_info(provider=provider, region=region) - return region_info.get("name"), region_info - - @classmethod - def get_zone(cls, provider, region, data): - ''' - - :param provider: - :param region: - :param data: - :return: - ''' - - zone = data.get("zone_id") or data.get("zone") - if not zone: - return "", {} - - # not_null("zone", zone) - _, zone_info = RegionConductor().provider_zone_info(provider=provider, region=region, zone=zone) - return zone_info.get("name"), zone_info - - @classmethod - def get_secret(cls, provider, region, data): - secret = data.get("secret") - not_null("secret", secret) - return secret - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region_id", "provider"] - ) - - @classmethod - def 
validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "region_id", "zone_id", "provider", "secret"], - dicts=["extend_info"]) - - @classmethod - def apply_main_infos(cls, data): - ''' - - :param data: - :return: - ''' - - provider, provider_data = cls.get_provider(data) - region, region_info = cls.get_region(provider, data) - zone, zone_info = cls.get_zone(provider, region_info.get("id"), data) - secret = cls.get_secret(provider=provider, region=region_info.get("name"), data=data) - - main_info = dict(provider=provider, region=region, zone=zone, secret=secret) - main_body = dict(provider_data=provider_data, region_info=region_info, zone_info=zone_info) - return main_info, main_body - - @classmethod - def create(cls, resource, data, **kwargs): - ''' - - :param resource: - :param data: - :param kwargs: - :return: - ''' - - rid = cls.get_id(data) - base_info, base_bodys = cls.apply_main_infos(data) - - # 兼容extend info字段 - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) or asset_id - - data["region"] = data.get("region_id") or data.get("region") - data["zone"] = data.get("zone_id") or data.get("zone") - _, result = resource.create(rid=rid, base_info=base_info, base_bodys=base_bodys, - create_data=data, extend_info=extend_info, - asset_id=asset_id, resource_id=resource_id) - - return result - - @classmethod - def is_pre_action(cls, resource_object): - pre_action = resource_object.get("pre_action") - pre_action_output = resource_object.get("pre_action_output") - if pre_action and pre_action_output: - return True - else: - return False - - @classmethod - def source_pre_action(cls, rid, base_info, base_bodys, resource_object, data): - if cls.is_pre_action(resource_object): - pre_action = resource_object.get("pre_action") - pre_action_output = resource_object.get("pre_action_output") - client 
= ApiBackendBase(resource_name=pre_action, resource_workspace=pre_action) - results = client.get_remote_source(rid, base_info, base_bodys, query_data=data) - return filter_action_output(results, filters=pre_action_output) - - return [] - - @classmethod - def data_source_action(cls): - pass - - @classmethod - def format_query_data(cls): - pass - - @classmethod - def get_resource_object(cls, provider, resource_name): - resource_object = ResourceObject().query_one(where_data={"provider": provider, - "resource_type": resource_name}) - if not resource_object: - raise local_exceptions.ResourceConfigError("%s 资源未初始化完成配置" % resource_name) - - return resource_object - - @classmethod - def adder_return(cls, data, results, resource_name): - # 将传入参数返回 - res = [] - for result in results: - if isinstance(result, basestring): - res.append(result) - else: - for key, value in data.items(): - if value and not result.get(key): - result[key] = value - - x_add = source_columns_outputs(resource_name) - x_add.update(result) - res.append(x_add) - - return res - - @classmethod - def skipper_results(cls, provider, region, results, ignore_resources): - ''' - - :param provider: name - :param region: id - :return: - ''' - ignore_resources = TypeFormat.f_list(ignore_resources) - register_zones = ZoneApi().region_zones(region, provider) - - res = [] - for result in results: - if isinstance(result, basestring): - res.append(result) - else: - if result.get("zone") and (result.get("zone") not in register_zones): - logger.info("resource: %s ,zone: %s searched not in register zone, skip it" % ( - result.get("id"), result.get("zone"))) - elif result.get("zone_id") and (result.get("zone_id") not in register_zones): - logger.info("resource: %s ,zone id: %s searched not in register zone, skip it" % ( - result.get("id"), result.get("zone_id"))) - elif result.get("id") and result.get("id") in ignore_resources: - logger.info("ignore_resources skip id: %s" % (result.get("id"))) - else: - if 
result.get("asset_id") == result.get("id"): - result["id"] = "" - res.append(result) - - return res - - @classmethod - def filter_data(cls, data, pre_results): - resource_ids = TypeFormat.f_list(data.get("id")) - ignore_resources = TypeFormat.f_list(data.get("ignore_ids")) - - res = [] - if resource_ids: - for result in pre_results: - if result.get("id") in resource_ids: - t_data = copy.deepcopy(data) - t_data.update(result) - res.append(t_data) - - return res - - if ignore_resources: - for result in pre_results: - if result.get("id") and result.get("id") in ignore_resources: - logger.info("skip resource id : %s" % (result.get("id"))) - else: - t_data = copy.deepcopy(data) - t_data.update(result) - res.append(t_data) - - return res - - for result in pre_results: - t_data = copy.deepcopy(data) - t_data.update(result) - res.append(t_data) - - return res or [data] - - @classmethod - def one_query(cls, resource, rid, data, base_info, base_bodys): - provider_object = base_bodys["provider_data"] - region_object = base_bodys["region_info"] - results = resource.get_remote_source(rid, base_info, base_bodys, query_data=data) - results = cls.skipper_results(provider=provider_object["name"], region=region_object["id"], - results=results, ignore_resources=data.get("ignore_resources")) - results = cls.adder_return(data, results, resource_name=resource.resource_name) - return results - - @classmethod - def is_need_flush_list(cls, data): - if "ignore_ids" in data.keys() and data.get("ignore_ids"): - return True - - for key in data.keys(): - if key not in ["region", "region_id", "zone", "zone_id", "secret", "provider"] and data.get(key): - return False - - return True - - @classmethod - def format_filter_data(cls, data): - res = [] - if data.get("id"): - ids = TypeFormat.f_list(data.pop("id", None)) - for xid in ids: - tmp = copy.deepcopy(data) - tmp["id"] = xid - res.append(tmp) - - return res - else: - return [data] - - @classmethod - def skip_ingore_ids(cls, result, data): - 
ignore_ids = TypeFormat.f_list(data.get("ignore_ids")) - if ignore_ids: - res = [] - for xres in result: - if isinstance(xres, dict): - if xres.get("id") in ignore_ids: - logger.info("skip ignore_id : %s" % (result.get("id"))) - else: - res.append(xres) - else: - res.append(xres) - else: - return result - - @classmethod - def source_query_datas(cls, resource, rid, data, base_info, base_bodys): - query_datas = cls.format_filter_data(data) - result = [] - for query_data in query_datas: - x_res = cls.one_query(resource, rid, query_data, base_info, base_bodys) - result += x_res - - result = cls.skip_ingore_ids(result, data) - return result - - @classmethod - def main_query(cls, resource, rid, data, base_info, base_bodys): - provider_object = base_bodys["provider_data"] - resource_object = cls.get_resource_object(provider=provider_object["name"], - resource_name=resource.resource_name) - - if cls.is_pre_action(resource_object): - logger.info("flush pre action ....") - pre_results = cls.source_pre_action(rid, base_info, base_bodys, resource_object, data) - logger.info("pre action result : %s" % (json.dumps(pre_results))) - query_datas = cls.filter_data(data, pre_results) - - result = [] - for query_data in query_datas: - try: - logger.info("try query %s" % (json.dumps(query_data))) - x_res = cls.one_query(resource, rid, query_data, base_info, base_bodys) - except: - logger.info(traceback.format_exc()) - logger.info( - "data: %s flush source after list, filter data error, may skip ..." 
% (json.dumps(query_data))) - x_res = [] - result += x_res - - return result - else: - return cls.source_query_datas(resource=resource, rid=rid, data=data, - base_info=base_info, base_bodys=base_bodys) - - # if cls.is_need_flush_list(data) and cls.is_pre_action(resource_object): - # pre_results = cls.source_pre_action(rid, base_info, base_bodys, resource_object, data) - # query_datas = cls.filter_data(data, pre_results) - # - # result = [] - # for query_data in query_datas: - # x_res = cls.one_query(resource, rid, query_data, base_info, base_bodys) - # result += x_res - # - # return result - # else: - # return cls.source_query_datas(resource=resource, rid=rid, data=data, - # base_info=base_info, base_bodys=base_bodys) - - @classmethod - def query(cls, resource, data, **kwargs): - ''' - - :param resource: - :param data: - :param kwargs: - :return: - ''' - - rid = cls.get_id(data) - base_info, base_bodys = cls.apply_main_infos(data) - - # 兼容extend info字段 - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - data["region"] = data.get("region_id") or data.get("region") - data["zone"] = data.get("zone_id") or data.get("zone") - return cls.main_query(resource, rid, data, base_info, base_bodys) - - -class BackendAddController(BaseController): - name = "BackendAdd" - resource_describe = "BackendAdd" - allow_methods = ("POST",) - resource = None - - def before_handler(self, request, data, **kwargs): - BackendClient.not_null(data) - BackendClient.validate_keys(data) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - return BackendClient.create(resource=self.resource, data=data) - - -class BackendDeleteController(BaseController): - name = "Backend" - resource_describe = "Backend" - allow_methods = ("POST",) - resource = None - - def before_handler(self, request, data, **kwargs): - validation.not_allowed_null(data=data, - keys=["id"] - ) - - 
validation.validate_string("id", data.get("id")) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - rid = data.pop("id", None) - result = self.resource.destroy(rid) - return {"count": result, "id": rid} - - -class BackendSourceController(BaseController): - name = "Backend" - resource_describe = "Backend" - allow_methods = ("POST",) - resource = None - - def before_handler(self, request, data, **kwargs): - BackendClient.not_null(data) - BackendClient.validate_keys(data) - - def response_templete(self, data): - return [] - - def main_response(self, request, data, **kwargs): - return BackendClient.query(resource=self.resource, data=data) diff --git a/apps/controller/configer/__init__.py b/apps/controller/configer/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/configer/commonkey_controller.py b/apps/controller/configer/commonkey_controller.py deleted file mode 100644 index 1b25d6f9..00000000 --- a/apps/controller/configer/commonkey_controller.py +++ /dev/null @@ -1,127 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core import local_exceptions as exception_common -from apps.api.configer.commonkey import CommonKeyObject - - - -class CommonKeyController(BackendController): - resource = CommonKeyObject() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "resource", "key", "enabled"]) - return self.resource.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): 
- ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "resource", "key", "property"]) - validation.not_allowed_null(data=data, - keys=["resource", "key", "property"] - ) - - validation.validate_string("id", data.get("id")) - validation.validate_string("resource", data["resource"]) - validation.validate_string("key", data.get("key")) - validation.validate_string("property", data.get("property")) - - - def create(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - create_data = {"id": data.get("id") or get_uuid(), - "resource": data["resource"], - "key": data.get("key"), - "property": data.get("property") - } - - return self.resource.create(create_data) - - -class CommonKeyIdController(BackendIdController): - resource = CommonKeyObject() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.show(rid) - - def before_handler(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["resource", "key", "property", "enabled"]) - - validation.validate_string("resource", data["resource"]) - validation.validate_string("key", data.get("key")) - validation.validate_string("property", data.get("property")) - validation.validate_bool("enabled", data.get("enabled")) - - def update(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.delete(rid) - - diff --git a/apps/controller/configer/config_controller.py b/apps/controller/configer/config_controller.py 
deleted file mode 100644 index 67274ac8..00000000 --- a/apps/controller/configer/config_controller.py +++ /dev/null @@ -1,211 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from apps.common.convert_keys import validate_convert_key -from apps.common.convert_keys import validate_convert_value -from apps.api.configer.value_config import ValueConfigObject - - -class ConfigController(BackendController): - resource = ValueConfigObject() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "resource", "provider", "property", "enabled"]) - - filter_string = None - for key in ["resource", "provider", "property"]: - if data.get(key): - if filter_string: - filter_string += 'and ' + key + " like '%" + data.get(key) + "%' " - else: - filter_string = key + " like '%" + data.get(key) + "%' " - data.pop(key, None) - - return self.resource.list(filters=data, page=page, - filter_string=filter_string, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "resource", "property", "value_config"]) - validation.not_allowed_null(data=data, - keys=["provider", "property", "resource"] - - ) - - validation.validate_string("id", data.get("id")) - validation.validate_string("provider", data["provider"]) - validation.validate_dict("value_config", data.get("value_config")) - validation.validate_string("property", data.get("property")) - validation.validate_string("resource", 
data.get("resource")) - - def create(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - value_config = validation.validate_dict("value_config", data.get("value_config")) or {} - validate_convert_value(value_config) - create_data = {"id": data.get("id") or get_uuid(), - "resource": data["resource"], - "provider": data.get("provider"), - "property": data.get("property"), - "value_config": json.dumps(value_config) - } - - return self.resource.create(create_data) - - -class ConfigIdController(BackendIdController): - resource = ValueConfigObject() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.show(rid) - - def before_handler(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["provider", "resource", "property", "value_config"]) - - validation.validate_bool("enabled", data.get("enabled")) - validation.validate_string("id", data.get("id")) - validation.validate_string("provider", data.get("provider")) - validation.validate_dict("value_config", data.get("value_config")) - validation.validate_string("property", data.get("property")) - validation.validate_string("resource", data.get("resource")) - - def update(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - if data.get("value_config") is not None: - value_config = validation.validate_dict("value_config", data.get("value_config")) - validate_convert_value(value_config) - data["value_config"] = json.dumps(value_config) - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.delete(rid) - - -class 
ConfigAttrController(BackendIdController): - resource = ValueConfigObject() - allow_methods = ('GET',) - - def get_configs(self, data): - data["resource"] = data.pop("resource_type", None) - _, config_datas = self.resource.list(filters=data) - - res = {} - for xdata in config_datas: - res[xdata["property"]] = xdata.get("value_config") - - return res - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - validation.allowed_key(data, ["resource_type", "provider"]) - validation.not_allowed_null(["resource_type", "provider"], data) - - configs = self.get_configs(data) - - columns = configs.keys() - res = [] - for column in columns: - res.append({"id": column, "name": column}) - - return {"resource": res, "attribute": configs} - - -class ConfigListController(BackendIdController): - resource = ValueConfigObject() - allow_methods = ('GET',) - - def get_configs(self, data): - data["resource"] = data.pop("resource_type", None) - config_data = self.resource.query_one(where_data=data) - - return config_data.get("value_config") or {} - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - validation.allowed_key(data, ["resource_type", "provider", "property"]) - validation.not_allowed_null(["resource_type", "provider", "property"], data) - - configs = self.get_configs(data) - - res = [] - for key, value in configs.items(): - res.append({"id": key, "name": key, "origin_name": value}) - return {"resource": res} diff --git a/apps/controller/configer/defines.py b/apps/controller/configer/defines.py deleted file mode 100644 index 5deac20e..00000000 --- a/apps/controller/configer/defines.py +++ /dev/null @@ -1,812 +0,0 @@ -# _ coding:utf-8 _*_ - - -xml_register = { - "region": { - "apply": { - "path": "/terraform/v1/az/backend/region/apply", - "method": "POST", - "notnull": ["asset_id", "provider"], - "inputParameters": ['id', 'name', 'provider', 
'asset_id', 'extend_info', 'secret'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/az/backend/region/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/az/backend/region/source", - "method": "POST", - "notnull": [], - "inputParameters": ['id', 'provider', 'asset_id', 'name'], - "outputParameters": ['errorCode', 'errorMessage', 'name', 'id', 'asset_id', 'extend_info', - 'provider'] - }, - }, - "az": { - "apply": { - "path": "/terraform/v1/az/backend/zone/apply", - "method": "POST", - "notnull": ["asset_id", "provider"], - "inputParameters": ['id', 'name', 'provider', 'asset_id', "region_id", 'extend_info', 'secret'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/az/backend/zone/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/az/backend/zone/source", - "method": "POST", - "notnull": [], - "inputParameters": ['id', 'provider', 'asset_id', 'name', "region_id"], - "outputParameters": ['errorCode', 'errorMessage', 'name', 'id', 'asset_id', 'extend_info', "region_id", - 'provider'] - }, - }, - "vpc": { - "apply": { - "path": "/terraform/v1/network/backend/vpc/apply", - "method": "POST", - "notnull": ["name", "cidr", "region", "provider"], - "inputParameters": ['id', 'name', 'secret', 'provider', "region_id", 'cidr', 'asset_id', - 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/vpc/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": 
"/terraform/v1/network/backend/vpc/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'cidr', 'id'], - "outputParameters": ['errorCode', 'errorMessage', 'name', 'cidr', 'asset_id', "region_id", 'secret', - 'provider'] - }, - }, - "subnet": { - "apply": { - "path": "/terraform/v1/network/backend/subnet/apply", - "method": "POST", - "notnull": ['name', 'provider', 'vpc_id', "zone_id", "region_id", 'cidr'], - "inputParameters": ['id', 'name', 'secret', 'provider', 'vpc_id', "zone_id", "region_id", 'cidr', - 'asset_id', 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/subnet/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/subnet/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', "zone_id", 'cidr', 'vpc_id', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'errorCode', 'name', "zone_id", 'asset_id', 'vpc_id', - 'cidr', 'errorMessage'] - } - }, - "route_table": { - "apply": { - "path": "/terraform/v1/network/backend/route_table/apply", - "method": "POST", - "notnull": ['name', 'provider', 'vpc_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'vpc_id', "zone_id", "region_id", - 'asset_id', 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/route_table/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/route_table/source", - "method": "POST", - "notnull": ["region_id", 
"provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'name', 'asset_id', 'errorMessage', 'errorCode', - 'vpc_id'] - } - }, - "route_entry": { - "apply": { - "path": "/terraform/v1/network/backend/route_entry/apply", - "method": "POST", - "notnull": ['name', 'provider', 'vpc_id', 'destination', 'route_table_id', 'next_type', 'next_hub', - "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'vpc_id', 'destination', 'route_table_id', - 'next_type', 'next_hub', "zone_id", "region_id", 'resource_id', 'asset_id', 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/route_entry/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/route_entry/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'route_table_id', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'next_hub', 'asset_id', 'errorMessage', - 'errorCode', 'name', 'destination', 'next_type', 'route_table_id'] - } - }, - "peer_connection": { - "apply": { - "path": "/terraform/v1/network/backend/peer_connection/apply", - "method": "POST", - "notnull": ['name', 'provider', 'vpc_id', 'peer_vpc_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'vpc_id', 'peer_vpc_id', 'peer_region', "zone_id", - "region_id", 'asset_id', 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/peer_connection/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": 
"/terraform/v1/network/backend/peer_connection/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'peer_vpc_id', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'name', 'asset_id', 'errorMessage', 'errorCode', - 'vpc_id', 'peer_vpc_id', 'peer_region'] - } - }, - "security_group": { - "apply": { - "path": "/terraform/v1/network/backend/security_group/apply", - "method": "POST", - "notnull": ['name', 'provider', 'vpc_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'vpc_id', "zone_id", "region_id", - 'asset_id', 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/security_group/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/security_group/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'name', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'vpc_id'] - } - }, - "security_group_rule": { - "apply": { - "path": "/terraform/v1/network/backend/security_group_rule/apply", - "method": "POST", - "notnull": ['provider', 'security_group_id', "region_id", 'type', 'cidr_ip', 'ip_protocol', 'ports', 'policy'], - "inputParameters": ['id', 'name', 'secret', 'provider', 'security_group_id', "zone_id", "region_id", 'type', - 'cidr_ip', 'ip_protocol', 'ports', 'policy', 'resource_id', 'asset_id', 'description', - 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/security_group_rule/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - 
"outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/security_group_rule/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'security_group_id'], - "outputParameters":["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', - 'errorCode', 'name', 'security_group_id', 'type', 'cidr_ip', - 'ip_protocol', 'ports', 'policy', 'description', - 'from_port', 'to_port', 'priority', 'nic_type'] - } - }, - "ipaddress_group": { - "apply": { - "path": "/terraform/v1/network/backend/ip_group/apply", - "method": "POST", - "notnull": ['name', 'provider', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', "addresses", "zone_id", "region_id", - 'asset_id', 'extend_info'], - "outputParameters": ["errorMessage", "errorCode", "id", "asset_id"] - }, - "destroy": { - "path": "/terraform/v1/network/backend/ip_group/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/ip_group/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'name', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'addresses'] - } - }, - "nat": { - "apply": { - "path": "/terraform/v1/network/backend/nat/apply", - "method": "POST", - "notnull": ['name', 'provider', 'vpc_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'vpc_id', 'subnet_id', 'eip', "zone_id", "region_id", - 'asset_id', 'extend_info', 'bandwidth'], - "outputParameters": ['errorMessage', 'errorCode', 'ipaddress', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/network/backend/nat/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": 
["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/nat/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'name','id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'vpc_id', 'eip', 'ipaddress', 'bandwidth'] - } - }, - "eip": { - "apply": { - "path": "/terraform/v1/network/backend/eip/apply", - "method": "POST", - "notnull": ['name', 'provider', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', "zone_id", "region_id", 'asset_id', - 'extend_info', "charge_type"], - "outputParameters": ['errorMessage', 'errorCode', 'ipaddress', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/network/backend/eip/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/eip/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'ipaddress', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'ipaddress', "charge_type"] - } - }, - "eip_association": { - "apply": { - "path": "/terraform/v1/network/backend/eip_association/apply", - "method": "POST", - "notnull": ['provider', 'eip_id', 'instance_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'eip_id', 'instance_id', 'private_ip', "zone_id", - "region_id", 'resource_id', 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/network/backend/eip_association/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", 
"id"] - } - }, - "ccn": { - "apply": { - "path": "/terraform/v1/network/backend/ccn/apply", - "method": "POST", - "notnull": ['name', 'provider', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', "zone_id", "region_id", 'asset_id', - 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'ipaddress', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/network/backend/ccn/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/network/backend/ccn/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name'] - } - }, - "ccn_attach": { - "apply": { - "path": "/terraform/v1/network/backend/ccn_attach/apply", - "method": "POST", - "notnull": ['provider', 'ccn_id', 'instance_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'ccn_id', 'instance_id', 'instance_type', - 'instance_region', "zone_id", "region_id", 'resource_id', 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/network/backend/ccn_attach/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "ccn_bandwidth": { - "apply": { - "path": "/terraform/v1/network/backend/ccn_bandwidth/apply", - "method": "POST", - "notnull": ['provider', 'ccn_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'ccn_id', 'from_region', 'dest_region', 'bandwidth', - "zone_id", "region_id", 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": 
"/terraform/v1/network/backend/ccn_bandwidth/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "disk": { - "apply": { - "path": "/terraform/v1/storage/backend/disk/apply", - "method": "POST", - "notnull": ['name', 'provider', 'type', 'size', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'type', 'size', "zone_id", "region_id", - 'asset_id', 'extend_info', "charge_type"], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/storage/backend/disk/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/storage/backend/disk/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', - 'name', 'instance_id', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'type', 'size', "zone_id", "charge_type", "instance_id"] - } - }, - "disk_attach": { - "apply": { - "path": "/terraform/v1/storage/backend/disk_attach/apply", - "method": "POST", - "notnull": ['name', 'provider', 'disk_id', 'instance_id', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'disk_id', 'instance_id', "zone_id", "region_id", - 'asset_id', 'resource_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/storage/backend/disk/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "object_storage": { - "apply": { - "path": "/terraform/v1/storage/backend/object_storage/apply", - "method": "POST", - "notnull": ['name', 'provider', "region_id"], - 
"inputParameters": ['id', 'name', 'secret', 'provider', 'acl', 'appid', "zone_id", "region_id", - 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'url', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/storage/backend/object_storage/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/storage/backend/object_storage/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'acl', 'url'] - } - }, - "instance": { - "apply": { - "path": "/terraform/v1/vm/backend/instance/apply", - "method": "POST", - "notnull": ['name', 'provider', 'subnet_id', 'hostname', 'image', 'instance_type', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'subnet_id', 'hostname', 'image', 'instance_type', - 'disk_type', 'disk_size', 'password', 'security_group_id', 'vpc_id', 'power_action', - 'asset_id', 'data_disks', "zone_id", "region_id", 'extend_info', "charge_type"], - "outputParameters": ['errorMessage', 'errorCode', 'ipaddress', 'cpu', 'memory', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/vm/backend/instance/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/vm/backend/instance/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'ipaddress', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'subnet_id', 'hostname', 'ipaddress', 'image', 'instance_type', 'disk_type', - 'disk_size', 'password', 
'security_group_id', 'vpc_id', 'data_disks', "zone_id", - 'power_action', 'force_delete', "charge_type"] - } - }, - "network_interface": { - "apply": { - "path": "/terraform/v1/storage/backend/network_interface/apply", - "method": "POST", - "notnull": ['name', 'provider', 'subnet_id', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'ipaddress', 'subnet_id', 'vpc_id', - 'security_group_id', "zone_id", "region_id", 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'ipaddress', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/storage/backend/network_interface/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/storage/backend/network_interface/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'ipaddress', 'vpc_id', 'subnet_id', 'security_group_id'] - } - }, - "network_interface_attach": { - "apply": { - "path": "/terraform/v1/vm/backend/network_interface/apply", - "method": "POST", - "notnull": ['name', 'provider', 'network_interface_id', 'instance_id', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'network_interface_id', 'instance_id', "zone_id", - "region_id", 'extend_info', 'asset_id', 'resource_id'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/vm/backend/network_interface/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "lb": { - "apply": { - "path": "/terraform/v1/loadbalance/backend/lb/apply", - "method": "POST", - "notnull": ['name', 'provider', 
'subnet_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'subnet_id', 'network_type', 'vpc_id', "zone_id", - "region_id", 'ipaddress', 'asset_id', 'extend_info', "charge_type"], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/loadbalance/backend/lb/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/loadbalance/backend/lb/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'name', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'network_type', 'vpc_id', 'subnet_id', 'ipaddress', "charge_type"] - } - }, - "lb_listener": { - "apply": { - "path": "/terraform/v1/loadbalance/backend/lb_listener/apply", - "method": "POST", - "notnull": ['provider', 'lb_id', 'port', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'lb_id', 'port', 'protocol', 'backend_port', - 'health_check', 'health_check_uri', "zone_id", "region_id", 'asset_id', - 'extend_info', 'default_action'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/loadbalance/backend/lb_listener/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/loadbalance/backend/lb_listener/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'lb_id', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', - 'name', 'lb_id', 'port', 'protocol', 'health_check', 'health_check_uri', - 'default_action'] - } - }, - 
"lb_rule": { - "apply": { - "path": "/terraform/v1/loadbalance/backend/lb_rule/apply", - "method": "POST", - "notnull": ['provider', "region_id", 'lb_id'], - "inputParameters": ['id', 'provider', 'secret', "region_id", "zone_id", 'listener_id', 'extend_info', - 'lb_id', 'security_group_id', 'frontend_port', 'name', - 'asset_id', 'action', 'condition', 'resource_id'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/loadbalance/backend/lb_rule/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/loadbalance/backend/lb_rule/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'listener_id', 'lb_id', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', - 'name', 'lb_id', 'listener_id', "region_id", "zone_id", - 'security_group_id', 'frontend_port'] - } - }, - "lb_server_group": { - "apply": { - "path": "/terraform/v1/loadbalance/backend/lb_server_group/apply", - "method": "POST", - "notnull": ['provider', "region_id", 'name', 'lb_id'], - "inputParameters": ['id', 'provider', 'secret', "region_id", "zone_id", - 'name', 'lb_id', "asset_id", 'instance_id', 'port'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/loadbalance/backend/lb_server_group/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/loadbalance/backend/lb_server_group/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', "id", - 'asset_id', 'instance_id', 'lb_id'], - "outputParameters": ["region_id", 'secret', 
'provider', 'asset_id', - 'errorMessage', 'errorCode', 'name', - 'lb_id', 'instance_id', "region_id", "zone_id", 'port'] - } - }, - "lb_attach": { - "apply": { - "path": "/terraform/v1/loadbalance/backend/lb_attach/apply", - "method": "POST", - "notnull": ['provider', 'lb_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'lb_id', 'listener_id', 'backend_servers', - 'instance_id', 'weight', 'port', "zone_id", "region_id", 'extend_info', - 'asset_id', 'group_id', 'resource_id'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/loadbalance/backend/lb_attach/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/loadbalance/backend/lb_attach/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', - 'asset_id', 'instance_id', 'lb_id'], - "outputParameters":["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'lb_id', 'listener_id', 'backend_servers', 'instance_id', 'weight', 'port'] - } - }, - "mysql": { - "apply": { - "path": "/terraform/v1/database/backend/mysql/apply", - "method": "POST", - "notnull": ['name', 'provider', 'subnet_id', 'version', 'instance_type', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'subnet_id', 'user', 'password', 'port', - 'disk_type', 'disk_size', 'version', 'instance_type', 'vpc_id', 'security_group_id', - 'second_slave_zone', 'first_slave_zone', "zone_id", "region_id", 'asset_id', - 'extend_info', "charge_type"], - "outputParameters": ['errorMessage', 'errorCode', 'asset_id', 'user', 'password', 'ipaddress', 'port', - 'id'] - }, - "destroy": { - "path": "/terraform/v1/database/backend/mysql/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - 
"outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/database/backend/mysql/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'ipaddress', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'subnet_id', 'user', 'password', 'port', 'disk_type', 'disk_size', 'version', - 'instance_type', 'vpc_id', 'security_group_id', 'second_slave_zone', - 'first_slave_zone', "zone_id", 'ipaddress', "charge_type"] - } - }, - "mysql_database": { - "apply": { - "path": "/terraform/v1/database/backend/mysql_database/apply", - "method": "POST", - "notnull": ['name', 'provider', 'mysql_id', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'mysql_id', "zone_id", "region_id", 'resource_id', - 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/database/backend/mysql_database/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "mysql_account": { - "apply": { - "path": "/terraform/v1/database/backend/mysql_account/apply", - "method": "POST", - "notnull": ['name', 'provider', 'mysql_id', 'password', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'mysql_id', 'password', "zone_id", "region_id", - 'resource_id', 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/database/backend/mysql_account/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "mysql_privilege": { - "apply": { - "path": "/terraform/v1/database/backend/mysql_privilege/apply", - "method": "POST", - "notnull": 
['provider', 'mysql_id', 'username', 'database', 'privileges', "region_id"], - "inputParameters": ['id', 'secret', 'provider', 'mysql_id', 'username', 'database', 'privileges', "zone_id", - "region_id", 'resource_id', 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/database/backend/mysql_privilege/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "mysql_backup": { - "apply": { - "path": "/terraform/v1/database/backend/mysql_backup/apply", - "method": "POST", - "notnull": ['provider', 'mysql_id', 'backup_model', 'backup_time', "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'mysql_id', 'backup_model', 'backup_time', "zone_id", - "region_id", 'resource_id', 'asset_id', 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'id', 'asset_id'] - }, - "destroy": { - "path": "/terraform/v1/database/backend/mysql_backup/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - } - }, - "db_subnet_group": { - "apply": { - "path": "/terraform/v1/database/backend/db_subnet_group/apply", - "method": "POST", - "notnull": ['name', 'provider', 'subnet_id', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'subnet_id', - "zone_id", "region_id", 'asset_id', - 'extend_info'], - "outputParameters": ['errorMessage', 'errorCode', 'asset_id', 'id'] - }, - "destroy": { - "path": "/terraform/v1/database/backend/db_subnet_group/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/database/backend/db_subnet_group/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 
'provider', 'asset_id', 'id'], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'subnet_id', "zone_id", ] - } - }, - "redis": { - "apply": { - "path": "/terraform/v1/database/backend/redis/apply", - "method": "POST", - "notnull": ['provider', 'instance_type', "zone_id", "region_id"], - "inputParameters": ['id', 'name', 'secret', 'provider', 'password', 'port', - 'version', 'instance_type', 'vpc_id', 'security_group_id', - "zone_id", "region_id", 'asset_id', 'extend_info', "charge_type"], - "outputParameters": ['errorMessage', 'errorCode', 'asset_id', 'ipaddress', 'port', - 'id'] - }, - "destroy": { - "path": "/terraform/v1/database/backend/redis/destroy", - "method": "POST", - "notnull": ["id"], - "inputParameters": ["id"], - "outputParameters": ["errorMessage", "errorCode", "id"] - }, - "query": { - "path": "/terraform/v1/database/backend/redis/source", - "method": "POST", - "notnull": ["region_id", "provider"], - "inputParameters": ["region_id", 'secret', 'provider', 'asset_id', "id"], - "outputParameters": ["region_id", 'secret', 'provider', 'asset_id', 'errorMessage', 'errorCode', 'name', - 'subnet_id', 'password', 'port', 'version', - 'instance_type', 'vpc_id', 'security_group_id', "zone_id", 'ipaddress', "charge_type"] - } - }, -} - -# print(xml_register.keys()) -# res = {} -# for key, define in xml_register.items(): -# res[key] = define.keys() -# -# print res \ No newline at end of file diff --git a/apps/controller/configer/generate_xml.py b/apps/controller/configer/generate_xml.py deleted file mode 100644 index ab922df8..00000000 --- a/apps/controller/configer/generate_xml.py +++ /dev/null @@ -1,285 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import copy -from xml.dom import minidom -from lib.logs import logger -from core.controller import BackendController -from apps.api.configer.resource import ResourceObject -from 
apps.controller.configer.defines import xml_register - -template = ''' - - - - - - - - /terraformIndex - - - - - - - - - - - - - - - - - - - - - - - - - -''' - - -def fetch_columns(provider, defines): - result = [] - if not defines: - return result - - for key, define in defines.items(): - if isinstance(define, basestring): - _t = {key: "[%s N]" % provider} - result.append(_t) - else: - try: - if define.get("define"): - if define.get("convert"): - x = "N" if int(define.get("allow_null", "1")) else "Y" - _t = {key: "[%s %s]" % (provider, x)} - result.append(_t) - result += fetch_columns(provider=provider, defines=define.get("define")) - else: - x = "N" if int(define.get("allow_null", "1")) else "Y" - _t = {key: "[%s %s]" % (provider, x)} - result.append(_t) - except Exception, e: - raise e - - - return result - - -class ResBase(object): - def resource_objets(self, name): - _, datas = ResourceObject().list(filters={"resource_type": name}) - return datas - - def zip_columns(self, defines): - result = {} - for define in defines: - key = define.keys()[0] - if key in result.keys(): - result[key] = result[key] + "," + define[key] - else: - result[key] = define[key] - - return result - - def revert_common_columns(self, sys_define, defines): - res = {} - for s_define in sys_define: - x_desc = defines.get(s_define) or "" - if x_desc: - x_desc = "," + str(x_desc) - res[s_define] = "common" + x_desc - - defines.update(res) - return defines - - def register_resource_infos(self, datas): - x_columns = [] - x_output = [] - for data in datas: - try: - x_columns += fetch_columns(provider=data.get("provider"), defines=data.get("resource_property")) - x_output += fetch_columns(provider=data.get("provider"), defines=data.get("resource_output")) - except Exception, e: - raise e - - columns = self.zip_columns(x_columns) - output = self.zip_columns(x_output) - - return columns, output - - def _upgrade_apply_info(self, datas, inputkeys, outputkey): - inputkeys = inputkeys + ["id", 
"asset_id"] - inputkeys = list(set(inputkeys)) - outputkey = outputkey + ["id", "asset_id"] - outputkey = list(set(outputkey)) - - x_input, x_output = self.register_resource_infos(datas) - inputkeys = self.revert_common_columns(inputkeys, x_input) - outputkey = self.revert_common_columns(outputkey, x_output) - return inputkeys, outputkey - - def upgrade_apply_info(self, name, define): - datas = self.resource_objets(name) - inputkeys, outputkey = self._upgrade_apply_info(datas, - inputkeys=define.get("inputParameters"), - outputkey=define.get("outputParameters")) - - - p = [] - for data in datas: - p.append(data["provider"]) - - define["inputParameters"] = inputkeys - define["outputParameters"] = outputkey - define["provider"] = ",".join(p) - - return define - - def register_source_infos(self, datas): - x_columns = [] - x_output = [] - for data in datas: - x_columns += fetch_columns(provider=data.get("provider"), defines=data.get("data_source")) - - x_output += fetch_columns(provider=data.get("provider"), defines=data.get("data_source_output")) - - columns = self.zip_columns(x_columns) - output = self.zip_columns(x_output) - return columns, output - - def _upgrade_query_info(self, datas, inputkeys, outputkey): - inputkeys = inputkeys + ["id", "asset_id"] - inputkeys = list(set(inputkeys)) - outputkey = outputkey + ["id", "asset_id"] - outputkey = list(set(outputkey)) - - x_input, x_output = self.register_source_infos(datas) - inputkeys = self.revert_common_columns(inputkeys, x_input) #list(set(inputkeys + x_input)) - outputkey = self.revert_common_columns(outputkey, x_output) #list(set(outputkey + x_output)) - return inputkeys, outputkey - - def upgrade_query_info(self, name, define): - datas = self.resource_objets(name) - inputkeys, outputkey = self._upgrade_query_info(datas, - inputkeys=define.get("inputParameters"), - outputkey=define.get("outputParameters")) - - define["inputParameters"] = inputkeys - define["outputParameters"] = outputkey - - p = [] - for data 
in datas: - p.append(data["provider"]) - define["provider"] = ",".join(p) - - return define - - def plugin_tag(self, name): - return '' % name - - def interface(self, action, path, method, description): - description = description or "common" - return '' % ( - action, path, method, description) - - def inputparameter(self, notnull, columns): - result = '' - x_columns = columns if isinstance(columns, list) else columns.keys() - - for key in x_columns: - description = "common" if isinstance(columns, list) else columns.get(key) - - if key == "password": - x = 'password' % description - else: - if key in notnull: - x = '%s' % ( - description, key) - else: - x = '%s' % ( - description, key) - - result += x - - x_result = "%s" % result - return x_result - - def outputparameter(self, columns): - result = '' - x_columns = columns if isinstance(columns, list) else columns.keys() - - for key in x_columns: - description = "common" if isinstance(columns, list) else columns.get(key) - if key == "password": - x = 'password' % description - else: - x = '%s' % (description, key) - - result += x - - x_result = "%s" % result - return x_result - - def format_plugin_interface(self, name, defines): - result = '' - for key, define in defines.items(): - if key not in ["region", "az"]: - if key == "apply": - define = self.upgrade_apply_info(name, define) - elif key == "query": - define = self.upgrade_query_info(name, define) - interface_str = self.interface(action=key, path=define.get("path"), - method=define.get("method"), description=define.get("provider", "common")) - input_str = self.inputparameter(notnull=define.get("notnull"), columns=define.get("inputParameters")) - out_str = self.outputparameter(columns=define.get("outputParameters")) - x_result = interface_str + input_str + out_str + "" - - result += x_result - - return result - - def generate_resource_xml(self): - ''' - package -> template -> plugins - :return: - ''' - - result = '' - resource_register = 
copy.deepcopy(xml_register) - for key, defines in resource_register.items(): - plugin_str = self.plugin_tag(key) - body_str = self.format_plugin_interface(key, defines) - x_result = plugin_str + body_str + "" - result += x_result - - result = '''%s''' % result - - xml_result = '''''' + template + result + '''''' - title = '''''' - - xml_result = title + xml_result - print(xml_result) - xml_result = xml_result.replace('\n', '').replace('\r', '').replace('\t', '') - xml_result = minidom.parseString(xml_result.encode('utf-8')) - return xml_result.toprettyxml(indent=' ', encoding='UTF-8') - - -class ResourceXmlController(BackendController): - allow_methods = ('GET',) - resource = None - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - xml_str = ResBase().generate_resource_xml() - return 1, {"result": xml_str} diff --git a/apps/controller/configer/model_args.py b/apps/controller/configer/model_args.py deleted file mode 100644 index 3371e02f..00000000 --- a/apps/controller/configer/model_args.py +++ /dev/null @@ -1,732 +0,0 @@ -# coding: utf-8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -resouce_property_models = { - "provider": [ - "secret_id", - "secret_key", - "region" - ], - "vpc": [ - "name", - "cidr" - ], - "subnet": [ - "cidr", - "name", - "vpc_id", - "zone_id" - ], - "route_table": [ - "name", - "vpc_id" - ], - "route_entry": [ - "name", - "vpc_id", - "route_table_id", - "next_type", - "next_hub", - "destination" - ], - "security_group": [ - "name", - "vpc_id" - ], - "security_group_rule": [ - "description", - "type", - "security_group_id", - "cidr_ip", - "policy", - "ip_protocol", - "ports" - ], - "ipaddress_group": [ - "name", - "addresses" - ], - "nat": [ - "name", - "vpc_id", - "subnet_id", - "eip", - "bandwidth" - ], - "peer_connection": [ - "name", - "vpc_id", - "peer_vpc_id", - "peer_region" - ], - "eip": [ - "name", - "charge_type" - ], - "eip_association": [ - "name", - 
"eip_id", - "instance_id", - "private_ip" - ], - "lb": [ - "name", - "network_type", - "vpc_id", - "subnet_id", - "charge_type" - ], - "lb_listener": [ - "name", - "port", - "protocol", - "backend_port", - "health_check", - "health_check_uri", - "lb_id" - ], - "lb_rule": [ - "listener_id", "lb_id", - "security_group_id", "frontend_port", - "name" - ], - "lb_server_group": [ - "name", - "lb_id", - "instance_id", - "port" - ], - "lb_attach": [ - "backend_servers", - "instance_id", - "weight", - "listener_id", - "lb_id" - ], - "disk": [ - "name", - "type", - "size", - "zone_id", - "charge_type" - ], - "disk_attach": [ - "disk_id", - "instance_id" - ], - "network_interface": [ - "name", - "ipaddress", - "vpc_id", - "subnet_id", - "security_group_id" - ], - "network_interface_attach": [ - "network_interface_id", - "instance_id" - ], - "object_storage": [ - "name", - "acl" - ], - "bucket_object": [ - "key", - "context", - "source", - "bucket_id" - ], - "ccn": [ - "name" - ], - "ccn_attach": [ - "instance_type", - "instance_region", - "instance_id", - "ccn_id" - ], - "ccn_bandwidth": [ - "bandwidth", - "from_region", - "dest_region", - "ccn_id" - ], - "instance": [ - "name", - "hostname", - "password", - "vpc_id", - "security_group_id", - "data_disks", - "instance_type", - "disk_type", - "disk_size", - "subnet_id", - "zone_id", - "image", - "power_action", - "force_delete", - "charge_type" - ], - "mysql": [ - "name", - "charge_type", - "engine", - "zone_id", - "version", - "disk_type", - "disk_size", - "subnet_id", - "instance_type", - "vpc_id", - "security_group_id", - "port", - "user", - "first_slave_zone", - "second_slave_zone", - "password", - "parameters", - "force_delete" - ], - "mysql_database": [ - "name", - "mysql_id" - ], - "mysql_account": [ - "name", - "password", - "mysql_id" - ], - "mysql_privilege": [ - "usrename", - "mysql_id", - "database_columns", - "database", - "privileges" - ], - "mysql_backup": [ - "backup_model", - "mysql_id", - "backup_time" - ], - 
"db_subnet_group": [ - "subnet_id", - "name" - ], - "mariadb": [ - "charge_type", - "name", - "engine", - "zone_id", - "version", - "disk_type", - "disk_size", - "subnet_id", - "instance_type", - "vpc_id", - "security_group_id", - "port", - "user", - "first_slave_zone", - "second_slave_zone", - "password", - "force_delete" - ], - "postgreSQL": [ - "charge_type", - "name", - "engine", - "zone_id", - "version", - "disk_type", - "disk_size", - "subnet_id", - "instance_type", - "vpc_id", - "security_group_id", - "port", - "user", - "first_slave_zone", - "second_slave_zone", - "password", - "force_delete" - ], - "rds": [ - "name", - "engine", - "zone_id", - "version", - "disk_type", - "disk_size", - "subnet_id", - "instance_type", - "vpc_id", - "security_group_id", - "port", - "user", - "password", - "charge_type", - "force_delete" - ], - "nosql": [ - "name", - "engine", - "zone_id", - "version", - "subnet_id", - "instance_type", - "port", - "password", - "charge_type", - "force_delete" - ], - "mongodb": [ - "name", - "engine", - "zone_id", - "version", - "subnet_id", - "instance_type", - # "port", - "disk_size", - "password", - "charge_type", - "force_delete" - ], - "kvstore": [ - "name", - "engine", - "zone_id", - "version", - "subnet_id", - "vpc_id", - "security_group_id", - "instance_type", - "port", - "password", - "charge_type", - "force_delete" - ], - "redis": [ - "name", - "engine", - "zone_id", - "version", - "subnet_id", - "vpc_id", - "security_group_id", - "instance_type", - "port", - "password", - "charge_type" - ], - "memcached": [ - "name", - "engine", - "zone_id", - "version", - "subnet_id", - "vpc_id", - "security_group_id", - "instance_type", - "port", - "password", - "force_delete", - "charge_type" - ], - "kvstore_backup": [ - "backup_time", - "backup_period", - "kvstore_id" - ], - "redis_backup": [ - "backup_time", - "backup_period", - "redis_id" - ], - "memcached_backup": [ - "backup_time", - "backup_period", - "memcached_id" - ], -} - 
-output_property_models = { - "vpc": [ - "asset_id" - ], - "subnet": [ - "asset_id" - ], - "route_table": [ - "asset_id" - ], - "route_entry": [ - "asset_id" - ], - "ipaddress_group": [ - "asset_id" - ], - "security_group": [ - "asset_id" - ], - "security_group_rule": [ - "asset_id" - ], - "nat": [ - "asset_id", - "ipaddress" - ], - "peer_connection": [ - "asset_id" - ], - "eip": [ - "asset_id", - "ipaddress" - ], - "eip_association": [ - "asset_id" - ], - "lb": [ - "asset_id", - "ipaddress" - ], - "lb_listener": [ - "asset_id" - ], - "lb_rule": [ - "asset_id" - ], - "lb_server_group": [ - "asset_id" - ], - "lb_attach": [ - "asset_id" - ], - "disk": [ - "asset_id" - ], - "disk_attach": [ - "asset_id" - ], - "network_interface": [ - "asset_id", - "ipaddress" - ], - "network_interface_attach": [ - "asset_id" - ], - "object_storage": [ - "asset_id", - "url" - ], - "bucket_object": [ - "asset_id" - ], - "ccn": [ - "asset_id" - ], - "ccn_attach": [ - "asset_id" - ], - "ccn_bandwidth": [ - "asset_id" - ], - "instance": [ - "asset_id", - "ipaddress", - "public_ip" - ], - "mysql": [ - "asset_id", - "ipaddress", - "port" - ], - "mysql_database": [ - "asset_id", - ], - "mysql_account": [ - "asset_id", - ], - "mysql_privilege": [ - "asset_id", - ], - "mysql_backup": [ - "asset_id", - ], - "db_subnet_group": [ - "asset_id" - ], - "mariadb": [ - "asset_id", - "ipaddress", - "port" - ], - "postgreSQL": [ - "asset_id", - "ipaddress", - "port" - ], - "rds": [ - "asset_id", - "ipaddress", - "port" - ], - "nosql": [ - "asset_id", - "ipaddress", - "port" - ], - "mongodb": [ - "asset_id", - "ipaddress", - "port" - ], - "kvstore": [ - "asset_id", - "ipaddress", - "port" - ], - "redis": [ - "asset_id", - "ipaddress", - "port" - ], - "memcached": [ - "asset_id", - "ipaddress", - "port" - ], - "memcached_backup": [ - "asset_id" - ], - "redis_backup": [ - "asset_id" - ], - "kvstore_backup": [ - "asset_id" - ], -} - -data_source_models = { - "vpc": [ - "asset_id", - "cidr" - ], - "subnet": 
[ - "asset_id", - "cidr", - "vpc_id" - ], - "route_table": [ - "asset_id", - ], - "route_entry": [ - "asset_id", - "route_table_id" - ], - "ipaddress_group": [ - "asset_id", - "name", - ], - "security_group": [ - "asset_id", - "name", - ], - "security_group_rule": [ - "asset_id", - "security_group_id" - ], - "nat": [ - "asset_id", - "name", - ], - "peer_connection": [ - "asset_id", - "peer_vpc_id" - ], - "eip": [ - "asset_id", - "ipaddress" - ], - "eip_association": [ - "asset_id", - "instance_id" - ], - "lb": [ - "asset_id", - "name" - ], - "lb_listener": [ - "asset_id", - "lb_id" - ], - "lb_rule": [ - "asset_id", - "listener_id", - "lb_id", - ], - "lb_server_group": [ - "asset_id", - "lb_id", - "instance_id" - ], - "lb_attach": [ - "asset_id", - "instance_id" - ], - "disk": [ - "asset_id", - "name", - "instance_id" - ], - "disk_attach": [ - "asset_id", - "instance_id" - ], - "network_interface": [ - "asset_id", - ], - "network_interface_attach": [ - "asset_id" - ], - "object_storage": [ - "asset_id" - ], - "bucket_object": [ - "asset_id" - ], - "ccn": [ - "asset_id" - ], - "ccn_attach": [ - "asset_id" - ], - "ccn_bandwidth": [ - "asset_id" - ], - "instance": [ - "asset_id", - "ipaddress" - ], - "mysql": [ - "asset_id", - "ipaddress" - ], - "mysql_database": [ - "asset_id", - ], - "db_subnet_group": [ - "asset_id" - ], - "mysql_account": [ - "asset_id", - "instance_id", - "name" - ], - "mysql_privilege": [ - "asset_id", - ], - "mysql_backup": [ - "asset_id", - ], - "mariadb": [ - "asset_id" - ], - "postgreSQL": [ - "asset_id" - ], - "rds": [ - "asset_id" - ], - "nosql": [ - "asset_id" - ], - "mongodb": [ - "asset_id" - ], - "kvstore": [ - "asset_id" - ], - "redis": [ - "asset_id" - ], - "memcached": [ - "asset_id", - ], - "memcached_backup": [ - "asset_id" - ], - "redis_backup": [ - "asset_id" - ], - "kvstore_backup": [ - "asset_id" - ], -} - - -def property_necessary(resource_name, resource_property): - if resource_name not in resouce_property_models.keys(): - 
return - - columns_property = resouce_property_models.get(resource_name) - for column in columns_property: - if column not in resource_property.keys(): - raise ValueError("缺少必要的property: %s" % column) - - -def output_necessary(resource_name, resource_output): - if resource_name not in output_property_models.keys(): - return - - columns_property = output_property_models.get(resource_name) - for column in columns_property: - if column not in resource_output.keys(): - raise ValueError("缺少必要的output property: %s" % column) - - # for key in resource_output.keys(): - # if key not in columns_property: - # raise ValueError("不合法的output property: %s, 允许值:%s" % (key, ",".join(columns_property))) - - -def data_source_output_necessary(resource_name, resource_property): - if resource_name not in resouce_property_models.keys(): - return - - if not resource_property: - return - - columns_property = resouce_property_models.get(resource_name) - for column in columns_property: - if column not in resource_property.keys(): - raise ValueError("data_source_output缺少必要的property: %s" % column) - - columns_property = output_property_models.get(resource_name) - for column in columns_property: - if column not in resource_property.keys(): - raise ValueError("data_source_output缺少必要的property: %s" % column) - - -def source_necessary(resource_name, data_source): - if resource_name not in data_source_models.keys(): - return - - if not data_source: - return - - columns_property = data_source_models.get(resource_name) - for column in columns_property: - if column not in data_source.keys(): - raise ValueError("缺少必要的data source 字段: %s" % column) - - -def source_columns_outputs(resource_name): - res = resouce_property_models.get(resource_name) or [] - res2 = output_property_models.get(resource_name) or [] - - x_res = res + res2 - result = {} - for key in x_res: - result[key] = "" - - return result diff --git a/apps/controller/configer/provider_controller.py 
b/apps/controller/configer/provider_controller.py deleted file mode 100644 index ac5ae742..00000000 --- a/apps/controller/configer/provider_controller.py +++ /dev/null @@ -1,127 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from apps.common.convert_keys import validate_convert_key -from apps.common.convert_keys import validate_convert_value -from apps.api.configer.provider import ProviderApi -from apps.api.configer.provider import ProviderObject -from .model_args import property_necessary - - -class ProviderController(BackendController): - allow_methods = ('GET', "POST") - resource = ProviderObject() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - validation.allowed_key(data.keys(), ["id", "name", "display_name", "region", "enabled"]) - - filter_string = None - for key in ["name", "display_name", "region"]: - if data.get(key): - if filter_string: - filter_string += 'and ' + key + " like '%" + data.get(key) + "%' " - else: - filter_string = key + " like '%" + data.get(key) + "%' " - data.pop(key, None) - - return self.resource.list(filters=data, page=page, - filter_string=filter_string, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["id", "name", "zone", "secret_id", - "secret_key", "region", "enabled", - "extend_info", "plugin_source", - "provider_property", "display_name"]) - validation.not_allowed_null(data=data, - keys=["name",] - ) - - validation.validate_string("id", data.get("id")) - validation.validate_string("name", data["name"]) - validation.validate_string("display_name", data.get("display_name")) - validation.validate_string("secret_id", data.get("secret_id")) - 
validation.validate_string("secret_key", data.get("secret_key")) - validation.validate_dict("extend_info", data.get("extend_info")) - validation.validate_dict("provider_property", data.get("provider_property")) - - def create(self, request, data, **kwargs): - ''' - - :param request: - :param data: - extend_info: {} define example: {"version": "v1.1.0"} - provider_property {}revert property for provider, example secret_key to key - define example: {"secret_key": "key"} - :param kwargs: - :return: - ''' - #todo 移除provider 认证信息必填项, - name = data.get("name") - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) or {} - provider_property = validation.validate_dict("provider_property", data.get("provider_property")) or {} - provider_property = validate_convert_key(provider_property) - validate_convert_value(extend_info) - property_necessary(resource_name="provider", - resource_property=provider_property) - - ProviderApi().create_provider_workspace(provider=name) - create_data = {"id": data.get("id") or get_uuid(), - "name": data["name"], - "display_name": data.get("display_name"), - "secret_id": data.get("secret_id"), - "secret_key": data.get("secret_key"), - "extend_info": json.dumps(extend_info), - "provider_property": json.dumps(provider_property), - "is_init": 1 - } - - return self.resource.create(create_data) - - -class ProviderIdController(BackendIdController): - resource = ProviderObject() - - def show(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.show(rid) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["zone", "secret_id", - "secret_key", "region", "enabled", - "extend_info", "plugin_source", - "provider_property", "display_name"]) - - validation.validate_string("name", data.get("name")) - validation.validate_string("display_name", data.get("display_name")) - validation.validate_string("secret_id", data.get("secret_id")) - 
validation.validate_string("secret_key", data.get("secret_key")) - validation.validate_dict("extend_info", data.get("extend_info")) - validation.validate_dict("provider_property", data.get("provider_property")) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - - if data.get("extend_info") is not None: - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - validate_convert_value(extend_info) - data["extend_info"] = json.dumps(extend_info) - - if data.get("provider_property") is not None: - provider_property = validation.validate_dict("provider_property", data.get("provider_property")) or {} - provider_property = validate_convert_key(provider_property) - property_necessary(resource_name="provider", - resource_property=provider_property) - data["provider_property"] = json.dumps(provider_property) - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.delete(rid) diff --git a/apps/controller/configer/provider_secret_controller.py b/apps/controller/configer/provider_secret_controller.py deleted file mode 100644 index 01511d7a..00000000 --- a/apps/controller/configer/provider_secret_controller.py +++ /dev/null @@ -1,142 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from lib.logs import logger -from lib.encrypt_helper import decrypt_str -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from apps.common.validation import validate_column_line -from apps.api.configer.provider_secret import SecretApi -from apps.api.configer.provider_secret import ProviderSecretObject - - -class ProviderSecretController(BackendController): - allow_methods = ('GET', "POST") - resource = ProviderSecretObject() - - def list(self, request, data, 
orderby=None, page=None, pagesize=None, **kwargs): - validation.allowed_key(data.keys(), ["id", "name", "display_name", "region", "provider", "enabled"]) - - filter_string = None - for key in ["name", "display_name", "provider", "region"]: - if data.get(key): - if filter_string: - filter_string += 'and ' + key + " like '%" + data.get(key) + "%' " - else: - filter_string = key + " like '%" + data.get(key) + "%' " - data.pop(key, None) - - return self.resource.list(filters=data, page=page, - filter_string=filter_string, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["id", "name", "display_name", "provider", "server", - "secret_info", "region", "extend_info"]) - validation.not_allowed_null(data=data, - keys=["name", "provider", "secret_info"] - ) - - validation.validate_string("id", data.get("id")) - validation.validate_string("name", data["name"]) - validation.validate_string("server", data.get("server")) - validation.validate_string("display_name", data.get("display_name")) - validation.validate_string("provider", data.get("provider")) - validation.validate_string("secret_info", data.get("secret_info")) - validation.validate_dict("extend_info", data.get("extend_info")) - validation.validate_string("region", data.get("region")) - - def is_unique_name(self, provider, name): - if self.resource.name_object(name=name, provider=provider): - raise ValueError("provider %s secret %s exists" % (provider, name)) - - def create(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - name = data.get("name") - validate_column_line(name) - - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) or {} - secret_info = data.get("secret_info") #validation.validate_dict("secret_info", data.get("secret_info")) - - - if not secret_info: - raise ValueError("secret 认证信息不能为空,数据格式为JSON") - - 
self.is_unique_name(provider=data.get("provider"), name=data.get("name")) - - create_data = {"id": data.get("id") or get_uuid(), - "name": data["name"], - "server": data.get("server"), - "provider": data.get("provider"), - "display_name": data.get("display_name"), - "region": data.get("region"), - "extend_info": json.dumps(extend_info), - "secret_info": secret_info - } - - return self.resource.create(create_data) - - -class ProviderSecretIdController(BackendIdController): - resource = ProviderSecretObject() - - def decrypt_key(self, str): - if str: - if str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - def show(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - decrypt = 0 - try: - decrypt = int(data.get("decrypt", "0")) - except: - logger.info("decrypt args error") - - res = self.resource.show(rid) - if decrypt: - res["secret_info"] = json.loads(self.decrypt_key(res.get("secret_info"))) - - return res - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["id", "name", "display_name", "provider", - "secret_info", "region", "extend_info"]) - - validation.validate_string("id", data.get("id")) - validation.validate_string("name", data["name"]) - validation.validate_string("display_name", data.get("display_name")) - validation.validate_string("provider", data.get("provider")) - validation.validate_string("secret_info", data.get("secret_info")) - validation.validate_dict("extend_info", data.get("extend_info")) - validation.validate_string("region", data.get("region")) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - - if data.get("extend_info") is not None: - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - data["extend_info"] = json.dumps(extend_info) - - if data.get("secret_info") is not None: - provider_property = data.get("secret_info") #validation.validate_dict("secret_info", data.get("secret_info")) 
or {} - data["secret_info"] = provider_property - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.delete(rid) diff --git a/apps/controller/configer/resource_controller.py b/apps/controller/configer/resource_controller.py deleted file mode 100644 index 711c4683..00000000 --- a/apps/controller/configer/resource_controller.py +++ /dev/null @@ -1,373 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import copy -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from apps.common.convert_keys import validate_convert_key -from apps.common.convert_keys import validate_convert_value -from apps.api.configer.resource import ResourceObject -from apps.api.configer.provider import ProviderObject -from .model_args import output_property_models -from .model_args import property_necessary -from .model_args import output_necessary -from .model_args import source_necessary -from .model_args import data_source_output_necessary - - -def format_argument(key, data): - if not data: - return "" - if isinstance(data, dict): - return data - elif isinstance(data, basestring): - data = data.strip() - if data.startswith("{"): - try: - json.loads(data) - except: - try: - eval(data) - except: - raise ValueError("data: %s is not json " % (data)) - - return data - else: - raise ValueError("key: %s 应为json或string" % key) - - -def get_columns(defines): - result = [] - for key, define in defines.items(): - if isinstance(define, basestring): - result.append(key) - elif isinstance(define, dict): - if define.get("define"): - result.append(key) - result += get_columns(defines.get("define")) - else: - # tkey = define.get("convert") or key - result.append(key) - else: - pass - - return result - - -class 
ResourceController(BackendController): - resource = ResourceObject() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - validation.allowed_key(data, ["id", "provider", "resource_type", "resource_name", - "data_source_argument", "data_source_name"]) - - filter_string = None - for key in ["resource_type", "provider", "resource_name", "data_source_name"]: - if data.get(key): - if filter_string: - filter_string += 'and ' + key + " like '%" + data.get(key) + "%' " - else: - filter_string = key + " like '%" + data.get(key) + "%' " - data.pop(key, None) - - return self.resource.list(filters=data, page=page, - filter_string=filter_string, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["id", "provider", "resource_type", "extend_info", - "resource_name", "resource_property", "resource_output", - "data_source", "data_source_name", - "pre_action", "pre_action_output", - "data_source_output", "data_source_argument"]) - validation.not_allowed_null(data=data, - keys=["provider", "resource_type"] - ) - - validation.validate_string("id", data.get("id")) - validation.validate_string("provider", data["provider"]) - validation.validate_string("resource_type", data.get("resource_type")) - - # for resource - validation.validate_string("resource_name", data.get("resource_name")) - validation.validate_dict("extend_info", data.get("extend_info")) - validation.validate_dict("resource_property", data.get("resource_property")) - validation.validate_dict("resource_output", data.get("resource_output")) - - # for data source - validation.validate_string("data_source_name", data.get("data_source_name")) - validation.validate_string("data_source_argument", data.get("data_source_argument")) - validation.validate_dict("data_source", data.get("data_source")) - validation.validate_dict("data_source_output", data.get("data_source_output")) - - # pre action - 
validation.validate_dict("pre_action_output", data.get("pre_action_output")) - validation.validate_string("pre_action", data.get("pre_action")) - - def create(self, request, data, **kwargs): - ''' - - :param request: - :param data: - extend_info: {} define example: {"version": "v1.1.0"} - resource_property {}define property for provider, example secret_key to key - define example: {"secret_key": "key"} - :param kwargs: - :return: - ''' - - # resource - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) or {} - resource_property = validation.validate_dict("resource_property", data.get("resource_property")) or {} - resource_output = validation.validate_dict("resource_output", data.get("resource_output")) or {} - - data_source = validation.validate_dict("data_source", data.get("data_source")) - data_source_output = validation.validate_dict("data_source_output", data.get("data_source_output")) - - pre_action_output = validation.validate_dict("pre_action_output", data.get("pre_action_output")) - for _, value in pre_action_output.items(): - if not isinstance(value, basestring): - raise ValueError("pre_action_output 为key-value定义") - if len(pre_action_output) > 1: - raise ValueError("output 只支持最多一个参数过滤") - - resource_property = validate_convert_key(resource_property) - validate_convert_value(extend_info) - validate_convert_value(resource_output) - data_source_output = validate_convert_key(data_source_output) - property_necessary(resource_name=data["resource_type"], - resource_property=resource_property) - - output_necessary(resource_name=data["resource_type"], - resource_output=resource_output) - - source_necessary(resource_name=data["resource_type"], - data_source=data_source) - - data_source_output_necessary(resource_name=data["resource_type"], - resource_property=data_source_output) - - data_source_argument = format_argument("data_source_argument", data.get("data_source_argument")) - - resource_name = data.get("resource_name", "") or "" - 
ProviderObject().provider_name_object(data["provider"]) - create_data = {"id": data.get("id") or get_uuid(), - "provider": data["provider"], - "resource_type": data.get("resource_type"), - "resource_name": resource_name, - "extend_info": json.dumps(extend_info), - "resource_property": json.dumps(resource_property), - "resource_output": json.dumps(resource_output), - "data_source_name": data.get("data_source_name"), - "data_source_argument": data_source_argument, - "data_source_output": json.dumps(data_source_output), - "data_source": json.dumps(data_source), - "pre_action": data.get("pre_action"), - "pre_action_output": json.dumps(pre_action_output) - } - - return self.resource.create(create_data) - - -class ResourceIdController(BackendIdController): - resource = ResourceObject() - - def show(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.show(rid) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["provider", "resource_type", "extend_info", - "resource_name", "resource_property", - "enabled", "resource_output", - "data_source", "data_source_name", - "data_source_output", "pre_action", - "pre_action_output", - "data_source_argument"]) - - validation.validate_string("provider", data["provider"]) - validation.validate_string("resource_type", data.get("resource_type")) - - # for resource - validation.validate_string("resource_name", data.get("resource_name")) - validation.validate_dict("extend_info", data.get("extend_info")) - validation.validate_dict("resource_property", data.get("resource_property")) - validation.validate_dict("resource_output", data.get("resource_output")) - - # for data source - validation.validate_string("data_source_name", data.get("data_source_name")) - validation.validate_string("data_source_argument", data.get("data_source_argument")) - - # pre action - validation.validate_string("pre_action", data.get("pre_action")) - validation.validate_dict("pre_action_output", 
data.get("pre_action_output")) - - format_argument("data_source_argument", data.get("data_source_argument")) - validation.validate_dict("data_source", data.get("data_source")) - validation.validate_dict("data_source_output", data.get("data_source_output")) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - if data.get("extend_info") is not None: - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - validate_convert_value(extend_info) - data["extend_info"] = json.dumps(extend_info) - - if data.get("resource_property") is not None: - resource_property = validation.validate_dict("resource_property", data.get("resource_property")) or {} - resource_property = validate_convert_key(resource_property) - property_necessary(resource_name=data["resource_type"], - resource_property=resource_property) - - data["resource_property"] = json.dumps(resource_property) - - if data.get("resource_output") is not None: - resource_output = validation.validate_dict("resource_output", data.get("resource_output")) or {} - validate_convert_value(resource_output) - output_necessary(resource_name=data["resource_type"], - resource_output=resource_output) - - data["resource_output"] = json.dumps(resource_output) - - if data.get("data_source") is not None: - data_source = validation.validate_dict("data_source", data.get("data_source")) - source_necessary(resource_name=data["resource_type"], - data_source=data_source) - - data["data_source"] = json.dumps(data_source) - - if data.get("data_source_output") is not None: - data_source_output = validation.validate_dict("data_source_output", data.get("data_source_output")) - data_source_output = validate_convert_key(data_source_output) - - data_source_output_necessary(resource_name=data["resource_type"], - resource_property=data_source_output) - - for _, value in data_source_output.items(): - if not isinstance(value, (basestring, dict)): - raise ValueError("data_source_output 为key-value定义") - - 
data["data_source_output"] = json.dumps(data_source_output) - - if data.get("pre_action_output") is not None: - pre_action_output = validation.validate_dict("pre_action_output", data.get("pre_action_output")) - - for _, value in pre_action_output.items(): - if not isinstance(value, basestring): - raise ValueError("data_source_output 为key-value定义") - - if len(pre_action_output) > 1: - raise ValueError("output 只支持最多一个参数过滤") - - data["pre_action_output"] = json.dumps(pre_action_output) - - if "provider" in data.keys(): - if not data.get("provider"): - raise ValueError("provider 不能为空") - ProviderObject().provider_name_object(data["provider"]) - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.delete(rid) - - -class ResourceListController(BackendIdController): - resource = ResourceObject() - allow_methods = ('GET',) - - def get_resource_list(self, data): - filter_string = None - _, resource_lists = self.resource.list(filters=data, page=1, filter_string=filter_string, - pagesize=10000, orderby=None) - - res = [] - for xres in resource_lists: - res.append(xres["resource_type"]) - - return list(set(res)) - - def show(self, request, data, **kwargs): - provider = data.get("provider") - if provider: - config_columns = self.get_resource_list(data={"provider": provider}) - columns = output_property_models.keys() + config_columns - else: - columns = output_property_models.keys() - - columns = list(set(columns)) - res = [] - for column in columns: - res.append({"id": column, "name": column}) - - return {"resource": res} - - -class ResourceAttrController(BackendIdController): - resource = ResourceObject() - allow_methods = ('GET',) - - def show(self, request, data, **kwargs): - validation.allowed_key(data, ["resource_type", "provider"]) - validation.not_allowed_null(["resource_type", "provider"], data) - - define_data = self.resource.query_one(where_data={"provider": data.get("provider"), 
- "resource_type": data.get("resource_type")}) - - define = define_data.get("resource_property") or {} - out_define = define_data.get("resource_output") or {} - - columns = get_columns(define) + get_columns(out_define) - columns = list(set(columns)) - - res = [] - for column in columns: - res.append({"id": column, "name": column}) - - return {"resource": res} - - -class HintResourceController(BackendIdController): - resource = ResourceObject() - allow_methods = ('GET',) - - def format_resource_type(self, datas): - models = copy.deepcopy(output_property_models) - for data in datas: - resource_type = data.get("resource_type") - if models.get(resource_type): - r_out = data.get("resource_output") or {} - tmp = models[resource_type] - models[resource_type] = list(set(r_out.keys() + tmp)) - # else: - # tmp = data.get("resource_output") or {} - # models[resource_type] = tmp.keys() - - return models - - def get_resource_list(self, data): - filter_string = None - _, resource_lists = self.resource.list(filters=data, page=1, filter_string=filter_string, - pagesize=10000, orderby=None) - return resource_lists - - def show(self, request, data, **kwargs): - validation.allowed_key(data, ["resource_type"]) - resource_attribute = self.format_resource_type(self.get_resource_list(data)) - - result = ["$zone", "$region", "$instance.type", - "$instance.type.cpu", "$instance.type.memory", "$resource"] - - for xres in resource_attribute.keys(): - result.append("$resource.%s" % (xres)) - - res = [] - for column in result: - res.append({"id": column, "name": column}) - - return {"resource": res, "attribute": resource_attribute} diff --git a/apps/controller/configer/route.py b/apps/controller/configer/route.py deleted file mode 100644 index 2bbdc65f..00000000 --- a/apps/controller/configer/route.py +++ /dev/null @@ -1,39 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url -from provider_controller import ProviderController -from provider_controller import 
ProviderIdController -from resource_controller import ResourceController -from resource_controller import ResourceIdController -from resource_controller import ResourceAttrController -from resource_controller import ResourceListController -from resource_controller import HintResourceController -from config_controller import ConfigController -from config_controller import ConfigIdController -from config_controller import ConfigAttrController -from config_controller import ConfigListController -from provider_secret_controller import ProviderSecretController -from provider_secret_controller import ProviderSecretIdController -from generate_xml import ResourceXmlController - - -urlpatterns = [ - url(r'^provider$', ProviderController()), - url(r'^provider/(?P[\w-]+)$', ProviderIdController()), - - url(r'^resource$', ResourceController()), - url(r'^resource/(?P[\w-]+)$', ResourceIdController()), - url(r'^xml/resource$', ResourceXmlController()), - - url(r'^keyconfig$', ConfigController()), - url(r'^keyconfig/(?P[\w-]+)$', ConfigIdController()), - - url(r'^secret$', ProviderSecretController()), - url(r'^secret/(?P[\w-]+)$', ProviderSecretIdController()), - - url(r'^resourceAttr$', ResourceAttrController()), - url(r'^resourceList$', ResourceListController()), - url(r'^resourceHint$', HintResourceController()), - url(r'^configAttr$', ConfigAttrController()), - url(r'^configList$', ConfigListController()), -] diff --git a/apps/controller/database/__init__.py b/apps/controller/database/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/database/kv_controller/__init__.py b/apps/controller/database/kv_controller/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/database/kv_controller/kvstore_backup_controller.py b/apps/controller/database/kv_controller/kvstore_backup_controller.py deleted file mode 100644 index 2f7f7dd0..00000000 --- 
a/apps/controller/database/kv_controller/kvstore_backup_controller.py +++ /dev/null @@ -1,152 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.database.kvstore.kvstore_backup import KvBackupApi -from apps.api.database.kvstore.kvstore_backup import KvBackupBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data, keyname): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "backup_time", - "backup_period", keyname]) - - @classmethod - def not_null(cls, data, keyname): - validation.not_allowed_null(data=data, - keys=["region", "provider", "zone", - "backup_time", "backup_period", keyname] - ) - - @classmethod - def validate_keys(cls, data, keyname): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "backup_time", - "backup_period", keyname], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, keyname, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - kvstore_id = data.pop(keyname, None) - backup_time = data.pop("backup_time", None) - backup_period = data.pop("backup_period", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - 
data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - d = dict(kvstore_id=kvstore_id, - backup_time=backup_time, - backup_period=backup_period) - - create_data = {"name": name} - create_data.update(d) - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class KvBackupController(BackendController): - allow_methods = ('GET', 'POST') - resource = KvBackupApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", "zone", "provider_id", - 'resource_id', "kvstore_id"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def keyname(self): - return "kvstore_id" - - def before_handler(self, request, data, **kwargs): - keyname = self.keyname() - ResBase.allow_key(data, keyname) - ResBase.not_null(data, keyname) - ResBase.validate_keys(data, keyname) - - def create(self, request, data, **kwargs): - keyname = self.keyname() - res, _ = ResBase.create(resource=self.resource, data=data, keyname=keyname) - return 1, res - - -class KvBackupIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = KvBackupApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - force_delete = data.get("force_delete", False) - return self.resource.destroy(rid) - - 
-class KvBackupAddController(BackendAddController): - allow_methods = ("POST",) - resource = KvBackupBackendApi() - - -class KvBackupDeleteController(BackendDeleteController): - name = "KvBackup" - resource_describe = "KvBackup" - allow_methods = ("POST",) - resource = KvBackupBackendApi() - - -class KvBackupSourceController(BackendSourceController): - name = "KvBackup" - resource_describe = "KvBackup" - allow_methods = ("POST",) - resource = KvBackupBackendApi() diff --git a/apps/controller/database/kv_controller/kvstore_controller.py b/apps/controller/database/kv_controller/kvstore_controller.py deleted file mode 100644 index c4c9d3ad..00000000 --- a/apps/controller/database/kv_controller/kvstore_controller.py +++ /dev/null @@ -1,180 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.database.kvstore.kvstore import KvStoreApi -from apps.api.database.kvstore.kvstore import KvStoreBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "subnet_id", - "password", "port", "version", "instance_type", - "vpc_id", "security_group_id", "engine", - "charge_type"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "zone", "name", - "version", "subnet_id", "instance_type"] - ) - - @classmethod - def validate_keys(cls, data): - 
validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "subnet_id", - "password", "port", "version", - "instance_type", "charge_type", - "vpc_id", "engine"], - lists=["security_group_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - port = data.pop("port", None) - password = data.pop("password", None) - version = data.pop("version", None) - instance_type = data.pop("instance_type", None) - engine = data.pop("engine", None) - vpc_id = data.pop("vpc_id", None) - charge_type = data.pop("charge_type", None) - - security_group_id = validation.validate_list("security_group_id", data.pop("security_group_id", None)) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - d = dict(version=version, port=port, - password=password, engine=engine, - instance_type=instance_type, - vpc_id=vpc_id, - security_group_id=security_group_id, - subnet_id=subnet_id, - charge_type=charge_type) - - create_data = {"name": name} - create_data.update(d) - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - # _password = base64.b64decode(result.get("password")) if result.get("password") else None - res = {"id": rid, "ipaddress": result.get("ipaddress"), - "port": result.get("port"), - "resource_id": str(result.get("resource_id"))[:64]} - - return res, result - - -class KvStoreController(BackendController): - allow_methods = ('GET', 'POST') - 
resource = KvStoreApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", "zone", "provider_id", - 'resource_id', "name", "enabled", - "subnet_id", "instance_type", "version", - "ipaddress", "port", 'engine']) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class KvStoreIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = KvStoreApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - force_delete = data.get("force_delete", False) - return self.resource.destroy(rid) - - -class KvStoreAddController(BackendAddController): - allow_methods = ("POST",) - resource = KvStoreBackendApi() - - -class KvStoreDeleteController(BackendDeleteController): - name = "KvStore" - resource_describe = "KvStore" - allow_methods = ("POST",) - resource = KvStoreBackendApi() - - -class KvStoreSourceController(BackendSourceController): - resource_describe = "KvStore" - allow_methods = ("POST",) - resource = KvStoreBackendApi() - - -class KvStoreSGSourceController(BackendSourceController): - resource_describe = "KvStore" - allow_methods = ("POST",) - resource = KvStoreBackendApi() - - def fetch_source(self, rid, provider, region, zone, secret, resource_id, **kwargs): - return 
self.resource.sg_kv_relationship(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id) diff --git a/apps/controller/database/kv_controller/memcached_conrtoller.py b/apps/controller/database/kv_controller/memcached_conrtoller.py deleted file mode 100644 index d50ca900..00000000 --- a/apps/controller/database/kv_controller/memcached_conrtoller.py +++ /dev/null @@ -1,69 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import local_exceptions -from lib.uuid_util import get_uuid -from apps.api.database.kvstore.memcached import MemcachedApi -from apps.api.database.kvstore.memcached import MemcachedBackendApi -from apps.api.database.kvstore.memcached import MemcachedBackupApi -from apps.api.database.kvstore.memcached import MemcachedBackupBackendApi -from .kvstore_controller import KvStoreController -from .kvstore_controller import KvStoreIdController -from .kvstore_controller import KvStoreAddController -from .kvstore_controller import KvStoreDeleteController -from .kvstore_backup_controller import KvBackupController -from .kvstore_backup_controller import KvBackupIdController -from .kvstore_backup_controller import KvBackupAddController -from .kvstore_backup_controller import KvBackupDeleteController - - -class MemcachedController(KvStoreController): - allow_methods = ('GET', 'POST') - resource = MemcachedApi() - - -class MemcachedIdController(KvStoreIdController): - allow_methods = ('GET', 'DELETE',) - resource = MemcachedApi() - - -class MemcachedAddController(KvStoreAddController): - allow_methods = ("POST",) - resource = MemcachedBackendApi() - - -class MemcachedDeleteController(KvStoreDeleteController): - name = "Memcached" - resource_describe = "Memcached" - allow_methods = ("POST",) - resource = MemcachedBackendApi() - - -class MemBackupController(KvBackupController): - allow_methods = ('GET', 'POST') - resource = 
MemcachedBackupApi() - - def keyname(self): - return "memcached_id" - - -class MemBackupIdController(KvBackupIdController): - allow_methods = ('GET', 'DELETE',) - resource = MemcachedBackupApi() - - -class MemBackupAddController(KvBackupAddController): - allow_methods = ("POST",) - resource = MemcachedBackupBackendApi() - - def keyname(self): - return "memcached_id" - - -class MemBackupDeleteController(KvBackupDeleteController): - name = "MemBackup" - resource_describe = "MemBackup" - allow_methods = ("POST",) - resource = MemcachedBackupBackendApi() diff --git a/apps/controller/database/kv_controller/redis_controller.py b/apps/controller/database/kv_controller/redis_controller.py deleted file mode 100644 index 331672fe..00000000 --- a/apps/controller/database/kv_controller/redis_controller.py +++ /dev/null @@ -1,77 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import local_exceptions -from lib.uuid_util import get_uuid -from apps.api.database.kvstore.redis import RedisApi -from apps.api.database.kvstore.redis import RedisBackendApi -from apps.api.database.kvstore.redis import RedisBackupApi -from apps.api.database.kvstore.redis import RedisBackupBackendApi -from .kvstore_controller import KvStoreController -from .kvstore_controller import KvStoreIdController -from .kvstore_controller import KvStoreAddController -from .kvstore_controller import KvStoreDeleteController -from .kvstore_controller import KvStoreSourceController -from .kvstore_backup_controller import KvBackupController -from .kvstore_backup_controller import KvBackupIdController -from .kvstore_backup_controller import KvBackupAddController -from .kvstore_backup_controller import KvBackupDeleteController - - -class RedisController(KvStoreController): - allow_methods = ('GET', 'POST') - resource = RedisApi() - - -class RedisIdController(KvStoreIdController): - allow_methods = ('GET', 'DELETE',) - resource 
= RedisApi() - - -class RedisAddController(KvStoreAddController): - allow_methods = ("POST",) - resource = RedisBackendApi() - - -class RedisDeleteController(KvStoreDeleteController): - name = "Redis" - resource_describe = "Redis" - allow_methods = ("POST",) - resource = RedisBackendApi() - - -class RedisSourceController(KvStoreSourceController): - name = "Redis" - resource_describe = "Redis" - allow_methods = ("POST",) - resource = RedisBackendApi() - - -class RedisBackupController(KvBackupController): - allow_methods = ('GET', 'POST') - resource = RedisBackupApi() - - def keyname(self): - return "redis_id" - - -class RedisBackupIdController(KvBackupIdController): - allow_methods = ('GET', 'DELETE',) - resource = RedisBackupApi() - - -class RedisBackupAddController(KvBackupAddController): - allow_methods = ("POST",) - resource = RedisBackupBackendApi() - - def keyname(self): - return "redis_id" - - -class RedisBackupDeleteController(KvBackupDeleteController): - name = "RedisBackup" - resource_describe = "RedisBackup" - allow_methods = ("POST",) - resource = RedisBackupBackendApi() diff --git a/apps/controller/database/maridb_controller/__init__.py b/apps/controller/database/maridb_controller/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/database/maridb_controller/maridb_controller.py b/apps/controller/database/maridb_controller/maridb_controller.py deleted file mode 100644 index 5d74cf29..00000000 --- a/apps/controller/database/maridb_controller/maridb_controller.py +++ /dev/null @@ -1,202 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from lib.encrypt_helper import decrypt_str -from 
apps.api.database.rds.mariadb import MariaDBApi -from apps.api.database.rds.mariadb import MariaDBBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "subnet_id", - "user", "password", "port", "disk_type", - "disk_size", "version", "instance_type", - "vpc_id", "security_group_id", - "second_slave_zone", "first_slave_zone", - "charge_type"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "version", "subnet_id", "instance_type"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "subnet_id", - "user", "password", "disk_type", - "version", "instance_type", - "vpc_id", "second_slave_zone", - "first_slave_zone", "charge_type"], - ports=["port"], - ints=["disk_size"], - lists=["security_group_id"], - dicts=["extend_info"]) - - @classmethod - def decrypt_key(cls, str): - if str: - if str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - port = data.pop("port", None) - password = data.pop("password", None) - user = data.pop("user", None) - version = data.pop("version", None) - disk_type = data.pop("disk_type", None) - disk_size = data.pop("disk_size", None) - instance_type = 
data.pop("instance_type", None) - first_slave_zone = data.pop("first_slave_zone", None) - second_slave_zone = data.pop("second_slave_zone", None) - vpc_id = data.pop("vpc_id", None) - charge_type = data.pop("charge_type", None) - security_group_id = validation.validate_list("security_group_id", data.pop("security_group_id", None)) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - d = dict(version=version, port=port, - password=password, user=user, - instance_type=instance_type, - vpc_id=vpc_id, first_slave_zone=first_slave_zone, - second_slave_zone=second_slave_zone, - security_group_id=security_group_id, - disk_type=disk_type, disk_size=disk_size, - subnet_id=subnet_id, charge_typ=charge_type) - - create_data = {"name": name} - create_data.update(d) - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - _password = cls.decrypt_key(result.get("password")) - res = {"id": rid, "ipaddress": result.get("ipaddress"), - "port": result.get("port"), "user": result.get("user"), - "password": _password, - "resource_id": str(result.get("resource_id"))[:64]} - - return res, result - - -class MariaDBController(BackendController): - allow_methods = ('GET', 'POST') - resource = MariaDBApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled", - "subnet_id", "instance_type", "version", - "ipaddress", "port", - "disk_type", "disk_size"]) - - return self.resource.resource_object.list(filters=data, page=page, - 
pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class MariaDBIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = MariaDBApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class MariaDBAddController(BackendAddController): - allow_methods = ("POST",) - resource = MariaDBBackendApi() - - -class MariaDBDeleteController(BackendDeleteController): - name = "MariaDB" - resource_describe = "MariaDB" - allow_methods = ("POST",) - resource = MariaDBBackendApi() - - -class MariaDBSourceController(BackendSourceController): - name = "MariaDB" - resource_describe = "MariaDB" - allow_methods = ("POST",) - resource = MariaDBBackendApi() - - -class MariaDBSGSourceController(BackendSourceController): - name = "MariaDB" - resource_describe = "MariaDB" - allow_methods = ("POST",) - resource = MariaDBBackendApi() - - def fetch_source(self, rid, provider, region, zone, secret, resource_id): - return self.resource.sg_rds_relationship(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id) diff --git a/apps/controller/database/mogodb_controller/__init__.py b/apps/controller/database/mogodb_controller/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/database/mogodb_controller/mongodb_controller.py b/apps/controller/database/mogodb_controller/mongodb_controller.py deleted file mode 100644 index c15dee0a..00000000 --- 
a/apps/controller/database/mogodb_controller/mongodb_controller.py +++ /dev/null @@ -1,191 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from lib.encrypt_helper import decrypt_str -from apps.api.database.nosql.mogodb import MongodbApi -from apps.api.database.nosql.mogodb import MongodbBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "subnet_id", "password", - "disk_size", "version", "instance_type", - "vpc_id", "security_group_id", "charge_type"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "version", "subnet_id", "instance_type"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "subnet_id", - "password", "disk_type", - "version", "instance_type", - "vpc_id", "charge_type"], - ints=["disk_size"], - lists=["security_group_id"], - dicts=["extend_info"]) - - @classmethod - def decrypt_key(cls, str): - if str: - if str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = 
data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - port = data.pop("port", None) - password = data.pop("password", None) - version = data.pop("version", None) - disk_size = data.pop("disk_size", None) - instance_type = data.pop("instance_type", None) - vpc_id = data.pop("vpc_id", None) - charge_type = data.pop("charge_type", None) - - security_group_id = validation.validate_list("security_group_id", data.pop("security_group_id", None)) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - d = dict(version=version, port=port, - password=password, - instance_type=instance_type, - vpc_id=vpc_id, - security_group_id=security_group_id, - disk_size=disk_size, - subnet_id=subnet_id, - charge_type=charge_type) - - create_data = {"name": name} - create_data.update(d) - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - _password = cls.decrypt_key(result.get("password")) - res = {"id": rid, "ipaddress": result.get("ipaddress"), - "port": result.get("port"), - "resource_id": str(result.get("resource_id"))[:64]} - - return res, result - - -class MongoDBController(BackendController): - allow_methods = ('GET', 'POST') - resource = MongodbApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled", - "subnet_id", "instance_type", "version"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, 
orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class MongoDBIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = MongodbApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class MongoDBAddController(BackendAddController): - allow_methods = ("POST",) - resource = MongodbBackendApi() - - -class MongoDBDeleteController(BackendDeleteController): - name = "MongoDB" - resource_describe = "MongoDB" - allow_methods = ("POST",) - resource = MongodbBackendApi() - - -class MongoDBSourceController(BackendSourceController): - name = "MongoDB" - resource_describe = "MongoDB" - allow_methods = ("POST",) - resource = MongodbBackendApi() - - -class MongoDBSGSourceController(BackendSourceController): - name = "MongoDB" - resource_describe = "MongoDB" - allow_methods = ("POST",) - resource = MongodbBackendApi() - - def fetch_source(self, rid, provider, region, zone, secret, resource_id, **kwargs): - return self.resource.sg_nosql_relationship(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id) diff --git a/apps/controller/database/mysql_controller/__init__.py b/apps/controller/database/mysql_controller/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/database/mysql_controller/account.py b/apps/controller/database/mysql_controller/account.py deleted file mode 100644 index e0433ce2..00000000 --- a/apps/controller/database/mysql_controller/account.py +++ /dev/null 
@@ -1,136 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.database.mysql.account import MysqlAccountApi -from apps.api.database.mysql.account import MysqlAccountBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "mysql_id", "password", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "password", "mysql_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_string("password", data["password"], minlen=7, maxlen=32) - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", - "mysql_id", "password", ], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - password = data.pop("password", None) - mysql_id = data.pop("mysql_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - create_data = {"name": name, "mysql_id": mysql_id, - "password": 
password} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class MysqlAccountController(BackendController): - allow_methods = ('GET', 'POST') - resource = MysqlAccountApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", "rds_id", "name", "enabled"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class MysqlAccountIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = MysqlAccountApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class MysqlAccountAddController(BackendAddController): - allow_methods = ("POST",) - resource = MysqlAccountBackendApi() - - - - -class MysqlAccountDeleteController(BackendDeleteController): - name = "MysqlAccount" - resource_describe = "MysqlAccount" - allow_methods = ("POST",) - resource = MysqlAccountBackendApi() - - diff --git a/apps/controller/database/mysql_controller/backup.py b/apps/controller/database/mysql_controller/backup.py deleted file 
mode 100644 index dd500015..00000000 --- a/apps/controller/database/mysql_controller/backup.py +++ /dev/null @@ -1,137 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.database.mysql.backup import MysqlBackupApi -from apps.api.database.mysql.backup import MysqlBackupBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "mysql_id", - "backup_model", "backup_time"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "backup_time", - "backup_model", "mysql_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "mysql_id", - "backup_model", "backup_time"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - backup_time = data.pop("backup_time", None) - backup_model = data.pop("backup_model", None) - mysql_id = data.pop("mysql_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", 
None) - resource_id = data.pop("resource_id", None) - - create_data = {"name": name, "mysql_id": mysql_id, - "backup_time": backup_time, - "backup_model": backup_model} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class MysqlBackupController(BackendController): - allow_methods = ('GET', 'POST') - resource = MysqlBackupApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", "rds_id", "name", "enabled"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class MysqlBackupIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = MysqlBackupApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class MysqlBackupAddController(BackendAddController): - allow_methods = ("POST",) - resource = MysqlBackupBackendApi() - - - - -class MysqlBackupDeleteController(BackendDeleteController): - name = "MysqlBackup" - resource_describe = "MysqlBackup" - allow_methods = ("POST",) - resource = 
MysqlBackupBackendApi() - diff --git a/apps/controller/database/mysql_controller/database.py b/apps/controller/database/mysql_controller/database.py deleted file mode 100644 index de857c1f..00000000 --- a/apps/controller/database/mysql_controller/database.py +++ /dev/null @@ -1,130 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.database.mysql.database import MysqlDatabaseApi -from apps.api.database.mysql.database import MysqlDatabaseBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "mysql_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "mysql_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "mysql_id", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - mysql_id = data.pop("mysql_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", 
data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "mysql_id": mysql_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class MysqlDatabaseController(BackendController): - allow_methods = ('GET', 'POST') - resource = MysqlDatabaseApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", "rds_id", "name", "enabled"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class MysqlDatabaseIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = MysqlDatabaseApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class MysqlDatabaseAddController(BackendAddController): - allow_methods = ("POST",) - resource = MysqlDatabaseBackendApi() - - - -class MysqlDatabaseDeleteController(BackendDeleteController): - name = "MysqlDatabase" - resource_describe = "MysqlDatabase" - allow_methods = ("POST",) - resource = MysqlDatabaseBackendApi() - - diff --git 
a/apps/controller/database/mysql_controller/instance.py b/apps/controller/database/mysql_controller/instance.py deleted file mode 100644 index caccaf67..00000000 --- a/apps/controller/database/mysql_controller/instance.py +++ /dev/null @@ -1,204 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from lib.encrypt_helper import decrypt_str -from apps.api.database.mysql.instance import MysqlApi -from apps.api.database.mysql.instance import MysqlBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "subnet_id", - "user", "password", "port", "disk_type", - "disk_size", "version", "instance_type", - "vpc_id", "security_group_id", - "second_slave_zone", "first_slave_zone", - "charge_type"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "version", "subnet_id", "instance_type"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "subnet_id", - "user", "password", "disk_type", - "version", "instance_type", - "vpc_id", "second_slave_zone", - "first_slave_zone", "charge_type"], - ports=["port"], - ints=["disk_size"], - lists=["security_group_id"], - dicts=["extend_info"]) - - @classmethod - def decrypt_key(cls, str): - if str: - if 
str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - port = data.pop("port", None) - password = data.pop("password", None) - user = data.pop("user", None) - version = data.pop("version", None) - disk_type = data.pop("disk_type", None) - disk_size = data.pop("disk_size", None) - instance_type = data.pop("instance_type", None) - first_slave_zone = data.pop("first_slave_zone", None) - second_slave_zone = data.pop("second_slave_zone", None) - vpc_id = data.pop("vpc_id", None) - - charge_type = data.pop("charge_type", None) - security_group_id = validation.validate_list("security_group_id", data.pop("security_group_id", None)) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - d = dict(version=version, port=port, - password=password, user=user, - instance_type=instance_type, - vpc_id=vpc_id, first_slave_zone=first_slave_zone, - second_slave_zone=second_slave_zone, - security_group_id=security_group_id, - disk_type=disk_type, disk_size=disk_size, - subnet_id=subnet_id, charge_type=charge_type) - - create_data = {"name": name} - create_data.update(d) - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - _password = cls.decrypt_key(result.get("password")) - res = {"id": rid, "ipaddress": result.get("ipaddress"), - "port": result.get("port"), "user": result.get("user"), - "password": _password, - 
"resource_id": str(result.get("resource_id"))[:64]} - - return res, result - - -class MysqlController(BackendController): - allow_methods = ('GET', 'POST') - resource = MysqlApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled", - "subnet_id", "instance_type", "version", - "ipaddress", "port", - "disk_type", "disk_size"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class MysqlIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = MysqlApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - force_delete = data.get("force_delete", False) - return self.resource.destroy(rid) - - -class MysqlAddController(BackendAddController): - allow_methods = ("POST",) - resource = MysqlBackendApi() - - -class MysqlDeleteController(BackendDeleteController): - name = "Mysql" - resource_describe = "Mysql" - allow_methods = ("POST",) - resource = MysqlBackendApi() - - -class MysqlSourceController(BackendSourceController): - name = "Mysql" - resource_describe = "Mysql" - allow_methods = ("POST",) - resource = MysqlBackendApi() - - -class MysqlSGSourceController(BackendSourceController): - name = "Mysql" - resource_describe = "Mysql" - 
allow_methods = ("POST",) - resource = MysqlBackendApi() - - def fetch_source(self, rid, provider, region, zone, secret, resource_id, **kwargs): - return self.resource.sg_mysql_relationship(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id) diff --git a/apps/controller/database/mysql_controller/privilege.py b/apps/controller/database/mysql_controller/privilege.py deleted file mode 100644 index 8e13e332..00000000 --- a/apps/controller/database/mysql_controller/privilege.py +++ /dev/null @@ -1,136 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from lib.uuid_util import get_uuid -from apps.api.database.mysql.account import MysqlPrivilegeApi -from apps.api.database.mysql.account import MysqlPrivilegeBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "mysql_id", - "username", "database", "privileges"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "username", - "database", "privileges", "mysql_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "mysql_id", - "username", "database", "privileges"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = 
data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - username = data.pop("username", None) - database = data.pop("database", None) - privileges = data.pop("privileges", None) - mysql_id = data.pop("mysql_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - d = dict(username=username, mysql_id=mysql_id, - database=database, privileges=privileges, ) - create_data = {"name": name} - create_data.update(d) - - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class MysqlPrivilegeController(BackendController): - allow_methods = ('GET', 'POST') - resource = MysqlPrivilegeApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", "rds_id", "account_name", "database", "enabled"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class MysqlPrivilegeIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = MysqlPrivilegeApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - 
:return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class MysqlPrivilegeAddController(BackendAddController): - allow_methods = ("POST",) - resource = MysqlPrivilegeBackendApi() - - -class MysqlPrivilegeDeleteController(BackendDeleteController): - name = "MysqlPrivilege" - resource_describe = "MysqlPrivilege" - allow_methods = ("POST",) - resource = MysqlPrivilegeBackendApi() diff --git a/apps/controller/database/mysql_controller/subnet_group.py b/apps/controller/database/mysql_controller/subnet_group.py deleted file mode 100644 index 634d1692..00000000 --- a/apps/controller/database/mysql_controller/subnet_group.py +++ /dev/null @@ -1,134 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.database.rds.subnet_group import SubnetGroupApi -from apps.api.database.rds.subnet_group import SubnetGroupBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "subnet_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "subnet_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", 
"subnet_id", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "subnet_id": subnet_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))} - return res, result - - -class SubnetGroupController(BackendController): - allow_methods = ('GET', 'POST') - resource = SubnetGroupApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", "name", "enabled"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class SubnetGroupIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = SubnetGroupApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return 
self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class SubnetGroupAddController(BackendAddController): - allow_methods = ("POST",) - resource = SubnetGroupBackendApi() - - -class SubnetGroupDeleteController(BackendDeleteController): - name = "SubnetGroup" - resource_describe = "SubnetGroup" - allow_methods = ("POST",) - resource = SubnetGroupBackendApi() - - -class SubnetGroupSourceController(BackendSourceController): - name = "SubnetGroup" - resource_describe = "SubnetGroup" - allow_methods = ("POST",) - resource = SubnetGroupBackendApi() diff --git a/apps/controller/database/postgresql_controller/__init__.py b/apps/controller/database/postgresql_controller/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/database/postgresql_controller/postgreSQL_controller.py b/apps/controller/database/postgresql_controller/postgreSQL_controller.py deleted file mode 100644 index e4574a38..00000000 --- a/apps/controller/database/postgresql_controller/postgreSQL_controller.py +++ /dev/null @@ -1,203 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import base64 -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from lib.encrypt_helper import decrypt_str -from apps.api.database.rds.postgresql import PostgreSQLApi -from apps.api.database.rds.postgresql import PostgreSQLBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - 
validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "subnet_id", - "user", "password", "port", "disk_type", - "disk_size", "version", "instance_type", - "vpc_id", "security_group_id", - "second_slave_zone", "first_slave_zone", - "charge_type"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "version", "subnet_id", "instance_type"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "subnet_id", - "user", "password", "disk_type", - "version", "instance_type", - "vpc_id", "second_slave_zone", - "first_slave_zone", "charge_type"], - ports=["port"], - ints=["disk_size"], - lists=["security_group_id"], - dicts=["extend_info"]) - - @classmethod - def decrypt_key(cls, str): - if str: - if str.startswith("{cipher_a}"): - str = str[len("{cipher_a}"):] - str = decrypt_str(str) - - return str - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - port = data.pop("port", None) - password = data.pop("password", None) - user = data.pop("user", None) - version = data.pop("version", None) - disk_type = data.pop("disk_type", None) - disk_size = data.pop("disk_size", None) - instance_type = data.pop("instance_type", None) - first_slave_zone = data.pop("first_slave_zone", None) - second_slave_zone = data.pop("second_slave_zone", None) - vpc_id = data.pop("vpc_id", None) - charge_type = data.pop("charge_type", None) - - security_group_id = validation.validate_list("security_group_id", data.pop("security_group_id", None)) - - extend_info = validation.validate_dict("extend_info", 
data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - d = dict(version=version, port=port, - password=password, user=user, - instance_type=instance_type, - vpc_id=vpc_id, first_slave_zone=first_slave_zone, - second_slave_zone=second_slave_zone, - security_group_id=security_group_id, - disk_type=disk_type, disk_size=disk_size, - subnet_id=subnet_id, charge_type=charge_type) - - create_data = {"name": name} - create_data.update(d) - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - _password = cls.decrypt_key(result.get("password")) - res = {"id": rid, "ipaddress": result.get("ipaddress"), - "port": result.get("port"), "user": result.get("user"), - "password": _password, - "resource_id": str(result.get("resource_id"))[:64]} - - return res, result - - -class PostgreSQLController(BackendController): - allow_methods = ('GET', 'POST') - resource = PostgreSQLApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled", - "subnet_id", "instance_type", "version", - "ipaddress", "port", - "disk_type", "disk_size"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class PostgreSQLIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - 
resource = PostgreSQLApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class PostgreSQLAddController(BackendAddController): - allow_methods = ("POST",) - resource = PostgreSQLBackendApi() - - -class PostgreSQLDeleteController(BackendDeleteController): - name = "PostgreSQL" - resource_describe = "PostgreSQL" - allow_methods = ("POST",) - resource = PostgreSQLBackendApi() - - -class PostgreSQLSourceController(BackendSourceController): - name = "PostgreSQL" - resource_describe = "PostgreSQL" - allow_methods = ("POST",) - resource = PostgreSQLBackendApi() - - -class PostgreSQLSGSourceController(BackendSourceController): - name = "PostgreSQL" - resource_describe = "PostgreSQL" - allow_methods = ("POST",) - resource = PostgreSQLBackendApi() - - def fetch_source(self, rid, provider, region, zone, secret, resource_id, **kwargs): - return self.resource.sg_rds_relationship(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id) diff --git a/apps/controller/database/route.py b/apps/controller/database/route.py deleted file mode 100644 index 0a7af034..00000000 --- a/apps/controller/database/route.py +++ /dev/null @@ -1,81 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url -from mysql_controller import instance as mysql_instance -from mysql_controller import database as mysql_database -from mysql_controller import account as mysql_account -from mysql_controller import privilege as mysql_privilege -from mysql_controller import backup as mysql_backup -from mysql_controller import subnet_group as rds_subnet_group -from kv_controller import kvstore_controller -from kv_controller import kvstore_backup_controller -from kv_controller import 
redis_controller -from kv_controller import memcached_conrtoller - -urlpatterns = [ - url(r'^mysql$', mysql_instance.MysqlController()), - url(r'^mysql/(?P[\w-]+)$', mysql_instance.MysqlIdController()), - url(r'^backend/mysql/apply$', mysql_instance.MysqlAddController()), - url(r'^backend/mysql/destroy$', mysql_instance.MysqlDeleteController()), - url(r'^backend/mysql/source$', mysql_instance.MysqlSourceController()), - url(r'^backend/mysql/security_group/source$', mysql_instance.MysqlSGSourceController()), - - url(r'^mysql_database$', mysql_database.MysqlDatabaseController()), - url(r'^mysql_database/(?P[\w-]+)$', mysql_database.MysqlDatabaseIdController()), - url(r'^backend/mysql_database/apply$', mysql_database.MysqlDatabaseAddController()), - url(r'^backend/mysql_database/destroy$', mysql_database.MysqlDatabaseDeleteController()), - - url(r'^mysql_account$', mysql_account.MysqlAccountController()), - url(r'^mysql_account/(?P[\w-]+)$', mysql_account.MysqlAccountIdController()), - url(r'^backend/mysql_account/apply$', mysql_account.MysqlAccountAddController()), - url(r'^backend/mysql_account/destroy$', mysql_account.MysqlAccountDeleteController()), - - url(r'^mysql_privilege$', mysql_privilege.MysqlPrivilegeController()), - url(r'^mysql_privilege/(?P[\w-]+)$', mysql_privilege.MysqlPrivilegeIdController()), - url(r'^backend/mysql_privilege/apply$', mysql_privilege.MysqlPrivilegeAddController()), - url(r'^backend/mysql_privilege/destroy$', mysql_privilege.MysqlPrivilegeDeleteController()), - - url(r'^mysql_backup$', mysql_backup.MysqlBackupController()), - url(r'^mysql_backup/(?P[\w-]+)$', mysql_backup.MysqlBackupIdController()), - url(r'^backend/mysql_backup/apply$', mysql_backup.MysqlBackupAddController()), - url(r'^backend/mysql_backup/destroy$', mysql_backup.MysqlBackupDeleteController()), - - url(r'^rds_subnet_group$', rds_subnet_group.SubnetGroupController()), - url(r'^rds_subnet_group/(?P[\w-]+)$', rds_subnet_group.SubnetGroupIdController()), - 
url(r'^backend/rds_subnet_group/apply$', rds_subnet_group.SubnetGroupAddController()), - url(r'^backend/rds_subnet_group/destroy$', rds_subnet_group.SubnetGroupDeleteController()), - url(r'^backend/rds_subnet_group/source$', rds_subnet_group.SubnetGroupSourceController()), - - url(r'^redis$', redis_controller.RedisController()), - url(r'^redis/(?P[\w-]+)$', redis_controller.RedisIdController()), - url(r'^backend/redis/apply$', redis_controller.RedisAddController()), - url(r'^backend/redis/destroy$', redis_controller.RedisDeleteController()), - url(r'^backend/redis/source$', redis_controller.RedisSourceController()), - - url(r'^redis_backup$', redis_controller.RedisBackupController()), - url(r'^redis_backup/(?P[\w-]+)$', redis_controller.RedisBackupIdController()), - url(r'^backend/redis_backup/apply$', redis_controller.RedisBackupAddController()), - url(r'^backend/redis_backup/destroy$', redis_controller.RedisBackupDeleteController()), - - url(r'^memcached$', memcached_conrtoller.MemcachedController()), - url(r'^memcached/(?P[\w-]+)$', memcached_conrtoller.MemcachedIdController()), - url(r'^backend/memcached/apply$', memcached_conrtoller.MemcachedAddController()), - url(r'^backend/memcached/destroy$', memcached_conrtoller.MemcachedDeleteController()), - - url(r'^memcached_backup$', memcached_conrtoller.MemBackupController()), - url(r'^memcached_backup/(?P[\w-]+)$', memcached_conrtoller.MemBackupIdController()), - url(r'^backend/memcached_backup/apply$', memcached_conrtoller.MemBackupAddController()), - url(r'^backend/memcached_backup/destroy$', memcached_conrtoller.MemBackupDeleteController()), - - url(r'^kvstore$', kvstore_controller.KvStoreController()), - url(r'^kvstore/(?P[\w-]+)$', kvstore_controller.KvStoreIdController()), - url(r'^backend/kvstore/apply$', kvstore_controller.KvStoreAddController()), - url(r'^backend/kvstore/destroy$', kvstore_controller.KvStoreDeleteController()), - url(r'^backend/kvstore/source$', 
kvstore_controller.KvStoreSourceController()), - url(r'^backend/kvstore/security_group/source$', kvstore_controller.KvStoreSGSourceController()), - - url(r'^kvstore_backup$', kvstore_backup_controller.KvBackupController()), - url(r'^kvstore_backup/(?P[\w-]+)$', kvstore_backup_controller.KvBackupIdController()), - url(r'^backend/kvstore_backup/apply$', kvstore_backup_controller.KvBackupAddController()), - url(r'^backend/kvstore_backup/destroy$', kvstore_backup_controller.KvBackupDeleteController()), -] diff --git a/apps/controller/loadbalance/__init__.py b/apps/controller/loadbalance/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/loadbalance/lb_attach_controller.py b/apps/controller/loadbalance/lb_attach_controller.py deleted file mode 100644 index 0de6803e..00000000 --- a/apps/controller/loadbalance/lb_attach_controller.py +++ /dev/null @@ -1,168 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.loadbalance.lb_attach import LBAttachApi -from apps.api.loadbalance.lb_attach import LBAttachBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "lb_id", - "listener_id", "backend_servers"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "lb_id", "backend_servers"] - ) - - @classmethod - def 
validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "lb_id", - "listener_id"], - lists=["backend_servers"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - lb_id = data.pop("lb_id", None) - listener_id = data.pop("listener_id", None) - backend_servers = validation.validate_list("backend_servers", data.pop("backend_servers", None)) - - if not backend_servers: - instance_id = data.pop("instance_id", None) - backend_servers = {"instance_id": instance_id} - if not instance_id: - raise local_exceptions.ValueValidateError("backend_servers", "backend servers not permit null") - - weight = data.pop("weight", None) - port = data.pop("port", None) - if weight is not None: - backend_servers["weight"] = weight - if port is not None: - backend_servers["port"] = port - - if not backend_servers: - raise local_exceptions.ValueValidateError("backend_servers", "backend servers not permit null") - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - create_data = {"name": name, "lb_id": lb_id, - "listener_id": listener_id, - "backend_servers": backend_servers} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class LBAttachController(BackendController): - allow_methods = ('GET', 'POST') - resource = LBAttachApi() - - def list(self, request, data, 
orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - # todo 使用instance id 进行搜索/ instance - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "listener_id", "lb_id"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class LBAttachIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = LBAttachApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class LBDetachController(BackendIdController): - allow_methods = ('DELETE',) - resource = LBAttachApi() - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - instance_id = kwargs.pop("instance", None) - return self.resource.remove_instance(rid, instance_id) - - -class LBAttachAddController(BackendAddController): - allow_methods = ("POST",) - resource = LBAttachBackendApi() - - -class LBAttachDeleteController(BackendDeleteController): - name = "LBAttach" - resource_describe = "LBAttach" - allow_methods = ("POST",) - resource = LBAttachBackendApi() - - -class LBAttachSourceController(BackendSourceController): - name = "VPC" - resource_describe = "VPC" - allow_methods = ("POST",) - resource = LBAttachBackendApi() diff --git a/apps/controller/loadbalance/lb_controller.py b/apps/controller/loadbalance/lb_controller.py 
deleted file mode 100644 index c722f9fd..00000000 --- a/apps/controller/loadbalance/lb_controller.py +++ /dev/null @@ -1,161 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.loadbalance.lb import LBApi -from apps.api.loadbalance.lb import LBBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "subnet_id", - "network_type", "vpc_id", "charge_type"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "subnet_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "subnet_id", - "network_type", "vpc_id", "charge_type"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - vpc_id = data.pop("vpc_id", None) - network_type = data.pop("network_type", None) - charge_type = data.pop("charge_type", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - 
data.update(extend_info) - - create_data = {"name": name, "vpc_id": vpc_id, "charge_type": charge_type, - "subnet_id": subnet_id, "network_type": network_type} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "ipaddress": result.get("ipaddress"), - "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class LBController(BackendController): - allow_methods = ('GET', 'POST') - resource = LBApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', "ipaddress", - "provider_id", "name", "subnet_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class LBIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = LBApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class LBIdDettachController(BackendIdController): - allow_methods = ('PATCH',) - resource = LBApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", 
None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class LBAddController(BackendAddController): - allow_methods = ("POST",) - resource = LBBackendApi() - - -class LBDeleteController(BackendDeleteController): - name = "LB" - resource_describe = "LB" - allow_methods = ("POST",) - resource = LBBackendApi() - - -class LBSourceController(BackendSourceController): - name = "LB" - resource_describe = "LB" - allow_methods = ("POST",) - resource = LBBackendApi() diff --git a/apps/controller/loadbalance/lb_rule_controller.py b/apps/controller/loadbalance/lb_rule_controller.py deleted file mode 100644 index 24c0054b..00000000 --- a/apps/controller/loadbalance/lb_rule_controller.py +++ /dev/null @@ -1,165 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.loadbalance.lb_rule import LBRuleApi -from apps.api.loadbalance.lb_rule import LBRuleBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "listener_id", "extend_info", "lb_id", - "security_group_id", "frontend_port", - "name", - # "domain", "url", - # "health_check_http_code", "health_check_interval", - # "health_check_uri", "health_check_connect_port", - # "health_check_timeout", "health_check_http_method", - # "scheduler", "certificate_id", "certificate_ca_id" - ]) - - @classmethod - def not_null(cls, 
data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "lb_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_port(data.get("frontend_port"), permit_null=True) - validation.validate_collector(data=data, - strings=["id", "provider", "secret", "region", "zone", - "listener_id", "lb_id", - "security_group_id", - "name", - # "domain", "url", - # "health_check_http_code", "health_check_interval", - # "health_check_uri", "health_check_connect_port", - # "health_check_timeout", "health_check_http_method", - # "scheduler", "certificate_id", "certificate_ca_id" - ], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - - frontend_port = validation.validate_port(data.pop("frontend_port", None), permit_null=True) - - create_data = {} - for key in ["listener_id", "lb_id", - "security_group_id", - "name", - # "domain", "url", - # "health_check_http_code", "health_check_interval", - # "health_check_uri", "health_check_connect_port", - # "health_check_timeout", "health_check_http_method", - # "scheduler", "certificate_id", "certificate_ca_id" - ]: - if data.get(key) is not None: - create_data[key] = data.pop(key, None) - - if frontend_port: - create_data["frontend_port"] = frontend_port - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))} - return res, result - - -class 
LBRuleController(BackendController): - allow_methods = ('GET', 'POST') - resource = LBRuleApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "lb_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class LBRuleIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = LBRuleApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class LBRuleAddController(BackendAddController): - allow_methods = ("POST",) - resource = LBRuleBackendApi() - - -class LBRuleDeleteController(BackendDeleteController): - name = "LBRule" - resource_describe = "LBRule" - allow_methods = ("POST",) - resource = LBRuleBackendApi() - - -class LBRuleSourceController(BackendSourceController): - name = "LBRule" - resource_describe = "LBRule" - allow_methods = ("POST",) - resource = LBRuleBackendApi() diff --git a/apps/controller/loadbalance/listener_controller.py b/apps/controller/loadbalance/listener_controller.py deleted file mode 100644 index df4bd522..00000000 --- a/apps/controller/loadbalance/listener_controller.py +++ /dev/null @@ -1,146 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import 
(absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.loadbalance.listener import LBListenerApi -from apps.api.loadbalance.listener import LBListenerBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "lb_id", - "port", "protocol", "backend_port", - "health_check", "health_check_uri", ]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "lb_id", "port"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_port(data.get("port")) - validation.validate_port(data.get("backend_port"), permit_null=True) - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "lb_id", "protocol", - "health_check", "health_check_uri"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - lb_id = data.pop("lb_id", None) - port = int(data.pop("port")) - protocol = data.pop("protocol", None) - backend_port = validation.validate_port(data.pop("backend_port", None), permit_null=True) - health_check = data.pop("health_check", None) - health_check_uri = data.pop("health_check_uri", None) - - asset_id = data.pop("asset_id", None) - resource_id 
= data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "lb_id": lb_id, "port": port, - "protocol": protocol, "backend_port": backend_port, - "health_check": health_check, - "health_check_uri": health_check_uri} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class LBListenerController(BackendController): - allow_methods = ('GET', 'POST') - resource = LBListenerApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "lb_id", "port", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class LBListenerIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = LBListenerApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class LBListenerAddController(BackendAddController): - allow_methods = ("POST",) - resource 
= LBListenerBackendApi() - - -class LBListenerDeleteController(BackendDeleteController): - name = "LBListener" - resource_describe = "LBListener" - allow_methods = ("POST",) - resource = LBListenerBackendApi() - - -class LBListenerSourceController(BackendSourceController): - name = "LBListener" - resource_describe = "LBListener" - allow_methods = ("POST",) - resource = LBListenerBackendApi() diff --git a/apps/controller/loadbalance/route.py b/apps/controller/loadbalance/route.py deleted file mode 100644 index cb12bf36..00000000 --- a/apps/controller/loadbalance/route.py +++ /dev/null @@ -1,41 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url -import lb_controller -import lb_attach_controller -import listener_controller -import server_group_controller -import lb_rule_controller - -urlpatterns = [ - url(r'^lb$', lb_controller.LBController()), - url(r'^lb/(?P[\w-]+)$', lb_controller.LBIdController()), - url(r'^backend/lb/apply$', lb_controller.LBAddController()), - url(r'^backend/lb/destroy$', lb_controller.LBDeleteController()), - url(r'^backend/lb/source$', lb_controller.LBSourceController()), - - url(r'^lb_listener$', listener_controller.LBListenerController()), - url(r'^lb_listener/(?P[\w-]+)$', listener_controller.LBListenerIdController()), - url(r'^backend/lb_listener/apply$', listener_controller.LBListenerAddController()), - url(r'^backend/lb_listener/destroy$', listener_controller.LBListenerDeleteController()), - url(r'^backend/lb_listener/source$', listener_controller.LBListenerSourceController()), - - url(r'^lb_attach$', lb_attach_controller.LBAttachController()), - url(r'^lb_attach/(?P[\w-]+)$', lb_attach_controller.LBAttachIdController()), - url(r'^lb_attach/(?P[\w-]+)/(?P[\w-]+)$', lb_attach_controller.LBDetachController()), - url(r'^backend/lb_attach/apply$', lb_attach_controller.LBAttachAddController()), - url(r'^backend/lb_attach/destroy$', lb_attach_controller.LBAttachDeleteController()), - url(r'^backend/lb_attach/source$', 
lb_attach_controller.LBAttachSourceController()), - - url(r'^lb_rule$', lb_rule_controller.LBRuleController()), - url(r'^lb_rule/(?P[\w-]+)$', lb_rule_controller.LBRuleIdController()), - url(r'^backend/lb_rule/apply$', lb_rule_controller.LBRuleAddController()), - url(r'^backend/lb_rule/destroy$', lb_rule_controller.LBRuleDeleteController()), - url(r'^backend/lb_rule/source$', lb_rule_controller.LBRuleSourceController()), - - url(r'^lb_server_group$', server_group_controller.LBGroupController()), - url(r'^lb_server_group/(?P[\w-]+)$', server_group_controller.LBGroupIdController()), - url(r'^backend/lb_server_group/apply$', server_group_controller.LBGroupAddController()), - url(r'^backend/lb_server_group/destroy$', server_group_controller.LBGroupDeleteController()), - url(r'^backend/lb_server_group/source$', server_group_controller.LBGroupSourceController()), -] diff --git a/apps/controller/loadbalance/server_group_controller.py b/apps/controller/loadbalance/server_group_controller.py deleted file mode 100644 index 67a56d1c..00000000 --- a/apps/controller/loadbalance/server_group_controller.py +++ /dev/null @@ -1,139 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.loadbalance.lb_group import LBGroupApi -from apps.api.loadbalance.lb_group import LBGroupBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "lb_id", "instance_id", "port"]) - - @classmethod - 
def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "lb_id", "name"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_port(data.get("port"), permit_null=True) - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "lb_id", "instance_id", - ], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - lb_id = data.pop("lb_id", None) - x_port = data.pop("port", None) - port = int(x_port) if x_port is not None else None - instance_id = data.pop("instance_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "lb_id": lb_id, "port": port, - "instance_id": instance_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))} - return res, result - - -class LBGroupController(BackendController): - allow_methods = ('GET', 'POST') - resource = LBGroupApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "lb_id", "port", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def 
before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class LBGroupIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = LBGroupApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class LBGroupAddController(BackendAddController): - allow_methods = ("POST",) - resource = LBGroupBackendApi() - - -class LBGroupDeleteController(BackendDeleteController): - name = "LBGroup" - resource_describe = "LBGroup" - allow_methods = ("POST",) - resource = LBGroupBackendApi() - - -class LBGroupSourceController(BackendSourceController): - name = "LBGroup" - resource_describe = "LBGroup" - allow_methods = ("POST",) - resource = LBGroupBackendApi() diff --git a/apps/controller/network/__init__.py b/apps/controller/network/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/network/ccn_attach_controller.py b/apps/controller/network/ccn_attach_controller.py deleted file mode 100644 index 151a578c..00000000 --- a/apps/controller/network/ccn_attach_controller.py +++ /dev/null @@ -1,142 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.ccn_attach import CCNAttachApi -from apps.api.network.ccn_attach import CCNAttachBackendApi -from 
apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "ccn_id", "instance_id", - "instance_type", "instance_region", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "ccn_id", "instance_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "ccn_id", "instance_id", - "instance_type", "instance_region"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - vpc_id = data.pop("instance_id", None) - instance_region = data.pop("instance_region", None) - instance_type = data.pop("instance_type", None) - ccn_id = data.pop("ccn_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "vpc_id": vpc_id, - "instance_region": instance_region, - "instance_type": instance_type, - "ccn_id": ccn_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class CCNAttachController(BackendController): - 
allow_methods = ('GET', 'POST') - resource = CCNAttachApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', "ccn_id", - "instance_type", "instance_region", "instance_id", - "provider_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class CCNAttachIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = CCNAttachApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class CCNAttachAddController(BackendAddController): - allow_methods = ("POST",) - resource = CCNAttachBackendApi() - - -class CCNAttachDeleteController(BackendDeleteController): - name = "CCNAttach" - resource_describe = "CCNAttach" - allow_methods = ("POST",) - resource = CCNAttachBackendApi() - - -class CCNAttachSourceController(BackendSourceController): - name = "CCNAttach" - resource_describe = "CCNAttach" - allow_methods = ("POST",) - resource = CCNAttachBackendApi() diff --git a/apps/controller/network/ccn_bandwidth_controller.py b/apps/controller/network/ccn_bandwidth_controller.py deleted file mode 100644 index 1fcb9e95..00000000 --- a/apps/controller/network/ccn_bandwidth_controller.py +++ /dev/null @@ -1,143 +0,0 @@ -# _ 
coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.ccn_bandwidth import CCNBandwidthApi -from apps.api.network.ccn_bandwidth import CCNBandwidthBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "ccn_id", "from_region", - "dest_region", "bandwidth", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "ccn_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "ccn_id", - "from_region", "dest_region", "bandwidth"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - from_region = data.pop("from_region", None) - dest_region = data.pop("dest_region", None) - bandwidth = data.pop("bandwidth", None) - ccn_id = data.pop("ccn_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "ccn_id": ccn_id, - "bandwidth": bandwidth, - "from_region": 
from_region, - "dest_region": dest_region} - - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class CCNBandwidthController(BackendController): - allow_methods = ('GET', 'POST') - resource = CCNBandwidthApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', "ccn_id", - "from_region", "dest_region", "bandwidth", - "provider_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class CCNBandwidthIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = CCNBandwidthApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class CCNBandwidthAddController(BackendAddController): - allow_methods = ("POST",) - resource = CCNBandwidthBackendApi() - - -class CCNBandwidthDeleteController(BackendDeleteController): - name = "CCNBandwidth" - resource_describe = "CCNBandwidth" - allow_methods = ("POST",) - resource = CCNBandwidthBackendApi() - - -class 
CCNBandSourceController(BackendSourceController): - name = "CCNBandwidth" - resource_describe = "CCNBandwidth" - allow_methods = ("POST",) - resource = CCNBandwidthBackendApi() diff --git a/apps/controller/network/ccn_controller.py b/apps/controller/network/ccn_controller.py deleted file mode 100644 index 2bf0cfd4..00000000 --- a/apps/controller/network/ccn_controller.py +++ /dev/null @@ -1,132 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.connnect_network import CCNApi -from apps.api.network.connnect_network import CCNBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - 
data.update(extend_info) - - create_data = {"name": name} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class CCNController(BackendController): - allow_methods = ('GET', 'POST') - resource = CCNApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class CCNIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = CCNApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class CCNAddController(BackendAddController): - allow_methods = ("POST",) - resource = CCNBackendApi() - - -class CCNDeleteController(BackendDeleteController): - name = "CCN" - resource_describe = "CCN" - allow_methods = ("POST",) - resource = CCNBackendApi() - - -class CCNSourceController(BackendSourceController): - name = "CCN" - resource_describe = "CCN" - allow_methods = ("POST",) - resource = CCNBackendApi() diff 
--git a/apps/controller/network/eip_association_controller.py b/apps/controller/network/eip_association_controller.py deleted file mode 100644 index 6c4706af..00000000 --- a/apps/controller/network/eip_association_controller.py +++ /dev/null @@ -1,140 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.eip_association import EipAssociationApi -from apps.api.network.eip_association import EipAssociationBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "eip_id", "instance_id", - "private_ip", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "eip_id", "instance_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "eip_id", - "instance_id", "private_ip"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - eip_id = data.pop("eip_id", None) - instance_id = data.pop("instance_id", None) - eni_id = data.pop("eni_id", None) # 统一使用instance id 不使用eni - private_ip = data.pop("private_ip", None) - - asset_id = 
data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "eip_id": eip_id, - "instance_id": instance_id, - "eni_id": eni_id, "private_ip": private_ip} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class EipAssociationController(BackendController): - allow_methods = ('GET', 'POST') - resource = EipAssociationApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', "instance_id", - "provider_id", "name", "eip_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class EipAssociationIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = EipAssociationApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class EipAssociationAddController(BackendAddController): - allow_methods = ("POST",) - resource = 
EipAssociationBackendApi() - - -class EipAssociationDeleteController(BackendDeleteController): - name = "EipAssociation" - resource_describe = "EipAssociation" - allow_methods = ("POST",) - resource = EipAssociationBackendApi() - - -class EipAssSourceController(BackendSourceController): - name = "EipAssociation" - resource_describe = "EipAssociation" - allow_methods = ("POST",) - resource = EipAssociationBackendApi() diff --git a/apps/controller/network/eip_controller.py b/apps/controller/network/eip_controller.py deleted file mode 100644 index 40b43781..00000000 --- a/apps/controller/network/eip_controller.py +++ /dev/null @@ -1,134 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.eip import EipApi -from apps.api.network.eip import EipBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "charge_type", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "charge_type"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - 
provider = data.pop("provider", None) - name = data.pop("name", None) - charge_type = data.pop("charge_type", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "charge_type": charge_type} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64], - "ipaddress": result.get("ipaddress")} - return res, result - - -class EipController(BackendController): - allow_methods = ('GET', 'POST') - resource = EipApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "ipaddress", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class EipIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = EipApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class EipAddController(BackendAddController): - 
allow_methods = ("POST",) - resource = EipBackendApi() - - -class EipDeleteController(BackendDeleteController): - name = "Eip" - resource_describe = "Eip" - allow_methods = ("POST",) - resource = EipBackendApi() - - -class EipSourceController(BackendSourceController): - name = "Eip" - resource_describe = "Eip" - allow_methods = ("POST",) - resource = EipBackendApi() diff --git a/apps/controller/network/ip_group_controller.py b/apps/controller/network/ip_group_controller.py deleted file mode 100644 index 7640e0a8..00000000 --- a/apps/controller/network/ip_group_controller.py +++ /dev/null @@ -1,27 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from apps.api.network.ip_group import IpGroupBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class IpGroupAddController(BackendAddController): - allow_methods = ("POST",) - resource = IpGroupBackendApi() - - -class IpGroupDeleteController(BackendDeleteController): - name = "IpGroup" - resource_describe = "IpGroup" - allow_methods = ("POST",) - resource = IpGroupBackendApi() - - -class IpGroupSourceController(BackendSourceController): - name = "IpGroup" - resource_describe = "IpGroup" - allow_methods = ("POST",) - resource = IpGroupBackendApi() diff --git a/apps/controller/network/nat_controller.py b/apps/controller/network/nat_controller.py deleted file mode 100644 index f7468936..00000000 --- a/apps/controller/network/nat_controller.py +++ /dev/null @@ -1,150 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BaseController -from core.controller import BackendIdController -from lib.uuid_util import 
get_uuid -from apps.api.network.nat_gateway import NatGatewayApi -from apps.api.network.nat_gateway import NatGatewayBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "vpc_id", "subnet_id", "eip", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "vpc_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "vpc_id", "eip", - "subnet_id", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - vpc_id = data.pop("vpc_id", None) - subnet_id = data.pop("subnet_id", None) - eip = data.pop("eip", None) - # eip = validation.validate_list("eip", data.pop("eip", None)) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - bandwidth = data.pop("bandwidth", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "vpc_id": vpc_id, - "subnet_id": subnet_id, "eip": eip, - "bandwidth": bandwidth} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "ipaddress": result.get("ipaddress"), - "resource_id": 
str(result.get("resource_id"))[:64]} - return res, result - - -class NatGatewayController(BackendController): - allow_methods = ('GET', 'POST') - resource = NatGatewayApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "vpc", "subnet", - "ipaddress", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class NatGatewayIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = NatGatewayApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["name"]) - validation.not_allowed_null(data=data, - keys=["name"] - ) - - validation.validate_string("name", data["name"]) - - -class NatGatewayAddController(BackendAddController): - allow_methods = ("POST",) - resource = NatGatewayBackendApi() - - -class NatGatewayDeleteController(BackendDeleteController): - name = "NatGateway" - resource_describe = "NatGateway" - allow_methods = ("POST",) - resource = NatGatewayBackendApi() - - -class NatSourceController(BackendSourceController): - name = "NatGateway" - resource_describe = "NatGateway" - allow_methods = ("POST",) - 
resource = NatGatewayBackendApi() diff --git a/apps/controller/network/peer_connection_controller.py b/apps/controller/network/peer_connection_controller.py deleted file mode 100644 index 03e0f1c4..00000000 --- a/apps/controller/network/peer_connection_controller.py +++ /dev/null @@ -1,138 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.peer_connection import PeerConnApi -from apps.api.network.peer_connection import PeerConnBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "vpc_id", "peer_vpc_id", "peer_region", - "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "vpc_id", "peer_vpc_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "vpc_id", "secret", - "peer_vpc_id", "peer_region"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - vpc_id = data.pop("vpc_id", None) - peer_vpc_id = data.pop("peer_vpc_id", None) - peer_region = data.pop("peer_region", None) - - asset_id = data.pop("asset_id", None) 
- resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "vpc_id": vpc_id, - "peer_vpc_id": peer_vpc_id, "peer_region": peer_region} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class PeerConnController(BackendController): - allow_methods = ('GET', 'POST') - resource = PeerConnApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class PeerConnIdController(BackendIdController): - allow_methods = ('GET', 'DELETE',) - resource = PeerConnApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class PeerConnAddController(BackendAddController): - allow_methods = ("POST",) - resource = PeerConnBackendApi() - - -class PeerConnDeleteController(BackendDeleteController): - name = 
"PeerConn" - resource_describe = "PeerConn" - allow_methods = ("POST",) - resource = PeerConnBackendApi() - - -class PeerConnSourceController(BackendSourceController): - name = "PeerConn" - resource_describe = "PeerConn" - allow_methods = ("POST",) - resource = PeerConnBackendApi() diff --git a/apps/controller/network/route.py b/apps/controller/network/route.py deleted file mode 100644 index 2f6a524a..00000000 --- a/apps/controller/network/route.py +++ /dev/null @@ -1,114 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url - -import vpc_controller -import nat_controller -import eip_controller -import eip_association_controller -import ccn_controller -import ccn_attach_controller -import ccn_bandwidth_controller -import subnet_controller -import routetable_controller -import route_entry_controller -import security_group_controller -import security_group_rule_controller -import peer_connection_controller -import ip_group_controller - -from route_entry_controller import RouteEntryController -from route_entry_controller import RouteEntryIdController -from route_entry_controller import RouteEntryAddController -from route_entry_controller import RouteEntryDeleteController - -from security_group_controller import SecGroupController -from security_group_controller import SecGroupIdController -from security_group_controller import SecGroupAddController -from security_group_controller import SecGroupDeleteController - -from security_group_rule_controller import SecGroupRuleController -from security_group_rule_controller import SecGroupRuleIdController -from security_group_rule_controller import SecGroupRuleAddController -from security_group_rule_controller import SecGroupRuleDeleteController - -urlpatterns = [ - url(r'^vpc$', vpc_controller.VPCController()), - url(r'^vpc/(?P[\w-]+)$', vpc_controller.VPCIdController()), - url(r'^backend/vpc/apply$', vpc_controller.VPCAddController()), - url(r'^backend/vpc/destroy$', 
vpc_controller.VPCDeleteController()), - url(r'^backend/vpc/source$', vpc_controller.VPCSourceController()), - - url(r'^backend/ip_group/apply$', ip_group_controller.IpGroupAddController()), - url(r'^backend/ip_group/destroy$', ip_group_controller.IpGroupDeleteController()), - url(r'^backend/ip_group/source$', ip_group_controller.IpGroupSourceController()), - - url(r'^subnet$', subnet_controller.SubnetController()), - url(r'^subnet/(?P[\w-]+)$', subnet_controller.SubnetIdController()), - url(r'^backend/subnet/apply$', subnet_controller.SubnetAddController()), - url(r'^backend/subnet/destroy$', subnet_controller.SubnetDeleteController()), - url(r'^backend/subnet/source$', subnet_controller.SubnetSourceController()), - - url(r'^route_table$', routetable_controller.RouteTableController()), - url(r'^route_table/(?P[\w-]+)$', routetable_controller.RouteTableIdController()), - url(r'^backend/route_table/apply$', routetable_controller.RouteTableAddController()), - url(r'^backend/route_table/destroy$', routetable_controller.RouteTableDeleteController()), - url(r'^backend/route_table/source$', routetable_controller.RouteTableSourceController()), - - url(r'^route_entry$', RouteEntryController()), - url(r'^route_entry/(?P[\w-]+)$', RouteEntryIdController()), - url(r'^backend/route_entry/apply$', RouteEntryAddController()), - url(r'^backend/route_entry/destroy$', RouteEntryDeleteController()), - url(r'^backend/route_entry/source$', route_entry_controller.RTRuleSourceController()), - - url(r'^security_group$', SecGroupController()), - url(r'^security_group/(?P[\w-]+)$', SecGroupIdController()), - url(r'^backend/security_group/apply$', SecGroupAddController()), - url(r'^backend/security_group/destroy$', SecGroupDeleteController()), - url(r'^backend/security_group/source$', security_group_controller.SGSourceController()), - - url(r'^security_group_rule$', SecGroupRuleController()), - url(r'^security_group_rule/(?P[\w-]+)$', SecGroupRuleIdController()), - 
url(r'^backend/security_group_rule/apply$', SecGroupRuleAddController()), - url(r'^backend/security_group_rule/destroy$', SecGroupRuleDeleteController()), - url(r'^backend/security_group_rule/source$', security_group_rule_controller.SGRuleSourceController()), - - url(r'^nat$', nat_controller.NatGatewayController()), - url(r'^nat/(?P[\w-]+)$', nat_controller.NatGatewayIdController()), - url(r'^backend/nat/apply$', nat_controller.NatGatewayAddController()), - url(r'^backend/nat/destroy$', nat_controller.NatGatewayDeleteController()), - url(r'^backend/nat/source$', nat_controller.NatSourceController()), - - url(r'^eip$', eip_controller.EipController()), - url(r'^eip/(?P[\w-]+)$', eip_controller.EipIdController()), - url(r'^backend/eip/apply$', eip_controller.EipAddController()), - url(r'^backend/eip/destroy$', eip_controller.EipDeleteController()), - url(r'^backend/eip/source$', eip_controller.EipSourceController()), - - url(r'^eip_association$', eip_association_controller.EipAssociationController()), - url(r'^eip_association/(?P[\w-]+)$', eip_association_controller.EipAssociationIdController()), - url(r'^backend/eip_association/apply$', eip_association_controller.EipAssociationAddController()), - url(r'^backend/eip_association/destroy$', eip_association_controller.EipAssociationDeleteController()), - - url(r'^ccn$', ccn_controller.CCNController()), - url(r'^ccn/(?P[\w-]+)$', ccn_controller.CCNIdController()), - url(r'^backend/ccn/apply$', ccn_controller.CCNAddController()), - url(r'^backend/ccn/destroy$', ccn_controller.CCNDeleteController()), - url(r'^backend/ccn/source$', ccn_controller.CCNSourceController()), - - url(r'^ccn_attach$', ccn_attach_controller.CCNAttachController()), - url(r'^ccn_attach/(?P[\w-]+)$', ccn_attach_controller.CCNAttachIdController()), - url(r'^backend/ccn_attach/apply$', ccn_attach_controller.CCNAttachAddController()), - url(r'^backend/ccn_attach/destroy$', ccn_attach_controller.CCNAttachDeleteController()), - - url(r'^ccn_bandwidth$', 
ccn_bandwidth_controller.CCNBandwidthController()), - url(r'^ccn_bandwidth/(?P[\w-]+)$', ccn_bandwidth_controller.CCNBandwidthIdController()), - url(r'^backend/ccn_bandwidth/apply$', ccn_bandwidth_controller.CCNBandwidthAddController()), - url(r'^backend/ccn_bandwidth/destroy$', ccn_bandwidth_controller.CCNBandwidthDeleteController()), - - url(r'^peer_connection$', peer_connection_controller.PeerConnController()), - url(r'^peer_connection/(?P[\w-]+)$', peer_connection_controller.PeerConnIdController()), - url(r'^backend/peer_connection/apply$', peer_connection_controller.PeerConnAddController()), - url(r'^backend/peer_connection/destroy$', peer_connection_controller.PeerConnDeleteController()), - url(r'^backend/peer_connection/source$', peer_connection_controller.PeerConnSourceController()), -] diff --git a/apps/controller/network/route_entry_controller.py b/apps/controller/network/route_entry_controller.py deleted file mode 100644 index 75836b61..00000000 --- a/apps/controller/network/route_entry_controller.py +++ /dev/null @@ -1,149 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from lib.logs import logger -from core.response_hooks import format_string -from apps.controller.configer.model_args import source_columns_outputs -from apps.api.configer.region import ZoneApi -from apps.api.network.route_entry import RouteEntryApi -from apps.api.network.route_entry import RouteEntryBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - 
validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "vpc_id", "destination", - "route_table_id", "next_type", - "next_hub", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "vpc_id", - "route_table_id", "next_type", - "next_hub", "destination"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "vpc_id", "destination", - "route_table_id", "next_type", "next_hub"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - vpc_id = data.pop("vpc_id", None) - route_table = data.pop("route_table_id", None) - next_type = data.pop("next_type", None) - next_hub = data.pop("next_hub", None) - destination = data.pop("destination", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "vpc_id": vpc_id, "destination": destination, - "route_table_id": route_table, "next_type": next_type, - "next_hub": next_hub} - - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class RouteEntryController(BackendController): - allow_methods = ('GET', 'POST') - resource = RouteEntryApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param 
request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', 'destination', - "provider_id", "name", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class RouteEntryIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = RouteEntryApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class RouteEntryAddController(BackendAddController): - allow_methods = ("POST",) - resource = RouteEntryBackendApi() - - -class RouteEntryDeleteController(BackendDeleteController): - name = "RouteEntry" - resource_describe = "RouteEntry" - allow_methods = ("POST",) - resource = RouteEntryBackendApi() - - -class RTRuleSourceController(BackendSourceController): - name = "RouteEntry" - resource_describe = "RouteEntry" - allow_methods = ("POST",) - resource = RouteEntryBackendApi() diff --git a/apps/controller/network/routetable_controller.py b/apps/controller/network/routetable_controller.py deleted file mode 100644 index ec4c8711..00000000 --- a/apps/controller/network/routetable_controller.py +++ /dev/null @@ -1,146 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from 
core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.route_table import RouteTableApi -from apps.api.network.route_table import RouteTableBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "vpc_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "vpc_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "vpc_id", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - vpc_id = data.pop("vpc_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "vpc_id": vpc_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class RouteTableController(BackendController): - allow_methods = ('GET', 'POST') - resource = RouteTableApi() - - def list(self, request, data, orderby=None, 
page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class RouteTableIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = RouteTableApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["name"]) - validation.not_allowed_null(data=data, - keys=["name"] - ) - - validation.validate_string("name", data["name"]) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - name = data.pop("name", None) - return self.resource.update(rid, name, extend_info={}) - - -class RouteTableAddController(BackendAddController): - allow_methods = ("POST",) - resource = RouteTableBackendApi() - - -class RouteTableDeleteController(BackendDeleteController): - name = "RouteTable" - resource_describe = "RouteTable" - allow_methods = ("POST",) - resource = RouteTableBackendApi() - - -class RouteTableSourceController(BackendSourceController): - name = "RouteTable" - resource_describe = "RouteTable" - allow_methods = ("POST",) - resource = RouteTableBackendApi() diff --git 
a/apps/controller/network/security_group_controller.py b/apps/controller/network/security_group_controller.py deleted file mode 100644 index 935fa290..00000000 --- a/apps/controller/network/security_group_controller.py +++ /dev/null @@ -1,133 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.security_group import SecGroupApi -from apps.api.network.security_group import SecGroupBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "vpc_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "vpc_id", "name"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "vpc_id", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - secret = data.pop("secret", None) - provider = data.pop("provider", None) - rid = data.pop("id", None) or get_uuid() - name = data.pop("name", None) - zone = data.pop("zone", None) - region = data.pop("region", None) - vpc_id = data.pop("vpc_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "vpc_id": vpc_id} - _, 
result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class SecGroupController(BackendController): - allow_methods = ('GET', 'POST') - resource = SecGroupApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "vpc", "name", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class SecGroupIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = SecGroupApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class SecGroupAddController(BackendAddController): - allow_methods = ("POST",) - resource = SecGroupBackendApi() - - -class SecGroupDeleteController(BackendDeleteController): - name = "SecGroup" - resource_describe = "SecGroup" - allow_methods = ("POST",) - resource = SecGroupBackendApi() - - -class SGSourceController(BackendSourceController): - name = "SecGroup" - resource_describe = "SecGroup" - allow_methods = ("POST",) - resource = SecGroupBackendApi() 
diff --git a/apps/controller/network/security_group_rule_controller.py b/apps/controller/network/security_group_rule_controller.py deleted file mode 100644 index f868b3d2..00000000 --- a/apps/controller/network/security_group_rule_controller.py +++ /dev/null @@ -1,147 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.security_group_rule import SecGroupRuleApi -from apps.api.network.security_group_rule import SecGroupRuleBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "security_group_id", - "type", "cidr_ip", "ip_protocol", - "ports", "policy", "description"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "security_group_id", - "type", "cidr_ip", "ip_protocol", "ports", "policy"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", - "type", "cidr_ip", "ip_protocol", - "ports", "policy", "security_group_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - name = data.pop("name", None) - secret = data.pop("secret", None) - region = data.pop("region", None) - provider = data.pop("provider", None) - - zone = data.pop("zone", None) - security_group_id = 
data.pop("security_group_id", None) - type = data.pop("type", None) - cidr_ip = data.pop("cidr_ip", None) - ip_protocol = data.pop("ip_protocol", None) - ports = data.pop("ports", None) - policy = data.pop("policy", None) - description = data.pop("description") - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - data.update(extend_info) - - create_data = {"name": name, "security_group_id": security_group_id, - "type": type, "cidr_ip": cidr_ip, "ip_protocol": ip_protocol, - "ports": ports, "policy": policy, "description": description} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class SecGroupRuleController(BackendController): - allow_methods = ('GET', 'POST') - resource = SecGroupRuleApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "security_group_id", "name"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class SecGroupRuleIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = SecGroupRuleApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - 
:param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class SecGroupRuleAddController(BackendAddController): - allow_methods = ("POST",) - resource = SecGroupRuleBackendApi() - - -class SecGroupRuleDeleteController(BackendDeleteController): - name = "SecGroupRule" - resource_describe = "SecGroupRule" - allow_methods = ("POST",) - resource = SecGroupRuleBackendApi() - - -class SGRuleSourceController(BackendSourceController): - name = "SecGroupRule" - resource_describe = "SecGroupRule" - allow_methods = ("POST",) - resource = SecGroupRuleBackendApi() diff --git a/apps/controller/network/subnet_controller.py b/apps/controller/network/subnet_controller.py deleted file mode 100644 index 3acd59bc..00000000 --- a/apps/controller/network/subnet_controller.py +++ /dev/null @@ -1,135 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.network.subnet import SubnetApi -from apps.api.network.subnet import SubnetBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "vpc_id", "cidr", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "vpc_id", "cidr"] - ) - - @classmethod - def validate_keys(cls, data): - 
validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "cidr", "secret", "vpc_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - name = data.pop("name", None) - cidr = data.pop("cidr", None) - secret = data.pop("secret", None) - region = data.pop("region", None) - provider = data.pop("provider", None) - vpc_id = data.pop("vpc_id", None) - zone = data.pop("zone", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "cidr": cidr, "vpc_id": vpc_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class SubnetController(BackendController): - allow_methods = ('GET', 'POST') - resource = SubnetApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "zone", "name", "cidr", - "vpc", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class SubnetIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = 
SubnetApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class SubnetAddController(BackendAddController): - allow_methods = ("POST",) - resource = SubnetBackendApi() - - -class SubnetDeleteController(BackendDeleteController): - name = "Subnet" - resource_describe = "Subnet" - allow_methods = ("POST",) - resource = SubnetBackendApi() - - -class SubnetSourceController(BackendSourceController): - name = "Subnet" - resource_describe = "Subnet" - allow_methods = ("POST",) - resource = SubnetBackendApi() diff --git a/apps/controller/network/vpc_controller.py b/apps/controller/network/vpc_controller.py deleted file mode 100644 index 47cf0e88..00000000 --- a/apps/controller/network/vpc_controller.py +++ /dev/null @@ -1,135 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from core import local_exceptions as exception_common -from apps.api.network.vpc import VpcApi -from apps.api.network.vpc import VpcBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "cidr", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "cidr"] 
- ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "cidr", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - cidr = data.pop("cidr", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "cidr": cidr} - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - create_data=create_data, - asset_id=asset_id, - resource_id=resource_id, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class VPCController(BackendController): - allow_methods = ('GET', 'POST') - resource = VpcApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "zone", "name", "cidr", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class VPCIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = VpcApi() - - def show(self, 
request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class VPCAddController(BackendAddController): - allow_methods = ("POST",) - resource = VpcBackendApi() - - -class VPCDeleteController(BackendDeleteController): - name = "VPC" - resource_describe = "VPC" - allow_methods = ("POST",) - resource = VpcBackendApi() - - -class VPCSourceController(BackendSourceController): - name = "VPC" - resource_describe = "VPC" - allow_methods = ("POST",) - resource = VpcBackendApi() diff --git a/apps/controller/region/__init__.py b/apps/controller/region/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/region/az_controller.py b/apps/controller/region/az_controller.py deleted file mode 100644 index 1c1d2ef3..00000000 --- a/apps/controller/region/az_controller.py +++ /dev/null @@ -1,240 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from apps.api.configer.region import RegionObject -from apps.api.configer.region import ZoneObject -from apps.api.configer.provider import ProviderObject - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "name", "region_id", - "region", "asset_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["asset_id", "provider", "region_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", 
"provider", "asset_id", "region_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - name = data.pop("name", None) - asset_id = data.pop("asset_id", None) - provider = data.pop("provider", None) - region = data.pop("region_id", None) or data.pop("region", None) - - ProviderObject().provider_name_object(provider) - RegionObject().region_object(region) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"id": rid, - "name": name, - "region": region, - "asset_id": asset_id, - "provider": provider, - "extend_info": json.dumps(extend_info), - } - - return resource.create(create_data) - - -class ZoneController(BackendController): - allow_methods = ('GET', 'POST') - resource = ZoneObject() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "name", 'asset_id', "region"]) - count, res = self.resource.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - result = [] - for x_res in res: - x_res["region_id"] = x_res["region"] - result.append(x_res) - - return count, res - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - return ResBase.create(resource=self.resource, data=data) - - -class ZoneIdController(BackendIdController): - allow_methods = ('GET', 'PATCH', 'DELETE') - resource = ZoneObject() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.show(rid) - - def before_handler(self, request, data, **kwargs): - 
validation.allowed_key(data, ["provider", "name", "asset_id", "extend_info", "region", "region_id"]) - ResBase.validate_keys(data) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - - if "provider" in data.keys(): - ProviderObject().provider_name_object(data.get("provider")) - - if "region" in data.keys(): - RegionObject().region_object(data.get("region")) - - if data.get("extend_info") is not None: - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - data["extend_info"] = json.dumps(extend_info) - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.delete(rid) - - -class ZoneAddController(BaseController): - allow_methods = ("POST",) - resource = ZoneObject() - - def before_handler(self, request, data, **kwargs): - ResBase.not_null(data) - ResBase.validate_keys(data) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - rid = data.get("id", None) - data.pop("secret", None) - asset_id = data.get("asset_id") - if rid: - if self.resource.show(rid): - if "extend_info" in data.keys(): - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - data["extend_info"] = json.dumps(extend_info) - - if "provider" in data.keys(): - ProviderObject().provider_name_object(data.get("provider")) - - if "region_id" in data.keys(): - RegionObject().region_object(data.get("region_id")) - data["region"] = data.pop("region_id", None) - - self.resource.update(rid, data) - return {"id": rid, "asset_id": asset_id} - - count, res = ResBase.create(resource=self.resource, data=data) - return {"id": res, "asset_id": asset_id} - - -class ZoneDeleteController(BaseController): - name = "Zone" - resource_describe = "Zone" - allow_methods = ("POST",) - resource = ZoneObject() - - def before_handler(self, request, data, **kwargs): - validation.not_allowed_null(data=data, - 
keys=["id"] - ) - - validation.validate_string("id", data.get("id")) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - rid = data.pop("id", None) - result = self.resource.delete(rid) - return {"id": result} - - -class ZoneSourceController(BaseController): - name = "Zone" - resource_describe = "Zone" - allow_methods = ("POST",) - resource = ZoneObject() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - # validation.allowed_key(data, ["id", "provider", "name", 'asset_id', "region"]) - return self.resource.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - pass - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - query_data = {} - for key in ["id", "provider", "name", 'asset_id', "region_id"]: - if data.get(key): - if key == "region_id": - query_data["region"] = data.get(key) - else: - query_data[key] = data.get(key) - - orderby = data.get("orderby") - page = data.get("page", 0) - pagesize = data.get("pagesize", 1000) - - count, result = self.list(request, data=query_data, - orderby=orderby, page=page, - pagesize=pagesize, **kwargs) - res = [] - for x in result: - x["region_id"] = x["region"] - res.append(x) - - return res diff --git a/apps/controller/region/region_controller.py b/apps/controller/region/region_controller.py deleted file mode 100644 index f57acffd..00000000 --- a/apps/controller/region/region_controller.py +++ /dev/null @@ -1,197 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import 
BackendIdController -from core.controller import BaseController -from apps.api.configer.region import RegionObject -from apps.api.configer.provider import ProviderObject - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "name", "asset_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["asset_id", "provider"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "provider", "asset_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - name = data.pop("name", None) - asset_id = data.pop("asset_id", None) - provider = data.pop("provider", None) - - ProviderObject().provider_name_object(provider) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"id": rid, - "name": name, - "asset_id": asset_id, - "provider": provider, - "extend_info": json.dumps(extend_info), - } - - return resource.create(create_data) - - -class RegionController(BackendController): - allow_methods = ('GET', 'POST') - resource = RegionObject() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "name", 'asset_id']) - return self.resource.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - return ResBase.create(resource=self.resource, data=data) - - -class RegionIdController(BackendIdController): - allow_methods = ('GET', 'PATCH', 
'DELETE') - resource = RegionObject() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.show(rid) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["provider", "name", "asset_id", "extend_info"]) - ResBase.validate_keys(data) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - - if "provider" in data.keys(): - ProviderObject().provider_name_object(data.get("provider")) - - if data.get("extend_info") is not None: - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - data["extend_info"] = json.dumps(extend_info) - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.delete(rid) - - -class RegionAddController(BaseController): - allow_methods = ("POST",) - resource = RegionObject() - - def before_handler(self, request, data, **kwargs): - ResBase.not_null(data) - ResBase.validate_keys(data) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - rid = data.get("id", None) - data.pop("secret", None) - asset_id = data.get("asset_id") - if rid: - if self.resource.show(rid): - if "extend_info" in data.keys(): - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - data["extend_info"] = json.dumps(extend_info) - - if "provider" in data.keys(): - ProviderObject().provider_name_object(data.get("provider")) - - self.resource.update(rid, data) - return {"id": rid, "asset_id": asset_id} - - count, res = ResBase.create(resource=self.resource, data=data) - return {"id": res, "asset_id": asset_id} - - -class RegionDeleteController(BaseController): - name = "Region" - resource_describe = "Region" - allow_methods = ("POST",) - resource = RegionObject() - - def before_handler(self, request, data, 
**kwargs): - validation.not_allowed_null(data=data, - keys=["id"] - ) - - validation.validate_string("id", data.get("id")) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - rid = data.pop("id", None) - result = self.resource.delete(rid) - return {"id": result} - - -class RegionSourceController(BaseController): - name = "Region" - resource_describe = "Region" - allow_methods = ("POST",) - resource = RegionObject() - - def before_handler(self, request, data, **kwargs): - pass - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - query_data = {} - for key in ["id", "provider", "name", 'asset_id']: - if data.get(key): - query_data[key] = data.get(key) - - orderby = data.get("orderby") - page = data.get("page", 0) - pagesize = data.get("pagesize", 1000) - - count, result = self.resource.list(filters=query_data, page=page, - pagesize=pagesize, orderby=orderby) - return result diff --git a/apps/controller/region/route.py b/apps/controller/region/route.py deleted file mode 100644 index 8b3c717d..00000000 --- a/apps/controller/region/route.py +++ /dev/null @@ -1,19 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url -import region_controller -import az_controller - -urlpatterns = [ - url(r'^region$', region_controller.RegionController()), - url(r'^region/(?P[\w-]+)$', region_controller.RegionIdController()), - url(r'^backend/region/apply$', region_controller.RegionAddController()), - url(r'^backend/region/destroy$', region_controller.RegionDeleteController()), - url(r'^backend/region/source$', region_controller.RegionSourceController()), - - url(r'^zone$', az_controller.ZoneController()), - url(r'^zone/(?P[\w-]+)$', az_controller.ZoneIdController()), - url(r'^backend/zone/apply$', az_controller.ZoneAddController()), - url(r'^backend/zone/destroy$', az_controller.ZoneDeleteController()), - url(r'^backend/zone/source$', 
az_controller.ZoneSourceController()), -] diff --git a/apps/controller/source_controller.py b/apps/controller/source_controller.py deleted file mode 100644 index 28875696..00000000 --- a/apps/controller/source_controller.py +++ /dev/null @@ -1,228 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from lib.uuid_util import get_uuid -from lib.logs import logger -from core import validation -from core.controller import BaseController -from core.response_hooks import format_string -from core import local_exceptions as exception_common -from apps.controller.configer.model_args import source_columns_outputs -from apps.api.configer.region import ZoneApi -from apps.api.apibase_backend import ApiBackendBase - - -def filter_action_output(out_datas, filters): - res = [] - for out_data in out_datas: - for key, value in filters.items(): - s_value = out_data.get(key) - if isinstance(s_value, (basestring, int, float)): - res.append({value: s_value}) - elif isinstance(s_value, list): - for s in s_value: - res.append({value: s}) - else: - logger.info("output value not string/int/list, skip ...") - - return res - - -class BaseSourceController(BaseController): - name = "" - resource_describe = "" - allow_methods = ("POST",) - resource = None - - def before_handler(self, request, data, **kwargs): - # validation.allowed_key(data, ["id", "resource_id", "provider", - # "secret", "region", "zone", "ignore_ids"]) - - validation.not_allowed_null(data=data, - keys=["provider", "region"] - ) - - validation.validate_collector(data=data, - strings=["id", "resource_id", "provider", - "secret", "region", "zone"], - lists=["ignore_ids"]) - - def response_templete(self, data): - return {} - - def fetch_source(self, rid, provider, region, zone, secret, resource_id, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param zone: - :param secret: - :param resource_id: - :param kwargs: - :return: - ''' - 
query_args = {} - for key, value in kwargs.items(): - if value is not None: - if not isinstance(value, (basestring, int, bool, float)): - raise ValueError("查询条件需为字符串/数字/布尔值") - - if value != '': - query_args[key] = value - - return self.resource.get_remote_source(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id, - **query_args) - - def return_relation_keys(self): - return [] - - def one_query(self, rid, provider, region, zone, secret, - resource_id, ignore_ids, **kwargs): - ''' - - :param rid: - :param provider: - :param region: - :param zone: - :param secret: - :param resource_id: - :param ignore_ids: - :param kwargs: - :return: - ''' - - result = self.fetch_source(rid=rid, provider=provider, region=region, zone=zone, - secret=secret, resource_id=resource_id, - **kwargs) - result_data = [] - - register_zones = ZoneApi().region_zones(region, provider) - - for x_result in result: - x_res = source_columns_outputs(self.resource.resource_name) - x_res.update(x_result) - res = {"region": region, "secret": secret, "provider": provider} - - if x_res.get("resource_id") in ignore_ids: - continue - - if x_res.get("zone") and (x_res.get("zone") not in register_zones): - logger.info("resource: %s ,zone: %s searched not in register zone, skip it" % ( - x_res.get("resource_id"), x_res.get("zone"))) - if x_res.get("x_ora_zone") and (x_res.get("x_ora_zone") not in register_zones): - continue - - for s_key in self.return_relation_keys(): - if x_res.get(s_key): - continue - else: - x_res[s_key] = kwargs.get(s_key) if kwargs.get(s_key) else "" - - x_res.pop("x_ora_zone", None) - - for x, value in x_res.items(): - if isinstance(value, dict): - res[x] = format_string(value) - else: - if value is None: - res[x] = '' - else: - res[x] = str(value) - - result_data.append(res) - - return result_data - - def _main_query(self, request, data, **kwargs): - rid = data.pop("id", None) - secret = data.pop("secret", None) - region = 
data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - resource_id = data.pop("resource_id", None) - ignore_ids = data.pop("ignore_ids", None) - - if resource_id: - resource_id = resource_id.strip() - if resource_id.startswith("[") and resource_id.endswith("]"): - resource_id = eval(resource_id) - - if resource_id == "*": - resource_id = None - - if ignore_ids: - ignore_ids = ignore_ids.strip() - if ignore_ids.startswith("[") and ignore_ids.endswith("]"): - ignore_ids = eval(ignore_ids) - else: - ignore_ids = [ignore_ids] - else: - ignore_ids = [] - - if resource_id: - result = [] - if isinstance(resource_id, basestring): - result = self.one_query(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, resource_id=resource_id, - ignore_ids=ignore_ids, **data) - elif isinstance(resource_id, list): - for r_resource_id in resource_id: - result += self.one_query(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, resource_id=r_resource_id, - ignore_ids=ignore_ids, **data) - else: - raise ValueError("resource id error, please check") - - # return {"datas": result} - return result - - result_data = self.one_query(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, resource_id=resource_id, - ignore_ids=ignore_ids, **data) - return result_data - - def main_response(self, request, data, **kwargs): - rid = data.pop("id", None) - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - resource_id = data.pop("resource_id", None) - ignore_ids = data.pop("ignore_ids", None) - - if not data: - resource_config = self.resource.resource_info() - if resource_config.get("pre_action"): - pre_action = resource_config.get("pre_action") - pre_action = pre_action.strip() - action_define = pre_action.split(".") - actionclient = ApiBackendBase(resource_name=action_define[0], 
resource_workspace=action_define[0]) - action_result = actionclient.get_remote_source(rid=None, provider=provider, - region=region, zone=zone, - secret=secret, resource_id=None) - output_filters = filter_action_output(action_result, filters=resource_config.get("pre_action_output")) - result = [] - for output_filter in output_filters: - source_args = {"id": rid, "secret": secret, "region": region, - "zone": zone, "provider": provider, - "resource_id": resource_id, "ignore_ids": ignore_ids} - source_args.update(output_filter) - data.update(source_args) - x_result = self._main_query(request, data, **kwargs) - result += x_result - - return result - - data.update({"id": rid, "secret": secret, "region": region, - "zone": zone, "provider": provider, - "resource_id": resource_id, "ignore_ids": ignore_ids}) - return self._main_query(request, data, **kwargs) - diff --git a/apps/controller/storage/__init__.py b/apps/controller/storage/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/storage/bucket_object_controller.py b/apps/controller/storage/bucket_object_controller.py deleted file mode 100644 index be879dcd..00000000 --- a/apps/controller/storage/bucket_object_controller.py +++ /dev/null @@ -1,155 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.storage.bucket_object import BucketObjectApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - 
"name", "extend_info", "bucket_id", - "key", "content", "source"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "bucket_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "bucket_id", - "key", "content", "source"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - bucket_id = data.pop("bucket_id", None) - key = data.pop("appid", None) - content = data.pop("content", None) - source = data.pop("source", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - if source is None and content is None: - raise ValueError("source 和 content 不能同时为null") - - create_data = {"name": name, "bucket_id": bucket_id, "key": key, - "content": content, "source": source} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class BucketObjectController(BackendController): - allow_methods = ('GET', 'POST') - resource = BucketObjectApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "bucket_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def 
before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class BucketObjectIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = BucketObjectApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class BucketObjectAddController(BackendAddController): - allow_methods = ("POST",) - resource = BucketObjectApi() - - def before_handler(self, request, data, **kwargs): - ResBase.not_null(data) - ResBase.validate_keys(data) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return res - - -class BucketObjectDeleteController(BackendDeleteController): - name = "BucketObject" - resource_describe = "BucketObject" - allow_methods = ("POST",) - resource = BucketObjectApi() - - def before_handler(self, request, data, **kwargs): - validation.not_allowed_null(data=data, - keys=["id"] - ) - - validation.validate_string("id", data.get("id")) - - def response_templete(self, data): - return {} - - def main_response(self, request, data, **kwargs): - rid = data.pop("id", None) - result = self.resource.destroy(rid) - return {"id": result} diff --git a/apps/controller/storage/disk_attach_controller.py b/apps/controller/storage/disk_attach_controller.py deleted file mode 100644 index 76db65de..00000000 --- a/apps/controller/storage/disk_attach_controller.py +++ /dev/null @@ -1,136 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, 
unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.storage.disk_attach import DiskAttachApi -from apps.api.storage.disk_attach import DiskAttachBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "disk_id", "instance_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "disk_id", "instance_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "disk_id", - "instance_id", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - disk_id = data.pop("disk_id", None) - instance_id = data.pop("instance_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "disk_id": disk_id, "instance_id": instance_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, 
"resource_id": str(result.get("resource_id"))} - return res, result - - -class DiskAttachController(BackendController): - allow_methods = ('GET', 'POST') - resource = DiskAttachApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "disk_id", "instance_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class DiskAttachIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = DiskAttachApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.detach(rid) - - -class DiskAttachAddController(BackendAddController): - allow_methods = ("POST",) - resource = DiskAttachBackendApi() - - -class DiskDetachController(BaseController): - name = "DiskDetach" - resource_describe = "DiskDetach" - allow_methods = ("POST",) - resource = DiskAttachBackendApi() - - -class RTRuleSourceController(BackendSourceController): - name = "DiskDetach" - resource_describe = "DiskDetach" - allow_methods = ("POST",) - resource = DiskAttachBackendApi() diff --git a/apps/controller/storage/disk_controller.py b/apps/controller/storage/disk_controller.py deleted file mode 100644 index d6e22224..00000000 --- 
a/apps/controller/storage/disk_controller.py +++ /dev/null @@ -1,135 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from lib.uuid_util import get_uuid -from apps.api.storage.disk import DiskApi -from apps.api.storage.disk import DiskBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "type", "size", "extend_info", "charge_type"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", "type", "size"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "type", "secret", "charge_type"], - ints=["size"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - disktype = data.pop("type", None) - size = int(data.pop("size", None)) - charge_type = data.pop("charge_type", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "type": disktype, "size": size, "charge_type": charge_type} - _, result = resource.create(rid=rid, provider=provider, - 
region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class DiskController(BackendController): - allow_methods = ('GET', 'POST') - resource = DiskApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "zone", "type", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class DiskIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = DiskApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class DiskAddController(BackendAddController): - allow_methods = ("POST",) - resource = DiskBackendApi() - - -class DiskDeleteController(BackendDeleteController): - name = "Disk" - resource_describe = "Disk" - allow_methods = ("POST",) - resource = DiskBackendApi() - - -class DiskSourceController(BackendSourceController): - name = "Disk" - resource_describe = "Disk" - allow_methods = ("POST",) - resource = DiskBackendApi() diff --git a/apps/controller/storage/object_storage_controller.py 
b/apps/controller/storage/object_storage_controller.py deleted file mode 100644 index 42263c74..00000000 --- a/apps/controller/storage/object_storage_controller.py +++ /dev/null @@ -1,135 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.storage.object_storage import ObjectStorageApi -from apps.api.storage.object_storage import ObjectStorageBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "acl", "appid", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "acl", "appid", "secret"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - acl = data.pop("acl", None) - appid = data.pop("appid", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "acl": acl, "appid": appid} - _, result = 
resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "url": result.get("url", ""), - "resource_id": str(result.get("resource_id"))[:64]} - return res, result - - -class ObjectStorageController(BackendController): - allow_methods = ('GET', 'POST') - resource = ObjectStorageApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "url", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class ObjectStorageIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = ObjectStorageApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class ObjectStorageAddController(BackendAddController): - allow_methods = ("POST",) - resource = ObjectStorageBackendApi() - - -class ObjectStorageDeleteController(BackendDeleteController): - name = "ObjectStorage" - resource_describe = "ObjectStorage" - allow_methods = ("POST",) - resource = ObjectStorageBackendApi() - - -class OSSSourceController(BackendSourceController): - name = "ObjectStorage" - 
resource_describe = "ObjectStorage" - allow_methods = ("POST",) - resource = ObjectStorageBackendApi() diff --git a/apps/controller/storage/route.py b/apps/controller/storage/route.py deleted file mode 100644 index 50a7f4a9..00000000 --- a/apps/controller/storage/route.py +++ /dev/null @@ -1,26 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url -import disk_controller -import object_storage_controller -import disk_attach_controller - -urlpatterns = [ - url(r'^disk$', disk_controller.DiskController()), - url(r'^disk/(?P[\w-]+)$', disk_controller.DiskIdController()), - url(r'^backend/disk/apply$', disk_controller.DiskAddController()), - url(r'^backend/disk/destroy$', disk_controller.DiskDeleteController()), - url(r'^backend/disk/source$', disk_controller.DiskSourceController()), - - url(r'^disk_attach$', disk_attach_controller.DiskAttachController()), - url(r'^disk_attach/(?P[\w-]+)$', disk_attach_controller.DiskAttachIdController()), - url(r'^backend/disk/apply', disk_attach_controller.DiskAttachAddController()), - url(r'^backend/disk/destroy', disk_attach_controller.DiskDetachController()), - - url(r'^object_storage$', object_storage_controller.ObjectStorageController()), - url(r'^object_storage/(?P[\w-]+)$', object_storage_controller.ObjectStorageIdController()), - url(r'^backend/object_storage/apply$', object_storage_controller.ObjectStorageAddController()), - url(r'^backend/object_storage/destroy$', object_storage_controller.ObjectStorageDeleteController()), - url(r'^backend/object_storage/source$', object_storage_controller.OSSSourceController()), - -] diff --git a/apps/controller/vm/__init__.py b/apps/controller/vm/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/apps/controller/vm/eni_attach_controller.py b/apps/controller/vm/eni_attach_controller.py deleted file mode 100644 index b8424890..00000000 --- a/apps/controller/vm/eni_attach_controller.py +++ /dev/null @@ -1,139 +0,0 @@ -# _ coding:utf-8 _*_ - -from 
__future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.vm.eni_attach import ENIAttachApi -from apps.api.vm.eni_attach import ENIAttachBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "network_interface_id", - "instance_id", "extend_info"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "network_interface_id", "instance_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "network_interface_id", - "instance_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - network_interface_id = data.pop("network_interface_id", None) - instance_id = data.pop("instance_id", None) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - create_data = {"name": name, "instance_id": instance_id, - "network_interface_id": network_interface_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, 
zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64], - "instance_id": instance_id} - return res, result - - -class EniAttachController(BackendController): - allow_methods = ('GET', 'POST') - resource = ENIAttachApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "network_interface_id", "instance_id", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class EniAttachIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = ENIAttachApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.detach(rid) - - -class EniAttachAddController(BackendAddController): - allow_methods = ("POST",) - resource = ENIAttachBackendApi() - - -class EniDetachController(BaseController): - name = "EniDetach" - resource_describe = "EniDetach" - allow_methods = ("POST",) - resource = ENIAttachBackendApi() - - -class ENIAttachSourceController(BackendSourceController): - name = "VPC" - resource_describe = "VPC" - allow_methods = ("POST",) - resource = ENIAttachBackendApi() diff --git 
a/apps/controller/vm/eni_controller.py b/apps/controller/vm/eni_controller.py deleted file mode 100644 index 1b4ec24d..00000000 --- a/apps/controller/vm/eni_controller.py +++ /dev/null @@ -1,156 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from core.controller import BaseController -from lib.uuid_util import get_uuid -from apps.api.vm.eni import EniApi -from apps.api.vm.eni import EniBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "ipaddress", "subnet_id", - "vpc_id", "security_group_id", ]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "zone", "name", "subnet_id"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", - "ipaddress", "subnet_id", - "vpc_id"], - lists=["security_group_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - vpc_id = data.pop("vpc_id", None) - ipaddress = data.pop("ipaddress", None) - security_group_id = validation.validate_list("security_group_id", data.pop("security_group_id", None)) - - extend_info = 
validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - - create_data = {"name": name, "subnet_id": subnet_id, - "vpc_id": vpc_id, "ipaddress": ipaddress, - "security_group_id": security_group_id} - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64], - "ipaddress": result.get("ipaddress")} - return res, result - - -class EniController(BackendController): - allow_methods = ('GET', 'POST') - resource = EniApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "zone", "ipaddress", "enabled"]) - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class EniIdController(BackendIdController): - allow_methods = ('GET', 'DELETE') - resource = EniApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.destroy(rid) - - -class EniAddController(BackendAddController): - allow_methods = ("POST",) - resource = 
EniBackendApi() - - -class EniDeleteController(BackendDeleteController): - name = "Eni" - resource_describe = "Eni" - allow_methods = ("POST",) - resource = EniBackendApi() - - -class ENISourceController(BackendSourceController): - name = "Eni" - resource_describe = "Eni" - allow_methods = ("POST",) - resource = EniBackendApi() - - -class ENISGSourceController(BackendSourceController): - name = "Eni" - resource_describe = "Eni" - allow_methods = ("POST",) - resource = EniBackendApi() - - def fetch_source(self, rid, provider, region, zone, secret, resource_id): - return self.resource.sg_eni_relationship(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id) diff --git a/apps/controller/vm/instance_controller.py b/apps/controller/vm/instance_controller.py deleted file mode 100644 index 94a76c75..00000000 --- a/apps/controller/vm/instance_controller.py +++ /dev/null @@ -1,269 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from core import validation -from core import local_exceptions -from core.controller import BackendController -from core.controller import BaseController -from core.controller import BackendIdController -from lib.uuid_util import get_uuid -from apps.api.vm.instance import InstanceApi -from apps.api.vm.instance import InstanceBackendApi -from apps.controller.backend_controller import BackendAddController -from apps.controller.backend_controller import BackendDeleteController -from apps.controller.backend_controller import BackendSourceController - - -class ResBase(object): - @classmethod - def allow_key(cls, data): - validation.allowed_key(data, ["id", "provider", "secret", "region", "zone", - "name", "extend_info", "subnet_id", - "hostname", "image", "instance_type", - "disk_type", "disk_size", "password", "power_action", - "security_group_id", "vpc_id", "data_disks", - "charge_type"]) - - @classmethod - def allow_upgrade_key(cls, 
data): - if not data: - raise ValueError("没有需要更新的配置") - - validation.allowed_key(data, ["name", "instance_type", "image", - "extend_info", "security_group_id"]) - - @classmethod - def not_null(cls, data): - validation.not_allowed_null(data=data, - keys=["region", "provider", "name", - "subnet_id", "image", - "instance_type"] - ) - - @classmethod - def validate_keys(cls, data): - validation.validate_collector(data=data, - strings=["id", "name", "region", "zone", - "provider", "secret", "subnet_id", - "hostname", "image", "instance_type", - "disk_type", "password", "power_action", - "security_group_id", "vpc_id", - "charge_type"], - ints=["disk_size"], - dicts=["extend_info", "data_disks"]) - - @classmethod - def validate_upgrade_keys(cls, data): - validation.validate_collector(data=data, - strings=["name", "instance_type", - "image", "security_group_id"], - dicts=["extend_info"]) - - @classmethod - def create(cls, resource, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - secret = data.pop("secret", None) - region = data.pop("region", None) - zone = data.pop("zone", None) - provider = data.pop("provider", None) - name = data.pop("name", None) - subnet_id = data.pop("subnet_id", None) - hostname = data.pop("hostname", None) - image = data.pop("image", None) - disk_type = data.pop("disk_type") - disk_size = data.pop("disk_size", 40) - instance_type = data.pop("instance_type", None) - password = data.pop("password", None) - vpc_id = data.pop("vpc_id", None) - charge_type = data.pop("charge_type", None) - security_group_id = data.pop("security_group_id", None) - data_disks = validation.validate_dict("data_disks", data.pop("data_disks", None)) - - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - data.update(extend_info) - - asset_id = data.pop("asset_id", None) - resource_id = data.pop("resource_id", None) - action = data.pop("power_action", None) - - d = dict(hostname=hostname, image=image, - 
instance_type=instance_type, - password=password, vpc_id=vpc_id, - security_group_id=security_group_id, - data_disks=data_disks, - disk_type=disk_type, - disk_size=disk_size, - subnet_id=subnet_id, - charge_type=charge_type) - - create_data = {"name": name} - create_data.update(d) - if action: - create_data["power_action"] = action - - _, result = resource.create(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - asset_id=asset_id, - resource_id=resource_id, - create_data=create_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64], - "ipaddress": result.get("ipaddress"), - "cpu": result.get("cpu"), - "memory": result.get("memory")} - return res, result - - @classmethod - def update(cls, resource, data, **kwargs): - rid = kwargs.pop("rid", None) - name = data.pop("name", None) - instance_type = data.pop("instance_type") - image = data.pop("image") - security_group_id = data.pop("security_group_id", None) - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - - update_data = {} - if name: - update_data["name"] = name - if instance_type: - update_data["instance_type"] = instance_type - if security_group_id: - update_data["security_group_id"] = security_group_id - if image: - update_data["image"] = image - - data.update(extend_info) - _, result = resource.update(rid=rid, provider=None, - region=None, zone=None, - update_data=update_data, - extend_info=data) - - res = {"id": rid, "resource_id": str(result.get("resource_id"))[:64], - "ipaddress": result.get("ipaddress"), - "cpu": result.get("cpu"), - "memory": result.get("memory")} - - return res, result - - -class InstanceController(BackendController): - allow_methods = ('GET', 'POST') - resource = InstanceApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - 
''' - - validation.allowed_key(data, ["id", "provider", "region", 'resource_id', - "provider_id", "name", "enabled", - "hostname", "instance_type", "image", - "cpu", "memory", "ipaddress", - "disk_type", "disk_size", "power_state"]) - - return self.resource.resource_object.list(filters=data, page=page, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_key(data) - ResBase.not_null(data) - ResBase.validate_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.create(resource=self.resource, data=data) - return 1, res - - -class InstanceIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = InstanceApi() - - def show(self, request, data, **kwargs): - ''' - - :param request: - :param data: - :param kwargs: - :return: - ''' - - rid = kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - force_delete = data.get("force_delete", False) - return self.resource.destroy(rid, force_delete=force_delete) - - def before_handler(self, request, data, **kwargs): - ResBase.allow_upgrade_key(data) - ResBase.validate_upgrade_keys(data) - - def create(self, request, data, **kwargs): - res, _ = ResBase.update(resource=self.resource, data=data) - return 1, res - - -class InstanceActionController(BackendIdController): - allow_methods = ('PATCH',) - resource = InstanceApi() - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["action"]) - validation.not_allowed_null(data=data, - keys=["action"] - ) - - validation.validate_string("action", data["action"]) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - action = data.get("action", None) - if action.lower() == "start": - return self.resource.start(rid) - elif action.lower() == "stop": - return self.resource.stop(rid) - else: - raise 
local_exceptions.ValueValidateError("action", "VM 开关机操作,请使用合法值 start/stop") - - -class InstanceAddController(BackendAddController): - allow_methods = ("POST",) - resource = InstanceBackendApi() - - -class InstanceDeleteController(BackendDeleteController): - name = "Instance" - resource_describe = "Instance" - allow_methods = ("POST",) - resource = InstanceBackendApi() - - -class InstanceSourceController(BackendSourceController): - name = "Instance" - resource_describe = "Instance" - allow_methods = ("POST",) - resource = InstanceBackendApi() - - -class InstanceSGSourceController(BackendSourceController): - name = "Instance" - resource_describe = "Instance" - allow_methods = ("POST",) - resource = InstanceBackendApi() - - def fetch_source(self, rid, provider, region, zone, secret, resource_id): - return self.resource.sg_vm_relationship(rid=rid, provider=provider, - region=region, zone=zone, - secret=secret, - resource_id=resource_id) diff --git a/apps/controller/vm/instance_type_controller.py b/apps/controller/vm/instance_type_controller.py deleted file mode 100644 index 0e98f006..00000000 --- a/apps/controller/vm/instance_type_controller.py +++ /dev/null @@ -1,111 +0,0 @@ -# _ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from lib.uuid_util import get_uuid -from core import validation -from core.controller import BackendController -from core.controller import BackendIdController -from apps.api.vm.instance_type import InstanceTypeApi - - -class InstanceTypeController(BackendController): - allow_methods = ('GET', 'POST') - resource = InstanceTypeApi() - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - ''' - - :param request: - :param data: - :param orderby: - :param page: - :param pagesize: - :param kwargs: - :return: - ''' - - validation.allowed_key(data, ["id", "provider", "origin_name", "cpu", "memory", - "provider_id", "type", "name", "enabled"]) - - 
filter_string = None - for key in ["origin_name", "provider", "name", "provider_id", "type"]: - if data.get(key): - if filter_string: - filter_string += 'and ' + key + " like '%" + data.get(key) + "%' " - else: - filter_string = key + " like '%" + data.get(key) + "%' " - data.pop(key, None) - - return self.resource.resource_object.list(filters=data, page=page, - filter_string=filter_string, - pagesize=pagesize, orderby=orderby) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["id", "name", "provider", "origin_name", - "type", "cpu", "memory", "network", "extend_info"]) - validation.not_allowed_null(data=data, - keys=["provider", "name", "cpu", "memory"] - ) - - validation.validate_string("id", data.get("id")) - validation.validate_string("name", data["name"]) - validation.validate_string("origin_name", data.get("origin_name")) - validation.validate_int("cpu", data.get("cpu")) - validation.validate_int("memory", data["memory"]) - validation.validate_string("network", data.get("network")) - validation.validate_string("type", data.get("type")) - validation.validate_string("provider", data.get("provider")) - validation.validate_dict("extend_info", data.get("extend_info")) - - def create(self, request, data, **kwargs): - rid = data.pop("id", None) or get_uuid() - name = data.pop("name", None) - origin_name = data.pop("origin_name", None) - cpu = data.pop("cpu", None) - memory = data.pop("memory", None) - network = data.pop("network", None) - x_type = data.pop("type", None) - provider = data.pop("provider", None) - extend_info = validation.validate_dict("extend_info", data.pop("extend_info", None)) - - data.update(extend_info) - return self.resource.create(rid, name, provider, origin_name, - cpu, memory, network, x_type, extend_info=data) - - -class InstanceTypeIdController(BackendIdController): - allow_methods = ('GET', 'DELETE', 'PATCH') - resource = InstanceTypeApi() - - def show(self, request, data, **kwargs): - rid = 
kwargs.pop("rid", None) - return self.resource.resource_object.show(rid) - - def before_handler(self, request, data, **kwargs): - validation.allowed_key(data, ["name", "provider", "origin_name", "type", - "cpu", "memory", "network", "extend_info"]) - - validation.validate_string("id", data.get("id")) - validation.validate_string("name", data.get("name")) - validation.validate_string("origin_name", data.get("origin_name")) - validation.validate_int("cpu", data.get("cpu")) - validation.validate_int("memory", data.get("memory")) - validation.validate_string("type", data.get("type")) - validation.validate_string("network", data.get("network")) - validation.validate_string("provider", data.get("provider")) - validation.validate_dict("extend_info", data.get("extend_info")) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - - if data.get("extend_info") is not None: - extend_info = validation.validate_dict("extend_info", data.get("extend_info")) - data["extend_info"] = json.dumps(extend_info) - - return self.resource.update(rid, data) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.resource_object.delete(rid) diff --git a/apps/controller/vm/route.py b/apps/controller/vm/route.py deleted file mode 100644 index 50a501b4..00000000 --- a/apps/controller/vm/route.py +++ /dev/null @@ -1,35 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url -import instance_controller -import instance_type_controller -import eni_controller -import eni_attach_controller - -urlpatterns = [ - url(r'^instance$', instance_controller.InstanceController()), - url(r'^instance/(?P[\w-]+)$', instance_controller.InstanceIdController()), - url(r'^instance_action/(?P[\w-]+)$', instance_controller.InstanceActionController()), - url(r'^backend/instance/apply$', instance_controller.InstanceAddController()), - url(r'^backend/instance/destroy$', instance_controller.InstanceDeleteController()), - # 
url(r'^backend/instance/update$', instance_controller.InstanceUpdateController()), - # url(r'^backend/instance/action$', instance_controller.InstanceStartController()), - url(r'^backend/instance/source$', instance_controller.InstanceSourceController()), - url(r'^backend/instance/security_group/source$', instance_controller.InstanceSGSourceController()), - - url(r'^instance_type$', instance_type_controller.InstanceTypeController()), - url(r'^instance_type/(?P[\w-]+)$', instance_type_controller.InstanceTypeIdController()), - - url(r'^network_interface$', eni_controller.EniController()), - url(r'^network_interface/(?P[\w-]+)$', eni_controller.EniIdController()), - url(r'^backend/network_interface/apply$', eni_controller.EniAddController()), - url(r'^backend/network_interface/destroy$', eni_controller.EniDeleteController()), - url(r'^backend/network_interface/source$', eni_controller.ENISourceController()), - url(r'^backend/network_interface/security_group/source$', eni_controller.ENISGSourceController()), - - url(r'^network_interface_attach$', eni_attach_controller.EniAttachController()), - url(r'^network_interface_attach/(?P[\w-]+)$', eni_attach_controller.EniAttachIdController()), - url(r'^backend/network_interface/apply$', eni_attach_controller.EniAttachAddController()), - url(r'^backend/network_interface/destroy$', eni_attach_controller.EniDetachController()), - -] diff --git a/apps/route.py b/apps/route.py deleted file mode 100644 index 608474c1..00000000 --- a/apps/route.py +++ /dev/null @@ -1,14 +0,0 @@ -# _ coding:utf-8 _*_ - -from django.conf.urls import include, url - -urlpatterns = [ - # url(r'^test/', include('apps.controller.test.route', namespace='test')), - url(r'^configer/', include('apps.controller.configer.route', namespace='configer')), - url(r'^network/', include('apps.controller.network.route', namespace='network')), - url(r'^vm/', include('apps.controller.vm.route', namespace='vm')), - url(r'^az/', include('apps.controller.region.route', 
namespace='az')), - url(r'^storage/', include('apps.controller.storage.route', namespace='storage')), - url(r'^loadbalance/', include('apps.controller.loadbalance.route', namespace='lb')), - url(r'^database/', include('apps.controller.database.route', namespace='database')), -] diff --git a/bin/__init__.py b/bin/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/bin/start.sh b/bin/start.sh deleted file mode 100644 index 5b39aefa..00000000 --- a/bin/start.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/bash - -# start wecube plugins terraform -echo "terraform plugins starting " -pidfile=bin/terraform.pid -logfile=logs/err.log - -SCRIPT_PATH="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" -cd ${SCRIPT_PATH}; - -mkdir ../logs -cd .. - -sed -i "s/true/false/g" conf/application.conf -echo "gunicorn starting process. wating ... " -gunicorn -c gunicorn.conf wecube_plugins_terraform.wsgi:application -t 900 --pid $pidfile --error-logfile $logfile --log-level info - -echo "done" diff --git a/bin/stop.sh b/bin/stop.sh deleted file mode 100644 index e77d4656..00000000 --- a/bin/stop.sh +++ /dev/null @@ -1,11 +0,0 @@ -#!/bin/bash - -# wecube plugins terraform stop - -echo "wecube plugins terraform will stop" -PID=`ps -ef | grep python | grep -v grep | awk -F " " '{print $2}'` - -echo "wating ..." 
-kill -9 $PID -echo "done" - diff --git a/build/build-server.sh b/build/build-server.sh new file mode 100755 index 00000000..6bc4a57a --- /dev/null +++ b/build/build-server.sh @@ -0,0 +1,4 @@ +#!/bin/bash +set -e -x +cd $(dirname $0)/../terraform-server +go build -ldflags "-linkmode external -extldflags -static -s" \ No newline at end of file diff --git a/build/build-ui-docker.sh b/build/build-ui-docker.sh new file mode 100755 index 00000000..805daa3e --- /dev/null +++ b/build/build-ui-docker.sh @@ -0,0 +1,5 @@ +#!/bin/bash +set -e -x +cd $(dirname $0)/../cmdb-ui +npm install +npm run plugin \ No newline at end of file diff --git a/build/build-ui.sh b/build/build-ui.sh new file mode 100755 index 00000000..6559fda3 --- /dev/null +++ b/build/build-ui.sh @@ -0,0 +1,11 @@ +#!/bin/bash +set -e -x +npm -v +if [ $? -eq 0 ] +then + cd $1/ui + npm install + npm run plugin +else + docker run --rm -v $1:/app/terraform --name terraform-node-build node:12.13.1 /bin/bash /app/terraform/build/build-ui-docker.sh +fi \ No newline at end of file diff --git a/build/default.json b/build/default.json new file mode 100644 index 00000000..0d1e9eb8 --- /dev/null +++ b/build/default.json @@ -0,0 +1,41 @@ +{ + "default_language": "en", + "http_server": { + "port": "8999", + "cross": true + }, + "log": { + "level": "{{TERRAFORM_LOG_LEVEL}}", + "log_dir": "logs", + "access_log_enable": true, + "db_log_enable": true, + "archive_max_size": 64, + "archive_max_backup": 10, + "archive_max_day": 15, + "compress": true + }, + "database": { + "server": "{{TERRAFORM_MYSQL_HOST}}", + "port": "{{TERRAFORM_MYSQL_PORT}}", + "user": "{{TERRAFORM_MYSQL_USER}}", + "password": "{{TERRAFORM_MYSQL_PWD}}", + "database": "{{TERRAFORM_MYSQL_SCHEMA}}", + "maxOpen": 50, + "maxIdle": 10, + "timeout": 60 + }, + "rsa_key_path": "/data/certs/rsa_key", + "wecube": { + "base_url": "{{GATEWAY_URL}}", + "jwt_signing_key": "{{JWT_SIGNING_KEY}}", + "sub_system_code": "{{SUB_SYSTEM_CODE}}", + "sub_system_key": 
"{{SUB_SYSTEM_KEY}}" + }, + "auth": { + "password_seed": "{{TERRAFORM_SECRET_KEY_SEED}}" + }, + "terraform_file_path": "/data/terraform/", + "terraform_cmd_path": "/usr/local/bin/terraform", + "terraform_provider_os_arch": "linux_amd64", + "version": "{{PLUGIN_VERSION}}" +} diff --git a/build/register.xml b/build/register.xml new file mode 100644 index 00000000..66d7dfb8 --- /dev/null +++ b/build/register.xml @@ -0,0 +1,48 @@ + + + + + + + + + + + /terraformIndex + /debuggerIndex + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + \ No newline at end of file diff --git a/build/start.sh b/build/start.sh new file mode 100755 index 00000000..a748ae01 --- /dev/null +++ b/build/start.sh @@ -0,0 +1,21 @@ +#!/bin/bash + +sed -i "s~{{TERRAFORM_MYSQL_HOST}}~$TERRAFORM_MYSQL_HOST~g" /app/terraform/conf/default.json +sed -i "s~{{TERRAFORM_MYSQL_PORT}}~$TERRAFORM_MYSQL_PORT~g" /app/terraform/conf/default.json +sed -i "s~{{TERRAFORM_MYSQL_SCHEMA}}~$TERRAFORM_MYSQL_SCHEMA~g" /app/terraform/conf/default.json +sed -i "s~{{TERRAFORM_MYSQL_USER}}~$TERRAFORM_MYSQL_USER~g" /app/terraform/conf/default.json +sed -i "s~{{TERRAFORM_MYSQL_PWD}}~$TERRAFORM_MYSQL_PWD~g" /app/terraform/conf/default.json +sed -i "s~{{TERRAFORM_LOG_LEVEL}}~$TERRAFORM_LOG_LEVEL~g" /app/terraform/conf/default.json +sed -i "s~{{GATEWAY_URL}}~$GATEWAY_URL~g" /app/terraform/conf/default.json +sed -i "s~{{JWT_SIGNING_KEY}}~$JWT_SIGNING_KEY~g" /app/terraform/conf/default.json +sed -i "s~{{SUB_SYSTEM_CODE}}~$SUB_SYSTEM_CODE~g" /app/terraform/conf/default.json +sed -i "s~{{SUB_SYSTEM_KEY}}~$SUB_SYSTEM_KEY~g" /app/terraform/conf/default.json +sed -i "s~{{PLUGIN_VERSION}}~$PLUGIN_VERSION~g" /app/terraform/conf/default.json +sed -i "s~{{TERRAFORM_SECRET_KEY_SEED}}~$TERRAFORM_SECRET_KEY_SEED~g" /app/terraform/conf/default.json + +if [ ! 
-n "`ls /data/terraform/providers`" ] +then + cp -r /data/terraform/providers_tpl/* /data/terraform/providers/ +fi + +./terraform-server \ No newline at end of file diff --git a/build/stop.sh b/build/stop.sh new file mode 100755 index 00000000..c32eccb4 --- /dev/null +++ b/build/stop.sh @@ -0,0 +1,3 @@ +#!/bin/bash + +kill -9 `pidof terraform-server` \ No newline at end of file diff --git a/conf/__init__.py b/conf/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/conf/application.conf b/conf/application.conf deleted file mode 100644 index c70ddddf..00000000 --- a/conf/application.conf +++ /dev/null @@ -1,19 +0,0 @@ -[DEFAULT] -debug = true -serverport = 8999 - - -[LOG] -name = application.log -level = INFO -# unit of size is MB -max_size = 500 -backup_count = 3 -msg_max_len = 2048 - - -[DATABASE] -server = 127.0.0.1:3306 -username = terraform -password = qaz123456 -database = terraform \ No newline at end of file diff --git a/core/__init__.py b/core/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git a/core/auth.py b/core/auth.py deleted file mode 100644 index 02dd68d7..00000000 --- a/core/auth.py +++ /dev/null @@ -1,41 +0,0 @@ -# _*_ coding:utf-8 _*_ - -import base64 -import binascii -import traceback -import jwt -import local_exceptions -from lib.logs import logger -from wecube_plugins_terraform.settings import JWT_KEY, DEBUG - - -def b64decode_key(key): - new_key = key - max_padding = 3 - while max_padding > 0: - try: - return base64.b64decode(new_key) - except (binascii.Error, TypeError) as e: - new_key += '=' - max_padding -= 1 - if max_padding <= 0: - raise e - - -if not DEBUG: - jwt_key = b64decode_key(JWT_KEY) -else: - jwt_key = '' - - -def jwt_request(request): - _token = request.META.get("HTTP_AUTHORIZATION", None) - if _token: - try: - token = _token[len("Bearer "):] - return jwt.decode(token, jwt_key, verify=True) - except Exception, e: - logger.info(traceback.format_exc()) - raise 
local_exceptions.AuthExceptionError("认证失败") - else: - raise local_exceptions.AuthExceptionError("认证失败") diff --git a/core/backend_response.py b/core/backend_response.py deleted file mode 100644 index b289941a..00000000 --- a/core/backend_response.py +++ /dev/null @@ -1,298 +0,0 @@ -# _*_ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -import traceback - -from django.http import HttpResponse -from django.http import HttpResponseNotAllowed -from wecube_plugins_terraform.settings import DEBUG -from core import local_exceptions as exception_common -from core import validation -from core.validation import validate_column_line -from lib.classtools import get_all_class_for_module -from lib.json_helper import format_json_dumps -from lib.logs import logger -from lib.uuid_util import get_uuid -from .auth import jwt_request - -content_type = 'application/json;charset=utf-8' -exception_common_classes = get_all_class_for_module(exception_common) - - -class BackendResponse(object): - allow_methods = tuple() - requestId = "" - requestUser = "Unknown" - resource = None - - def list(self, request, data, orderby=None, page=None, pagesize=None, **kwargs): - return self.resource.list(data, orderby=orderby, page=page, pagesize=pagesize) - - def show(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.show(rid, where_and=data) - - def create(self, request, data, **kwargs): - return self.resource.create(data) - - def delete(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.delete(rid, where_and=data) - - def update(self, request, data, **kwargs): - rid = kwargs.pop("rid", None) - return self.resource.update(rid, data, where_and=data) - - def on_create(self, request, **kwargs): - try: - if not request.body: - return {} - data = json.loads(request.body) - except: - raise exception_common.RequestValidateError("请求参数不为json") - - 
self._validate_column(data) - - self.before_handler(request, data, **kwargs) - count, result = self.create(request, data, **kwargs) - return {"count": count, "data": result} - - def build_filters(self, data): - pagesize = data.pop("pagesize", None) or data.pop("__limit", None) - page = data.pop("page", 1) or data.pop("__offset", None) - oder_key = data.pop("oder_key", None) - oder_as = data.pop("oder_as", 'asc') - - if pagesize: - pagesize = validation.validate_int("pagesize", pagesize, min=1) - page = validation.validate_int("page", page, min=0) - page = page - 1 if page >= 1 else 1 - PAGINATION = {'pagesize': pagesize, 'page': page} - else: - PAGINATION = {} - - if oder_as not in ['asc', 'desc']: - raise exception_common.ValueValidateError(param="oder_as", msg=("非法值 %s, 允许值: asc 或 desc")) - if oder_key: - validate_column_line(oder_key) - ORDER = [[oder_key, oder_as]] - else: - ORDER = [] - - query_data = {} - for col, value in data.items(): - if col.startswith("__"): - continue - - if value: - query_data[col] = value - - res = {"orderby": ORDER, "data": query_data} - res.update(PAGINATION) - return res - - def on_get(self, request, **kwargs): - data = request.GET - data = self.build_filters(data.dict()) - count, res = self.list(request, **data) - - if isinstance(res, list): - result = [] - for t in res: - if isinstance(t, dict): - if t.get("define_json"): - t["define_json"]["provider"] = {} - - result.append(t) - - res = result - - return {"count": count, "data": res} - - def on_id_get(self, request, **kwargs): - if len(request.META.get("QUERY_STRING", "")) > 2048: - raise exception_common.DataToolangError(msg="请求URL过长") - - data = request.GET - res = self.show(request, data.dict(), **kwargs) - if not res: - raise exception_common.ResourceNotFoundError() - - if isinstance(res, dict): - if res.get("define_json"): - res["define_json"]["provider"] = {} - - return res - - def on_delete(self, request, **kwargs): - data = request.GET - res = self.delete(request, 
data.dict(), **kwargs) - if not res: - raise exception_common.ResourceNotFoundError() - return {"data": res} - - def on_patch(self, request, **kwargs): - try: - if not request.body: - return {} - data = json.loads(request.body) - except: - raise exception_common.RequestValidateError("请求参数不为json") - - for cid, value in data.items(): - validate_column_line(cid) - - self.before_handler(request, data, **kwargs) - count, res = self.update(request, data, **kwargs) - if not res: - raise exception_common.ResourceNotFoundError() - - if isinstance(res, dict): - if res.get("define_json"): - res["define_json"]["provider"] = {} - - return {"count": count, "data": res} - - def before_handler(self, request, data, **kwargs): - pass - - def _validate_column(self, data): - if isinstance(data, list): - raise exception_common.RequestValidateError("不支持的数据类型") - elif isinstance(data, dict): - for cid, value in data.items(): - validate_column_line(cid) - else: - raise exception_common.RequestValidateError("未知请求数据类型") - - def _trace_req(self, request): - try: - data = request.body if request.method.upper() in ['POST', 'PATCH'] else request.GET - if isinstance(data, (dict, list)): - data = format_json_dumps(data) - logger.info("[%s] [RE] [%s]- %s %s %s " % (self.requestId, self.requestUser, - request.method.upper(), request.path, data)) - except: - logger.info(traceback.format_exc()) - - def format_response(self, data): - return format_json_dumps({"status": "OK", "message": "OK", "code": 0, "data": data}) - - def format_err(self, errcode, errtype, errinfo, return_data=None): - if isinstance(errinfo, Exception): - errorMessage = "type: %s, info: %s" % (errtype, errinfo.message) - else: - errorMessage = "type: %s, info: %s" % (errtype, errinfo) - - msg = {"status": "ERROR", "message": errorMessage, "code": errcode, "data": None} - return json.dumps(msg, ensure_ascii=False) - - def request_response(self, request, **kwargs): - if request.method.upper() == "OPTIONS": - return 
HttpResponse(str(self.allow_methods)) - else: - if request.method.upper() in self.allow_methods: - self.requestId = "req_%s" % get_uuid() - self._trace_req(request) - res = self._request_response(request, **kwargs) - res.setdefault("ReqID", self.requestId) - self.trace_log(request, res) - return res - else: - return HttpResponseNotAllowed(self.allow_methods, - content=self.format_err(405, "HttpMethodsNotAllowed", - ",".join(self.allow_methods)), - content_type=content_type) - - def handler_http(self, request, **kwargs): - raise NotImplementedError() - - def _is_platform(self, jwt_info): - if ("SUB_SYSTEM" not in jwt_info.get("authority")) and ( - "ADMIN_TERRAFORM_CONFIG" not in jwt_info.get("authority")): - raise exception_common.AllowedForbidden("AllowedForbidden") - - def _request_response(self, request, **kwargs): - try: - if not DEBUG: - jwt_info = jwt_request(request) - self._is_platform(jwt_info) - self.requestUser = jwt_info.get("sub") - - res = HttpResponse(content=self.handler_http(request=request, **kwargs), - status=200, - content_type=content_type) - except Exception, e: - logger.info(traceback.format_exc()) - logger.info(e.message) - res = self.exception_response(e) - return res - - def trace_log(self, request, res): - try: - _traceres = res.content.decode("utf-8") - except: - _traceres = res.content - - try: - logger.info("[%s] [RP] [%s]- %s %s %s" % (self.requestId, self.requestUser, - request.method.upper(), - request.path, (str(res.status_code) + " data: %s " % _traceres))) - except: - logger.info(traceback.format_exc()) - - def exception_response(self, e): - if e.__class__.__name__ in ['UnicodeDecodeError']: - status_code = 400 - errmsg = self.format_err(400, "DataError", "字符错误, 原因:请使用UTF-8编码") - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in ['ValueError', 'TypeError', "KeyError"]: - status_code = 400 - errmsg = self.format_err(400, "ValueError", "字符错误, 原因:%s" % e.message) 
- response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in ['IntegrityError']: - status_code = 400 - errmsg = self.format_err(400, "ValueError", "资源值已存在或缺少必填项(或空字段),请检查") - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in ['TerrformExecError']: - status_code = 400 - errmsg = self.format_err(400, "ExecError", "资源执行异常, 请检查") - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in ['AuthFailedError']: - status_code = 401 - errmsg = self.format_err(401, "UserAuthError", e) - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in exception_common_classes: - errmsg = self.format_err(e.status_code, e.__class__.__name__, e) - response_res = HttpResponse(status=e.status_code, content=errmsg, content_type=content_type) - else: - status_code = 500 - errmsg = self.format_err(status_code, "SericeError", "服务器遇到异常") - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - return response_res - - -class BackendManager(BackendResponse): - def handler_http(self, request, **kwargs): - method = request.method.upper() - if method == "GET": - return self.format_response(self.on_get(request, **kwargs)) - elif method == "POST": - return self.format_response(self.on_create(request, **kwargs)) - else: - raise exception_common.HttpMethodsNotAllowed() - - -class BackendIdManager(BackendResponse): - def handler_http(self, request, **kwargs): - method = request.method.upper() - if method == "GET": - return self.format_response(self.on_id_get(request, **kwargs)) - elif method == "PATCH": - return self.format_response(self.on_patch(request, **kwargs)) - elif method == "DELETE": - return self.format_response(self.on_delete(request, **kwargs)) - else: - raise 
exception_common.HttpMethodsNotAllowed() diff --git a/core/controller.py b/core/controller.py deleted file mode 100644 index d709f9b3..00000000 --- a/core/controller.py +++ /dev/null @@ -1,35 +0,0 @@ -# _*_ coding:utf-8 _*_ - - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from .backend_response import BackendManager as _BackendManager_ -from .backend_response import BackendIdManager as _BackendIdManager_ -from .response_hooks import ResponseController as _ResponseController_ - - -class BaseController(_ResponseController_): - name = None - resource = None - allow_methods = ('POST',) - - def __call__(self, request, **kwargs): - return self.request_response(request=request, **kwargs) - - -class BackendController(_BackendManager_): - name = None - resource = None - allow_methods = ('GET', 'POST') - - def __call__(self, request, **kwargs): - return self.request_response(request=request, **kwargs) - - -class BackendIdController(_BackendIdManager_): - name = None - resource = None - allow_methods = ('GET', 'PATCH', 'DELETE') - - def __call__(self, request, **kwargs): - return self.request_response(request=request, **kwargs) diff --git a/core/local_exceptions.py b/core/local_exceptions.py deleted file mode 100644 index 93ce86cc..00000000 --- a/core/local_exceptions.py +++ /dev/null @@ -1,212 +0,0 @@ -# coding=utf8 - -from __future__ import (absolute_import, division, print_function, unicode_literals) - - -class AppBaseException(Exception): - status_code = 404 - - def __init__(self, message): - super(AppBaseException, self).__init__(message) - - def __repr__(self): - pass - - -class HttpMethodsNotAllowed(Exception): - status_code = 405 - - def __repr__(self): - pass - - -class AuthExceptionError(Exception): - status_code = 401 - - def __repr__(self): - pass - - -class AuthFailedError(Exception): - status_code = 401 - - def __init__(self, msg): - _msg = "认证失败, 原因: %s" % (msg) - super(AuthFailedError, self).__init__(_msg) - - -class 
AllowedForbidden(Exception): - status_code = 403 - - def __init__(self, msg): - _msg = "拒绝访问, 原因: %s" % (msg) - super(AllowedForbidden, self).__init__(_msg) - - -class ResourceNotFoundError(Exception): - status_code = 404 - - def __repr__(self): - pass - - -class ResourceConfigError(Exception): - status_code = 400 - - def __init__(self, msg): - _msg = "资源配置异常, 原因: %s" % (msg) - super(ResourceConfigError, self).__init__(_msg) - - -class ResourceNotSearchError(Exception): - status_code = 400 - - def __init__(self, param, msg, return_data): - self.return_data = return_data - _msg = "资源 %s %s未找到" % (param, msg) - super(ResourceNotSearchError, self).__init__(_msg) - -class ResourceOpearateNotSuccess(Exception): - status_code = 400 - - def __init__(self, param, msg, return_data): - self.return_data = return_data - _msg = "资源 %s %s 操作失败" % (param, msg) - super(ResourceOpearateNotSuccess, self).__init__(_msg) - - -class DataToolangError(Exception): - status_code = 403 - - def __init__(self, msg): - _msg = "请求错误, 原因: %s" % (msg) - super(DataToolangError, self).__init__(_msg) - - -class ResourceIsFoundError(Exception): - status_code = 400 - - def __repr__(self): - pass - - -class ServerIsBusy(AppBaseException): - status_code = 400 - - def __init__(self, msg=None): - if msg: - _msg = "服务器 %s,请稍后再试" % msg - else: - _msg = "服务器忙碌中,请稍后再试" - super(ServerIsBusy, self).__init__(_msg) - - -class ResoucrAddError(AppBaseException): - status_code = 400 - - def __init__(self, msg=None): - if not msg: - msg = "资源添加失败" % msg - - super(ResoucrAddError, self).__init__(msg) - - -class ObjectIsNotExistedException(AppBaseException): - status_code = 404 - - -class ObjectExistedException(AppBaseException): - status_code = 400 - - def __init__(self, param, msg): - _msg = "资源 %s 已存在, 原因: %s" % (param, msg) - super(ObjectExistedException, self).__init__(_msg) - - -class ResourceUniqueException(AppBaseException): - status_code = 400 - - def __init__(self, param, value): - _msg = "参数 %s 校验错误, 原因: %s 
必须唯一" % (param, value) - super(ResourceUniqueException, self).__init__(_msg) - - -class ResourceOperateException(AppBaseException): - status_code = 400 - - def __init__(self, param, msg): - _msg = "资源 %s 操作错误, 原因: %s" % (param, msg) - super(ResourceOperateException, self).__init__(_msg) - - -class OperateTooFastException(AppBaseException): - status_code = 400 - - def __init__(self, param, msg): - _msg = "资源 %s 操作过快, 原因: %s" % (param, msg) - super(OperateTooFastException, self).__init__(_msg) - - -class ValueValidateError(Exception): - status_code = 400 - - def __init__(self, param, msg): - _msg = "参数 %s 校验错误, 原因: %s" % (param, msg) - super(ValueValidateError, self).__init__(_msg) - - -class ResourceValidateError(Exception): - status_code = 400 - - def __init__(self, param, msg): - _msg = "资源 %s 校验错误, 原因: %s" % (param, msg) - super(ResourceValidateError, self).__init__(_msg) - - -class ResourceNotEnoughError(Exception): - status_code = 400 - - def __init__(self, param, msg): - _msg = "资源 %s 容量不足, 原因: %s" % (param, msg) - super(ResourceNotEnoughError, self).__init__(_msg) - - -class ResourceNotAttachError(Exception): - status_code = 400 - - def __init__(self, param, msg): - _msg = "资源 %s 挂载异常, 原因: %s" % (param, msg) - super(ResourceNotAttachError, self).__init__(_msg) - - -class ResourceNotCompleteError(Exception): - status_code = 400 - - def __init__(self, param, msg, return_data): - self.return_data = return_data - _msg = "部分资源 %s 创建未完成, 原因: %s" % (param, msg) - super(ResourceNotCompleteError, self).__init__(_msg) - - -class ArgsError(Exception): - status_code = 400 - - def __init__(self, msg): - _msg = "参数 错误, 原因: %s" % (msg) - super(ArgsError, self).__init__(_msg) - - -class RequestValidateError(Exception): - status_code = 400 - - def __init__(self, msg): - _msg = "请求校验错误, 原因: %s" % (msg) - super(RequestValidateError, self).__init__(_msg) - - -class UserAuthError(Exception): - status_code = 401 - - def __init__(self, param, msg): - _msg = "用户 %s 认证失败, 原因: %s" % 
(param, msg) - super(UserAuthError, self).__init__(_msg) diff --git a/core/response_hooks.py b/core/response_hooks.py deleted file mode 100644 index caca17bb..00000000 --- a/core/response_hooks.py +++ /dev/null @@ -1,252 +0,0 @@ -# _*_ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import copy -import json -import traceback -from django.http import HttpResponse -from django.http import HttpResponseNotAllowed -from wecube_plugins_terraform.settings import DEBUG -from core import local_exceptions as exception_common -from core.validation import validate_column_line -from lib.classtools import get_all_class_for_module -from lib.json_helper import format_json_dumps -from lib.logs import logger -from lib.uuid_util import get_uuid -from .auth import jwt_request - -content_type = 'application/json;charset=utf-8' -exception_common_classes = get_all_class_for_module(exception_common) - - -def format_string(data): - result = {} - for key, value in data.items(): - if isinstance(value, (dict, list)): - if key == "datas": - result[key] = value - else: - result[key] = format_json_dumps(value) - else: - if value is not None and value != 'None': - result[key] = str(value) - else: - result[key] = '' - - return result - - -class ResponseController(object): - name = None - allow_methods = tuple() - requestId = "" - requestUser = "Unknown" - resource = None - - def run_post(self, request, data, **kwargs): - if not isinstance(data, list): - raise exception_common.RequestValidateError("inputs 不支持的请求数据类型") - - return self.on_create(request, data, **kwargs) - - def on_create(self, request, datas, **kwargs): - response_data = {"resultCode": "0", "resultMessage": "success", "results": {"outputs": []}} - outputs = [] - for data in datas: - self.before_handler(request, data, **kwargs) - - for data in datas: - _res = {"errorCode": "0", "errorMessage": ""} - _res["callbackParameter"] = data.pop("callbackParameter", "") - try: - 
data.pop("callbackParameter", "") - user = data.pop("operator", "") - logger.info("user: %s data: %s" % (user, format_json_dumps(data))) - - res = self.backend_response(request, data, **kwargs) - if not res: - res = self.response_templete(data) - - if isinstance(res, list): - _t = [] - for _result in res: - _res.update(_result) - _tmp = copy.deepcopy(_res) - _t.append(_tmp) - - outputs += _t - else: - _res.update(res) - outputs.append(format_string(_res)) - except Exception, e: - _res["errorCode"] = "1" - response_data["resultCode"] = "1" - if e.__class__.__name__ in ['UnicodeDecodeError', 'ValueError', 'TypeError', "KeyError", - 'ResourceNotCompleteError', "ResourceNotSearchError", - 'AllowedForbidden', 'RequestDataTooBig', 'DataToolangError', - 'ResourceNotFoundError', 'AuthFailedError', 'TerrformExecError']: - x_msg = "type: %s, info: %s" % (e.__class__.__name__, e.message) - elif e.__class__.__name__ in exception_common_classes: - x_msg = "type: %s, info: %s" % (e.__class__.__name__, e.message) - else: - x_msg = "type: %s" % (e.__class__.__name__) - - response_data["resultMessage"] = x_msg - _res["errorMessage"] = x_msg - logger.info(traceback.format_exc()) - _res.update(self.response_templete(data)) - outputs.append(format_string(_res)) - - response_data["results"]["outputs"] = outputs - return response_data - - def response_templete(self, data): - return {} - - def before_handler(self, request, data, **kwargs): - pass - - def backend_response(self, request, data, **kwargs): - return self.main_response(request, data, **kwargs) - - def main_response(self, request, data, **kwargs): - return self.resource.create(data) - - def _validate_column(self, data): - if isinstance(data, list): - raise exception_common.RequestValidateError("不支持的数据类型") - elif isinstance(data, dict): - for cid, value in data.items(): - validate_column_line(cid) - else: - raise exception_common.RequestValidateError("未知请求数据类型") - - def handler_http(self, request, **kwargs): - data = 
request.body - try: - data = json.loads(data) - except: - raise exception_common.RequestValidateError("请求参数不为json") - - self.requestId = data.get("requestId") or "req_%s" % get_uuid() - self._trace_req(request) - self._validate_column(data) - - try: - data = data["inputs"] - except: - logger.info(traceback.format_exc()) - raise exception_common.RequestValidateError("非法的请求数据格式") - - result = self.run_post(request, data, **kwargs) - return format_json_dumps(result) - - def auth_method(self, request): - method = request.method.upper() - if method in self.allow_methods: - return True - else: - return False - - def format_err(self, errcode, errtype, errinfo, return_data=None): - if isinstance(errinfo, Exception): - errorMessage = "type: %s, info: %s" % (errtype, errinfo.message) - else: - errorMessage = "type: %s, info: %s" % (errtype, errinfo) - - msg = {"resultCode": "1", - "resultMessage": errorMessage, - "results": {"outputs": []} - } - - return json.dumps(msg, ensure_ascii=False) - - def _trace_req(self, request): - try: - data = request.body if request.method.upper() in ['POST', 'PATCH'] else request.GET - if isinstance(data, (dict, list)): - data = format_json_dumps(data) - logger.info("[%s] [RE] [%s]- %s %s %s " % (self.requestId, self.requestUser, - request.method.upper(), request.path, data)) - except: - logger.info(traceback.format_exc()) - - def trace_log(self, request, msg): - try: - if isinstance(msg, (dict, list)): - msg = format_json_dumps(msg) - - logger.info("[%s] [RP] [%s]- %s %s %s" % (self.requestId, self.requestUser, - request.method.upper(), request.path, msg)) - except: - logger.info(traceback.format_exc()) - - def exception_response(self, e): - if e.__class__.__name__ in ['UnicodeDecodeError']: - status_code = 200 - errmsg = self.format_err(200, "DataError", "字符错误, 原因:请使用UTF-8编码") - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in ['ValueError', 'TypeError', "KeyError"]: - 
status_code = 200 - errmsg = self.format_err(200, "ValueError", "字符错误, 原因:%s" % e.message) - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in ['AuthFailedError']: - status_code = 401 - errmsg = self.format_err(401, "UserAuthError", e) - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in ['AllowedForbidden']: - status_code = 403 - errmsg = self.format_err(403, "AllowedForbidden", e) - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - elif e.__class__.__name__ in exception_common_classes: - errmsg = self.format_err(e.status_code, e.__class__.__name__, e) - response_res = HttpResponse(status=e.status_code, content=errmsg, content_type=content_type) - else: - status_code = 500 - errmsg = self.format_err(status_code, "SericeError", "服务器遇到异常") - response_res = HttpResponse(status=status_code, content=errmsg, content_type=content_type) - return response_res - - def request_response(self, request, **kwargs): - method = request.method - if method == "OPTIONS": - return HttpResponse(str(self.allow_methods)) - else: - if request.method.upper() == "POST": - res = self._request_response(request, **kwargs) - res.setdefault("ReqID", self.requestId) - - try: - _traceres = res.content.decode("utf-8") - except: - _traceres = res.content - - self.trace_log(request, msg=(str(res.status_code) + " data: %s " % _traceres)) - return res - else: - return HttpResponseNotAllowed(["POST"], - content=self.format_err(405, "HttpMethodsNotAllowed", "POST"), - content_type=content_type) - - def _is_platform(self, jwt_info): - if jwt_info.get("sub") != "SYS_PLATFORM": - raise exception_common.AllowedForbidden("AllowedForbidden") - if "SUB_SYSTEM" not in jwt_info.get("authority"): - raise exception_common.AllowedForbidden("AllowedForbidden") - - def _request_response(self, request, **kwargs): - try: - if not DEBUG: 
- jwt_info = jwt_request(request) - self._is_platform(jwt_info) - self.requestUser = jwt_info.get("sub") - - res = HttpResponse(content=self.handler_http(request=request, **kwargs), - status=200, - content_type=content_type) - except Exception, e: - logger.info(traceback.format_exc()) - logger.info(e.message) - res = self.exception_response(e) - return res diff --git a/core/validation.py b/core/validation.py deleted file mode 100644 index 393a5ebf..00000000 --- a/core/validation.py +++ /dev/null @@ -1,220 +0,0 @@ -# _*_ coding:utf-8 _*_ - -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import datetime -import json - -import re - -import core.local_exceptions as exception_common -from lib.ip_helper import check_ip - -args_type = { - "int": u"整数", - "list": u"列表", - "dict": u"json类型", - "basestring": u"字符串类型", - "str": u"字符串类型", - "bool": u"布尔值", - "datetime.datetime": u"时间类型", - "unicode": u"字符串类型", - "float": u"浮点数类型" -} - - -def validate_ipaddress(ip): - status, msg = check_ip(ip) - if not status: - raise exception_common.ValueValidateError("ip", msg) - - -def validate_column_line(column): - if re.match(r'^[0-9a-zA-Z_]{1,36}$', column): - return True - else: - raise exception_common.RequestValidateError("不合法字段 %s" % column) - - -def validate_resource_id(rid): - if re.match(r'^[.0-9a-zA-Z_-]{1,36}$', rid): - return True - else: - raise exception_common.ResourceNotFoundError() - - -def format_type_to_chinese(type): - for ixe in args_type.keys(): - if type == eval(ixe): - return args_type.get(ixe) - - -def str_to_time(key, date_str): - try: - if ":" in date_str: - return datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S') - else: - return datetime.datetime.strptime(date_str, '%Y-%m-%d') - except: - raise exception_common.ValueValidateError(param=key, msg=u"非法值 %s,不是合法的时间" % date_str) - - -def validate_port(port, min=None, max=None, permit_null=False): - if permit_null: - if not port and port != 0: - return 0 - - try: - 
port = int(port) - except: - raise exception_common.ValueValidateError(param="port", msg=u"%s 非法的端口号" % port) - - min = min or 1 - max = max or 65535 - if port < min or port > max: - raise exception_common.ValueValidateError(param="port", msg=u"%s 非法的端口号" % port) - - return port - - -def validate_email_address(email_address): - if re.match(r'^[.0-9a-zA-Z_-]{0,19}@[0-9a-zA-Z]{1,13}\.[comnet]{1,3}$', email_address): - if email_address.endswith("com") or email_address.endswith("net") or email_address.endswith("cn"): - return True - - raise exception_common.ValueValidateError(param="email", msg=u"非法值 %s,不是合法的邮件地址" % email_address) - - -def not_allowed_null(keys, data): - for key in keys: - if not data.get(key): - raise ValueError("参数%s 不能为null" % key) - - -def allowed_key(keys, data): - for key in keys: - if key not in data: - raise ValueError("不合法的参数%s" % key) - - -def validate_string(key, value, minlen=None, maxlen=None): - if value == "" or value is None: - return - if not isinstance(value, basestring): - raise ValueError("%s 不是合法类型string" % key) - if minlen and len(value) < minlen: - raise ValueError("%s 长度不能小于 %s" % (key, minlen)) - if maxlen and len(value) > maxlen: - raise ValueError("%s 长度不能大于 %s" % (key, maxlen)) - - return value - - -def validate_collector(data, strings=None, dicts=None, lists=None, ints=None, ports=None): - if strings: - for str in strings: - validate_string(key=str, value=data.get(str)) - - if dicts: - for d in dicts: - validate_dict(key=d, value=data.get(d)) - - if lists: - for d in lists: - validate_list(key=d, value=data.get(d)) - - if ints: - for d in ints: - validate_int(key=d, value=data.get(d)) - - if ports: - for d in ports: - validate_port(data.get(d)) - - -def validate_list(key, value, minlen=None, maxlen=None): - if not value: - return [] - if isinstance(value, basestring): - value = value.strip() - if value.startswith("[") and value.endswith("]"): - try: - value = json.loads(value) - except: - value = eval(value) - elif "," in 
value and "[" not in value: - value = value.split(",") - if not isinstance(value, list): - raise ValueError("%s 不是合法类型list" % key) - if minlen and len(value) < minlen: - raise ValueError("%s 长度不能小于 %s" % (key, minlen)) - if maxlen and len(value) > maxlen: - raise ValueError("%s 长度不能大于 %s" % (key, maxlen)) - - return value - - -def validate_dict(key, value, minlen=None, maxlen=None): - if not value: - return {} - try: - if isinstance(value, basestring): - try: - value = json.loads(value) - except: - value = eval(value) - except: - raise ValueError("%s 不是json" % key) - - if not isinstance(value, dict): - raise ValueError("%s 不是合法类型json" % key) - - if minlen and len(value) < minlen: - raise ValueError("%s 长度不能小于 %s" % (key, minlen)) - if maxlen and len(value) > maxlen: - raise ValueError("%s 长度不能大于 %s" % (key, maxlen)) - - return value - - -def validate_bool(key, value, default=None): - if value is None: - value = value or default - else: - if not isinstance(value, bool): - raise ValueError("%s 不是合法类型bool" % key) - - return value - - -def validate_number(key, value, min=None, max=None): - try: - if isinstance(value, basestring): - if "." in value: - value = float(value) - else: - value = int(value) - except: - raise ValueError("%s 不是合法类型" % key) - - if min and value < min: - raise ValueError("%s 不能小于 %s" % (key, min)) - if max and value > max: - raise ValueError("%s 不能大于 %s" % (key, max)) - - return value - - -def validate_int(key, value, min=None, max=None): - try: - if not isinstance(value, int): - value = int(value) - except: - raise ValueError("%s 不是合法整数类型" % key) - - if min and value < min: - raise ValueError("%s 不能小于 %s" % (key, min)) - if max and value > max: - raise ValueError("%s 不能大于 %s" % (key, max)) - - return value diff --git a/doc/images/terraform_process.svg b/doc/images/terraform_process.svg deleted file mode 100644 index ac285a21..00000000 --- a/doc/images/terraform_process.svg +++ /dev/null @@ -1,3 +0,0 @@ - - -
API    CONTROLLER
API    CONTROLLER
alicloud resource
alicloud resource
aws resource
aws resource
tencentcloud resource
tencentcloud resource
api
api
sdk driver
sdk driver
aws
aws
alicloud
alicloud
tencent
tencent
wecube
wecube
.....
.....
terrform cli
terrform cli
资源生成器
资源生成器
DB
DB
配置中心
配置中心
云厂商注册
云厂商注册
云资源配置
云资源配置
标准化配置
标准化配置
规格配置
规格配置
系统内置
初始化配置
系统内置 初始化配置
用户配置
用户配置
FILE
FILE
internet
internet
配置生成器
配置生成器
资源转换器
资源转换器
属性筛选器
属性筛选器
workspace
workspace
属性转换器
属性转换器
output
output
云资源网络: VPC、SUBNET ....VM: ECS, ENI ....存储: OSS, DISK ....DB: MYSQL  ..........
Viewer does not support full SVG 1.1
\ No newline at end of file diff --git a/doc/init.sql b/doc/init.sql deleted file mode 100644 index 68ea422f..00000000 --- a/doc/init.sql +++ /dev/null @@ -1,157 +0,0 @@ -#@v0.1.0-begin@; - -DROP TABLE IF EXISTS `cloud_providers`; - -CREATE TABLE `cloud_providers` ( - `id` VARCHAR(64) NOT NULL, - `display_name` VARCHAR(64) DEFAULT NULL, - `name` VARCHAR(64) NOT NULL, - `secret_id` VARCHAR(256), - `secret_key` VARCHAR(256), - `region` VARCHAR(64) DEFAULT NULL, - `zone` VARCHAR(64) DEFAULT NULL, - `plugin_source` VARCHAR(64) DEFAULT NULL, - `extend_info` TEXT DEFAULT NULL, - `provider_property` TEXT DEFAULT NULL, - `is_init` BOOL DEFAULT FALSE, - `created_time` DATETIME DEFAULT NULL, - `updated_time` DATETIME DEFAULT NULL, - `deleted_time` DATETIME DEFAULT NULL, - `enabled` BOOL DEFAULT TRUE, - `is_deleted` BOOL DEFAULT FALSE, - PRIMARY KEY (`id`), - UNIQUE INDEX `idx_name` (`name`, `is_deleted`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - - -DROP TABLE IF EXISTS `cloud_secret`; - -CREATE TABLE `cloud_secret` ( - `id` VARCHAR(64) NOT NULL, - `name` VARCHAR(64) NOT NULL, - `display_name` VARCHAR(64) DEFAULT NULL, - `provider` VARCHAR(64) NOT NULL, - `region` VARCHAR(64) DEFAULT NULL, - `secret_info` VARCHAR(2048) NOT NULL, - `extend_info` TEXT DEFAULT NULL, - `created_time` DATETIME DEFAULT NULL, - `updated_time` DATETIME DEFAULT NULL, - `deleted_time` DATETIME DEFAULT NULL, - `enabled` BOOL DEFAULT TRUE, - `is_deleted` BOOL DEFAULT FALSE, - PRIMARY KEY (`id`), - UNIQUE INDEX `idx_name` (`name`, `provider`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - - -DROP TABLE IF EXISTS `resource`; - -CREATE TABLE `resource` ( - `id` varchar(64) NOT NULL, - `provider` varchar(64) NOT NULL, - `resource_type` varchar(64) NOT NULL, - `resource_name` varchar(64) NOT NULL, - `extend_info` text NOT NULL, - `resource_property` text NOT NULL, - `resource_output` text, - `data_source_name` varchar(64) DEFAULT NULL, - `data_source_argument` varchar(64) DEFAULT NULL, - `data_source` text, - 
`data_source_output` varchar(2048) DEFAULT NULL, - `is_locked` tinyint(1) DEFAULT '0', - `created_time` datetime DEFAULT NULL, - `updated_time` datetime DEFAULT NULL, - `deleted_time` datetime DEFAULT NULL, - `enabled` tinyint(1) DEFAULT '1', - `is_deleted` tinyint(1) DEFAULT '0', - PRIMARY KEY (`id`), - UNIQUE KEY `idx_property` (`provider`,`resource_type`,`is_deleted`) -) ENGINE=InnoDB DEFAULT CHARSET=utf8; - - -DROP TABLE IF EXISTS `config`; - -CREATE TABLE `config` ( - `id` VARCHAR(64) NOT NULL, - `provider` VARCHAR(64) NOT NULL, - `resource` VARCHAR(64) NOT NULL, - `property` VARCHAR(64) DEFAULT NULL, - `value_config` text NOT NULL, - `is_locked` BOOL DEFAULT FALSE, - `created_time` DATETIME DEFAULT NULL, - `updated_time` DATETIME DEFAULT NULL, - `deleted_time` DATETIME DEFAULT NULL, - `enabled` BOOL DEFAULT TRUE, - `is_deleted` BOOL DEFAULT FALSE, - PRIMARY KEY (`id`), - UNIQUE INDEX `idx_pro_res` (`provider`, `resource`, `property`, `is_deleted`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - - -DROP TABLE IF EXISTS `cloud_resource`; - -CREATE TABLE `cloud_resource` ( - `id` VARCHAR(64) NOT NULL, - `provider_id` VARCHAR(64) DEFAULT NULL, - `provider` VARCHAR(64) DEFAULT NULL, - `region` VARCHAR(64) DEFAULT NULL, - `zone` VARCHAR(64) DEFAULT NULL, - `resource_name` VARCHAR(64) NOT NULL, - `resource_id` VARCHAR(64) DEFAULT NULL, - `owner_id` VARCHAR(64) DEFAULT NULL, - `relation_id` VARCHAR(64) DEFAULT NULL, - `propertys` text DEFAULT NULL, - `extend_info` text DEFAULT NULL, - `define_json` text DEFAULT NULL, - `result_json` TEXT DEFAULT NULL, - `output_json` TEXT DEFAULT NULL, - `status` varchar(64) DEFAULT NULL, - `created_time` DATETIME DEFAULT NULL, - `updated_time` DATETIME DEFAULT NULL, - `deleted_time` DATETIME DEFAULT NULL, - `is_deleted` BOOL DEFAULT FALSE, - PRIMARY KEY (`id`, `resource_name`), - INDEX `idx_id` (`resource_id`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - -ALTER TABLE `cloud_resource` - CHANGE `resource_id` `resource_id` VARCHAR(255) CHARSET 
utf8 COLLATE utf8_general_ci NULL; - - -DROP TABLE IF EXISTS `instance_type`; - -CREATE TABLE `instance_type` ( - `id` VARCHAR(64) NOT NULL, - `provider_id` VARCHAR(64) DEFAULT NULL, - `provider` VARCHAR(64) DEFAULT NULL, - `name` VARCHAR(64) DEFAULT NULL, - `origin_name` VARCHAR(64) DEFAULT NULL, - `cpu` int(11) DEFAULT NULL, - `memory` int(11) DEFAULT NULL, - `network` VARCHAR(64) DEFAULT NULL, - `extend_info` text DEFAULT NULL, - `define_json` text DEFAULT NULL, - `status` varchar(64) DEFAULT NULL, - `result_json` TEXT DEFAULT NULL, - `created_time` DATETIME DEFAULT NULL, - `updated_time` DATETIME DEFAULT NULL, - `deleted_time` DATETIME DEFAULT NULL, - `enabled` BOOL DEFAULT TRUE, - `is_deleted` BOOL DEFAULT FALSE, - PRIMARY KEY (`id`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - - -DROP TABLE IF EXISTS `cloud_resource_history`; - -CREATE TABLE `cloud_resource_history` ( - `xid` VARCHAR(64) NOT NULL, - `id` VARCHAR(64) DEFAULT NULL, - `resource` VARCHAR(64) DEFAULT NULL, - `ora_data` text DEFAULT NULL, - `created_time` DATETIME DEFAULT NULL, - PRIMARY KEY (`xid`), - INDEX `idx_id` (`id`, `created_time`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - -#@v0.1.0-end@; \ No newline at end of file diff --git a/doc/init_data.sql b/doc/init_data.sql deleted file mode 100644 index 1629c35c..00000000 --- a/doc/init_data.sql +++ /dev/null @@ -1,3 +0,0 @@ -#@v0.1.0-begin@; - -#@v0.1.0-end@; \ No newline at end of file diff --git a/doc/terraform_front_api.md b/doc/terraform_front_api.md deleted file mode 100644 index 1703725d..00000000 --- a/doc/terraform_front_api.md +++ /dev/null @@ -1,1307 +0,0 @@ -#### provider (云厂商) - -##### list: - -url: /terraform/v1/configer/provider - -参数: "id", "name", "region" - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":2, - "data":[ - { - "display_name":"腾讯云", - "name":"tencentcloud", - "zone":null, - "deleted_time":null, - "region":null, - "enabled":1, - "secret_id":"xxxx", - "updated_time":"2021-01-13 11:15:56", 
- "provider_property":{ - "region":"region", - "secret_key":"secret_key", - "secret_id":"secret_id" - }, - "created_time":"2021-01-13 11:15:56", - "extend_info":{ - - }, - "plugin_source":null, - "is_init":1, - "is_deleted":0, - "secret_key":"xxx", - "id":"73aa4d40248849c48cb0fcde88d1d1d1" - } - ] - } -} -``` - - - -##### create: - -url: /terraform/v1/configer/provider - -字段: - - - -输入: - - -``` -name string 必填 -display_name string 选填 -secret_id string 必填 -secret_key string 必填 -region string 隐藏 -zone string 隐藏 -extend_info json -provider_property json - - -示例: -{ -"display_name": "腾讯云", -"name": "tencentcloud02", -"zone": "", -"region": "", -"secret_id": "31313", -"secret_key": "3131313", -"extend_info": {}, -"provider_property": { - "secret_id": "secret_id", - "secret_key":"secret_key", - "region": "region" -} -} - -``` - - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":{ - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - - } -} -``` - - - -##### 详情: - -url: /terraform/v1/configer/provider/{id} - -参数: "id", - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "display_name":"腾讯云", - "name":"tencentcloud02", - "zone":null, - "deleted_time":null, - "region":null, - "enabled":1, - "secret_id":"34242", - "updated_time":"2021-01-21 10:37:10", - "provider_property":{ - "region":"region", - "secret_key":"secret_key", - "secret_id":"secret_id" - }, - "created_time":"2021-01-21 10:37:10", - "extend_info":{ - - }, - "plugin_source":null, - "is_init":1, - "is_deleted":0, - "secret_key":"xxxx", - "id":"c73b8db43df24a2899da00d9dc7afbb9" - } -} -``` - - -##### update: - -url: /terraform/v1/configer/provider/{id} - -参数: -"zone", -"secret_id", -"secret_key", -"region", -"enabled", -"extend_info", -"provider_property" - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":[ - { - "is_deleted":0, - "name":"tencentcloud", - "zone":null, - "deleted_time":null, - 
"region":"ap-guangzhou", - "enabled":1, - "secret_id":"dasdad", - "updated_time":"2020-12-15 16:12:30", - "provider_property":"{}", - "created_time":"2020-12-15 16:12:30", - "extend_info":"{}", - "plugin_source":null, - "is_init":1, - "secret_key":"ddada", - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - ] - } -} -``` - - -##### delete: - -url: /terraform/v1/configer/provider/{id} - -参数: id - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "data":[ - { - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - ] - } -} -``` - ------ - ----- -#### resource(资源/云产品) - -##### list: - -url: /terraform/v1/configer/resource - -参数:"id", "provider", "resource_type", "resource_name" - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":6, - "data":[ - { - "resource_name":"vpc", - "is_deleted":0, - "resource_property":{ - "cidr":{ - "convert":"cidr_block", - "allow_null":0, - "type":"string" - }, - "name":{ - "convert":"name", - "allow_null":0, - "type":"string" - } - }, - "is_locked":0, - "deleted_time":null, - "enabled":1, - "resource_output":{ - "resource_id":"id" - }, - "updated_time":"2021-01-16 12:35:44", - "extend_info":{ - "is_multicast":false, - "tags":{ - "type":"json" - } - }, - "provider":"tencentcloud", - "created_time":"2021-01-16 12:35:44", - "resource_type":"tencentcloud_vpc", - "id":"b0126dffc4114d9495b3d22ce8ca99ec" - } - ] - } -} -``` - - - -##### create: - -url: /terraform/v1/configer/resource - -字段: - - - -输入: - - -``` -"provider" 云厂商 string 必填 -"resource_type" 类别: 如vpc string 必填 - - -"resource_name" 资源名称 如tencent_vpc string 必填 -"extend_info" 资源其他属性字段 json -"resource_property" 资源转换的属性字段 json 如: cidr需要转换为cidr_block 则为{“cidr”: "cidr_block"} -"resource_output" 输出属性: 例如{“resource_id”: "id"}或{“resource_id”: {"type": "string", "value": "id"}} - - -data_source_name 查询data source资源名称 string 选填 -data_source_argument data source输出资源字段 如: instance.configs string 选填 -data_source data source查询字段转换 string 选填 
-data_source_output data source资源输出转换字段 string 选填 - - -pre_action 列表资源 string 选填 例如vpc_list -pre_action_output 列表输出参数 json 选填 -``` - - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":{ - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - - } -} -``` - - - -##### 详情: - -url: /terraform/v1/configer/resource/{id} - -参数: "id", - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "data_source_name":"tencentcloud_vpc_instances", - "resource_name":"tencentcloud_vpc", - "is_deleted":0, - "resource_property":{ - "cidr":{ - "convert":"cidr_block", - "allow_null":0, - "type":"string" - }, - "name":{ - "convert":"name", - "allow_null":0, - "type":"string" - } - }, - "is_locked":0, - "deleted_time":null, - "resource_output":{ - "resource_id":{ - "type":"string", - "value":"id" - } - }, - "enabled":1, - "updated_time":"2021-03-25 15:39:17", - "data_source":{ - "resource_id":"vpc_id" - }, - "extend_info":{ - "tags":{ - "type":"json" - }, - "is_multicast":false - }, - "data_source_output":{ - "cidr":{ - "convert":"cidr_block", - "allow_null":0, - "type":"string" - }, - "name":{ - "convert":"name", - "allow_null":0, - "type":"string", - "resource_id":"vpc_id" - } - }, - "data_source_argument":"instance_list", - "provider":"tencentcloud", - "created_time":"2021-03-25 15:39:17", - "id":"074354e0795a4e859cff2c6e7471e6bf", - "resource_type":"vpc" - } -} -``` - - -##### update: - -url: /terraform/v1/configer/resource/{id} - -参数: - -``` -"provider", -"resource_type", -"extend_info" -"resource_name", -"resource_property" -"resource_output" -data_source_name -data_source_argument -data_source -data_source_output -``` -例如: -``` -{ -"data_source_name": "tencentcloud_vpc_subnets", -"data_source_argument": "instance_list", -"data_source": {"resource_id": "subnet_id"}, -"data_source_output": {} -} -``` - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":[ - { - "resource_name":"vpc", 
- "is_deleted":0, - "resource_property":{ - "cidr":{ - "convert":"cidr_block", - "allow_null":0, - "type":"string" - }, - "name":{ - "convert":"name", - "allow_null":0, - "type":"string" - } - }, - "is_locked":0, - "deleted_time":null, - "enabled":1, - "resource_output":{ - "resource_id":"id" - }, - "updated_time":"2021-01-16 12:35:44", - "extend_info":{ - "is_multicast":false, - "tags":{ - "type":"json" - } - }, - "provider":"tencentcloud", - "created_time":"2021-01-16 12:35:44", - "resource_type":"tencentcloud_vpc", - "id":"b0126dffc4114d9495b3d22ce8ca99ec" - } - ] - } -} -``` - - -##### delete: - -url: /terraform/v1/configer/resource/{id} - -参数: id - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "data":[ - { - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - ] - } -} -``` - ----- - ------ - -#### keyconfig(云资源的属性) - -##### list: - -url: /terraform/v1/configer/keyconfig - -参数:"id", "resource", "provider", "resource_type", "enabled" - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":[ - { - "resource":"vpc", - "is_locked":0, - "deleted_time":null, - "enabled":1, - "updated_time":null, - "value_config":"{ -"cidr":"cidr_block" -}", - "provider":"tencentcloud", - "created_time":null, - "is_deleted":0, - "resource_type":null, - "id":"62e1376d-eb96-477b-927a-ce27d0ea6849" - } - ] - } -} -``` - - - -##### create: - -url: /terraform/v1/configer/keyconfig - -字段: - - - -输入: - - -``` -"provider", 云厂商 string 必填 -"resource", 资源名称 string 必填 如vpc -"resource_type", 资源属性 string 必填 如cidr -"value_config" 资源属性值转换配置 json 如: “muliticate” 转换为“muliticate_info” 则配置为: {“muliticate”: “muliticate_info”} -``` - - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":{ - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - - } -} -``` - - - -##### 详情: - -url: /terraform/v1/configer/keyconfig/{id} - -参数: "id", - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - 
"resource":"vpc", - "is_locked":0, - "deleted_time":null, - "enabled":1, - "updated_time":null, - "value_config":"{ -"cidr":"cidr_block" -}", - "provider":"tencentcloud", - "created_time":null, - "is_deleted":0, - "resource_type":null, - "id":"62e1376d-eb96-477b-927a-ce27d0ea6849" - ] - } -} -``` - - -##### update: - -url: /terraform/v1/configer/keyconfig/{id} - -参数: - -``` -"provider", -"resource", -"resource_type", -"value_config" -``` - - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":[ - { - "resource":"vpc", - "is_locked":0, - "deleted_time":null, - "enabled":1, - "updated_time":null, - "value_config":"{ -"cidr":"cidr_block" -}", - "provider":"tencentcloud", - "created_time":null, - "is_deleted":0, - "resource_type":null, - "id":"62e1376d-eb96-477b-927a-ce27d0ea6849" - } - ] - } -} -``` - - -##### delete: - -url: /terraform/v1/configer/keyconfig/{id} - -参数: id - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "data":[ - { - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - ] - } -} -``` - ----- - ------ - -#### instance type (实例规格) - -包含: 虚拟机, RDS, NOSQL, KV STORAGE 等涉及规格选择的产品 - - - -##### list: - -url: /terraform/v1/vm/instance_type - -参数: - -``` -"id", "provider", "origin_name", "cpu", "memory", "provider_id", "name", "enabled", type -``` - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":[ - { - "provider_id":"73aa4d40248849c48cb0fcde88d1d1d1", - "is_deleted":0, - "network":"1.5Gbps -- 25万PPS", - "origin_name":"S5.SMALL2", - "deleted_time":null, - "memory":2, - "enabled":1, - "cpu":1, - "extend_info":{ - - }, - "provider":"tencentcloud", - "created_time":"2021-01-21 11:31:34", - "updated_time":"2021-01-21 11:31:34", - "id":"a814c4b2400e43f89d30cd167a9cf9c3", - "name":"1C-2G" - } - ] - } -} -``` - - - -##### create: - -url: /terraform/v1/vm/instance_type - -字段: - - - -输入: - - -``` -"name" 类型名称 string 必填 -"provider_id", 云厂商id string 必填 
-"origin_name", 云厂商实例规格 string 必填 -"cpu", cpu个数 int 必填 -"memory", 内存大小 int 必填 -"network", 网络参数 string 选填 -"extend_info" 其他信息 list 选填 -type 规格类型如instance string 必填 (对应名称必须为资源类型name、例如: mysql, instance等) - -例如: -{ -"name": "1C-2G", - "provider_id": "73aa4d40248849c48cb0fcde88d1d1d1", -"origin_name": "S5.SMALL2", - "cpu": 1, -"memory": 2, - "network": "1.5Gbps -- 25万PPS", - "extend_info": {} -} - -``` - - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":"a814c4b2400e43f89d30cd167a9cf9c3" - } -} -``` - - - -##### 详情: - -url: /terraform/v1/vm/instance_type/{id} - -参数: "id", - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "provider_id":"73aa4d40248849c48cb0fcde88d1d1d1", - "is_deleted":0, - "network":"1.5Gbps -- 25万PPS", - "origin_name":"S5.SMALL2", - "deleted_time":null, - "memory":2, - "enabled":1, - "cpu":1, - "extend_info":{ - - }, - "provider":"tencentcloud", - "created_time":"2021-01-21 11:31:34", - "updated_time":"2021-01-21 11:31:34", - "id":"a814c4b2400e43f89d30cd167a9cf9c3", - "name":"1C-2G" - } -} -``` - - -##### update: - -url: /terraform/v1/vm/instance_type/{id} - -参数: - -``` -"name", -"provider_id", -"origin_name", -"cpu", -"memory", -"network", -"extend_info" -``` - - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":{ - "provider_id":"73aa4d40248849c48cb0fcde88d1d1d1", - "provider":null, - "created_time":"2021-01-21 11:31:34", - "id":"a814c4b2400e43f89d30cd167a9cf9c3", - "name":"1C-2G", - "is_deleted":0, - "network":"1.5Gbps -- 25万PPS", - "origin_name":"S5.SMALL2", - "deleted_time":null, - "enabled":1, - "updated_time":"2021-01-21 11:38:42", - "extend_info":{ - - }, - "memory":2, - "cpu":1 - } - } -} -``` - - -##### delete: - -url: /terraform/v1/vm/instance_type/{id} - -参数: id - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "data":[ - { - "id":"76c8286bed1e444db7bee8ef5129ede5" - } - ] - } -} -``` - 
---- - ---- - - -#### secret (云厂商认证信息) - -##### list: - -url: /terraform/v1/configer/secret - -参数: "id", "name", "display_name", "region", - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":[ - { - "secret_info":"{cipher_a}oc+hXvxxxxxxxxxxxxx", - "display_name":"alicloud_secret", - "name":"alicloud_secret", - "deleted_time":null, - "region":null, - "enabled":1, - "updated_time":"2021-02-22 16:47:45", - "extend_info":{ - - }, - "provider":"alicloud", - "created_time":"2021-02-22 16:47:45", - "is_deleted":0, - "id":"443318b72e91438896fc5b901901c285" - } - ] - } -} -``` - - - -##### create: - -url: /terraform/v1/configer/secret - -字段: - - - -输入: - - -``` -name string 必填 -display_name string 选填 -provider string 云厂商 必填 -secret_info json 认证信息 必填 -region string 选填(备注:限制秘钥可以使用的region范围, region为空则不限制, 多个region之间使用逗号(,)分隔) -extend_info json - - - -示例: -{ -"name": "xxxxx", -"display_name": "tencenssst", -"provider": "tencentcloud", -"secret_info": {"secret_id": "xxxxxx", "secret_key": "xxxx"}, -"region": null, -"extend_info": {} -} - -``` - - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - "data":"8a772999c34e40faa503b59a026971aa" - } -} -``` - - - -##### 详情: - -url: /terraform/v1/configer/secret/{id} - -参数: "id", - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "secret_info":"{cipher_a}Rxxxxxxxxxxxx", - "display_name":"tencenssst", - "name":"xxxxx", - "deleted_time":null, - "region":null, - "enabled":1, - "updated_time":"2021-02-23 11:23:08", - "extend_info":{ - - }, - "provider":"tencentcloud", - "created_time":"2021-02-23 11:23:08", - "is_deleted":0, - "id":"8a772999c34e40faa503b59a026971aa" - } -} -``` - - -##### update: - -url: /terraform/v1/configer/secret/{id} - -参数: -name string -display_name string -provider string -secret_info json -region string -extend_info json - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "count":1, - 
"data":{ - "secret_info":"{cipher_a}+vvvvvv", - "display_name":"tencenssst", - "name":"xxxxx", - "deleted_time":null, - "region":null, - "enabled":1, - "updated_time":"2021-02-23 11:27:04", - "extend_info":{ - - }, - "provider":"tencentcloud", - "created_time":"2021-02-23 11:23:08", - "is_deleted":0, - "id":"8a772999c34e40faa503b59a026971aa" - } - } -} -``` - - -##### delete: - -url: /terraform/v1/configer/secret/{id} - -参数: id - - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "data":1 - } -} -``` - ------ -#### 资源配置列表 -##### list: - -url: /terraform/v1/configer/resourceList - -参数: "provider", - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "resource":[ - { - "id":"nosql", - "name":"nosql" - }, - { - "id":"route_entry", - "name":"route_entry" - }, - ..... - { - "id":"security_group", - "name":"security_group" - }, - { - "id":"mysql_backup", - "name":"mysql_backup" - } - ] - } -} -``` - - ------ - -#### 资源属性列表 -##### list: - -url: /terraform/v1/configer/resourceAttr - -参数: "provider", “resource_type” - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "resource":[ - "asset_id", - "cidr_block", - "name" - ] - } -} -``` - ------ - -#### 资源属性列表 -##### list: - -url: /terraform/v1/configer/resourceHint - -参数: “resource_type” - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "attribute":{ - "nosql":[ - "asset_id", - "ipaddress", - "port" - ] - "disk_attach":[ - "asset_id" - ], - - }, - "resource":[ - { - "id":"$zone", - "name":"$zone" - }, - { - "id":"$region", - "name":"$region" - }, - { - "id":"$instance.type", - "name":"$instance.type" - }, - { - "id":"$instance.type.cpu", - "name":"$instance.type.cpu" - }, - { - "id":"$instance.type.memory", - "name":"$instance.type.memory" - }, - { - "id":"$resource", - "name":"$resource" - }, - { - "id":"$resource.db_subnet_group", - "name":"$resource.db_subnet_group" - }, - - ] - } -} -``` - ------ - - -#### 资源属性列表 -##### 
list: - -url: /terraform/v1/configer/resourceAttr - -参数: "provider", “resource_type” - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "attribute":{ - "charge_type":{ - "POSTPAID":"POSTPAID", - "PREPAID":"PREPAID" - }, - "internet_service":{ - "0":"0", - "1":"1" - } - }, - "resource":[ - { - "id":"charge_type", - "name":"charge_type" - }, - { - "id":"engine", - "name":"engine" - } - ] - } -} -``` - ------ - -#### 资源配置列表 -##### list: - -url: /terraform/v1/configer/configList - -参数: "provider", “resource_type”, "property" - -输出: - -``` -{ - "status":"OK", - "message":"OK", - "code":0, - "data":{ - "resource":[ - { - "origin_name":"POSTPAID", - "id":"POSTPAID", - "name":"POSTPAID" - }, - { - "origin_name":"PREPAID", - "id":"PREPAID", - "name":"PREPAID" - } - ] - } -} -``` - ------ - diff --git a/doc/todo_list.md b/doc/todo_list.md deleted file mode 100644 index 3807eade..00000000 --- a/doc/todo_list.md +++ /dev/null @@ -1,5 +0,0 @@ - -列表: - - - diff --git a/doc/update.sql b/doc/update.sql deleted file mode 100644 index ed447ef3..00000000 --- a/doc/update.sql +++ /dev/null @@ -1,59 +0,0 @@ - -#@v0.2.0-begin@; -ALTER TABLE cloud_secret ADD server varchar(256) NULL after `region`; - - -DROP TABLE IF EXISTS `cloud_region`; - -CREATE TABLE `cloud_region` ( - `id` VARCHAR(64) NOT NULL, - `name` VARCHAR(128) DEFAULT NULL, - `provider` VARCHAR(128) NOT NULL, - `asset_id` VARCHAR(128) NOT NULL, - `extend_info` TEXT DEFAULT NULL, - `created_time` DATETIME DEFAULT NULL, - `updated_time` DATETIME DEFAULT NULL, - `deleted_time` DATETIME DEFAULT NULL, - `enabled` BOOL DEFAULT TRUE, - `is_deleted` BOOL DEFAULT FALSE, - PRIMARY KEY (`id`), - UNIQUE INDEX `idx_asset` (`asset_id`, `provider`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - - -DROP TABLE IF EXISTS `cloud_zone`; - -CREATE TABLE `cloud_zone` ( - `id` VARCHAR(64) NOT NULL, - `name` VARCHAR(128) DEFAULT NULL, - `provider` VARCHAR(128) NOT NULL, - `asset_id` VARCHAR(128) NOT NULL, - `region` VARCHAR(128) 
DEFAULT NULL, - `extend_info` TEXT DEFAULT NULL, - `created_time` DATETIME DEFAULT NULL, - `updated_time` DATETIME DEFAULT NULL, - `deleted_time` DATETIME DEFAULT NULL, - `enabled` BOOL DEFAULT TRUE, - `is_deleted` BOOL DEFAULT FALSE, - PRIMARY KEY (`id`), - UNIQUE INDEX `idx_asset` (`asset_id`, `provider`) -) ENGINE=INNODB DEFAULT CHARSET=utf8; - -#@v0.2.0-end@; - -#@v0.3.0-begin@; - - -ALTER TABLE resource MODIFY COLUMN data_source_argument varchar(256) CHARACTER SET utf8 COLLATE utf8_general_ci NOT NULL; - -ALTER TABLE resource ADD pre_action varchar(256) NULL after `data_source_output`; -ALTER TABLE resource ADD pre_action_output varchar(512) NULL after `pre_action`; - -ALTER TABLE resource MODIFY COLUMN data_source_name varchar(64) CHARACTER SET utf8 COLLATE utf8_general_ci DEFAULT NULL; - -ALTER TABLE instance_type ADD `type` varchar(64) default 'instance' after `name`; - - -#@v0.3.0-end@; - - diff --git a/gunicorn.conf b/gunicorn.conf deleted file mode 100644 index 2989d12a..00000000 --- a/gunicorn.conf +++ /dev/null @@ -1,13 +0,0 @@ -#_ coding:utf-8 _*_ -import os -import multiprocessing - -bind = ':8999' -proc_name = 'terraform' -pidfile = 'bin/terraform.pid' -limit_request_field_size = 0 -limit_request_line = 0 -workers = 5 -x_forwarded_for_header = 'X-FORWARDED-FOR' -accesslog = 'logs/gunicorn.log' -access_log_format = 'ip: [%(h)s] %(l)s %(t)s url: [%(r)s] code: %(s)s "%(L)s" process: ["%(p)s"]' \ No newline at end of file diff --git a/lib/ConfigReader.py b/lib/ConfigReader.py deleted file mode 100644 index 008017f9..00000000 --- a/lib/ConfigReader.py +++ /dev/null @@ -1,78 +0,0 @@ -# _*_ coding:utf-8 _*_ -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import configparser -from wecube_plugins_terraform.settings import BASE_DIR - - -class ConfigReader(object): - def __init__(self, configPath=None): - self.__configPath__ = configPath or os.path.join(BASE_DIR, "conf/application.conf") - 
self.__defaultConfig__ = configparser.ConfigParser(allow_no_value=True) - - self.__default__ = None - - if self.__check_file(self.__configPath__): - self.__default__ = True - self.__defaultConfig__.read(self.__configPath__, encoding="UTF-8") - else: - raise ValueError("配置文件 %s 不存在" % os.path.basename(self.__configPath__)) - - def __check_file(self, path): - return os.path.exists(path) - - def get(self, section, option, default=None): - ''' - 获取配置文件参数 - :param section: - :param option: - :param default: 默认值,当未设置配置参数时, 返回默认值 - :return: - ''' - _data = None - if self.__default__: - try: - _data = self.__defaultConfig__.get(section=section, option=option) - except (configparser.NoOptionError, configparser.NoOptionError) as e: - pass - - if not _data and default is not None: - _data = default - - if _data is None: - raise ValueError("[section]: %s [option]: %s 未配置" % (section, option)) - return _data - - def getInt(self, section, option, default=None): - _data = self.get(section, option, default) - return int(_data) if _data else 0 - - def getFloat(self, section, option, default=None): - _data = self.get(section, option, default) - return float(_data) if _data else 0.00 - - def getBool(self, section, option, default=False): - _data = self.get(section, option, default) - if _data == 'true' or _data == 'True' or _data is True: - return True - elif _data == 'false' or _data == 'False' or _data is False: - return False - else: - raise ValueError("[section]: %s [option]: %s 配置错误 - %s" % (section, option, _data)) - - def getList(self, section, option, default=None, splitwith=None): - _data = self.get(section, option, default) - if _data and isinstance(_data, str): - splitwith = splitwith or "," - _data = _data.split(splitwith) - - return _data - - -Config = ConfigReader() - - -if __name__ == '__main__': - conf = ConfigReader() - print(conf.get("DEFAULT", "test1", "ok")) diff --git a/lib/__init__.py b/lib/__init__.py deleted file mode 100644 index e69de29b..00000000 diff --git 
a/lib/classtools.py b/lib/classtools.py deleted file mode 100644 index 3d8c82fa..00000000 --- a/lib/classtools.py +++ /dev/null @@ -1,29 +0,0 @@ -# -*- coding:utf-8 -*- - -import functools -import inspect -import time - - -def get_all_class_for_module(module_name): - classes = [] - for name, obj in inspect.getmembers(module_name): - if inspect.isclass(obj): - classes.append(name) - return classes - - -def retry(times=3, sleep_time=3): - def wrap(f): - @functools.wraps(f) - def inner(*args, **kwargs): - for i in range(0, times): - try: - return f(*args, **kwargs) - except: - time.sleep(sleep_time) - raise - - return inner - - return wrap diff --git a/lib/command.py b/lib/command.py deleted file mode 100644 index 008bb8ae..00000000 --- a/lib/command.py +++ /dev/null @@ -1,29 +0,0 @@ -# _ coding:utf-8 _*_ -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import os -import subprocess -import traceback -from lib.logs import logger - - -def command(cmd, workdir=None): - environ_vars = os.environ.copy() - logger.info("[EXEC CMD]: %s" % cmd) - try: - process = subprocess.Popen(cmd, shell=True, - stdout=subprocess.PIPE, - stdin=subprocess.PIPE, - stderr=subprocess.PIPE, - close_fds=True, - executable='/bin/bash', - cwd=workdir, - env=environ_vars) - out, err = process.communicate() - ret_code = process.returncode - logger.info("[RESULT CMD]: %s %s %s" % (ret_code, out, err)) - return ret_code, out, err - except Exception, e: - logger.info(traceback.format_exc()) - logger.info("%s: %s" % (e.__class__.__name__, e.message)) - return 1, "", "%s: %s" % (e.__class__.__name__, e.message) diff --git a/lib/date_time.py b/lib/date_time.py deleted file mode 100644 index cd0bbfa3..00000000 --- a/lib/date_time.py +++ /dev/null @@ -1,111 +0,0 @@ -# coding=utf8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import time -import datetime - - -def get_datetime_str(): - return time.strftime("%Y-%m-%d %X") - - 
-def get_date_ymd_str(): - return time.strftime("%Y-%m-%d") - - -def get_datetime_point_str(): - return time.strftime("%Y%m%d_%H%M%S") - - -def datetime_to_str(the_datetime): - return the_datetime.strftime("%Y-%m-%d %H:%M:%S") - - -def time_add(datepoint, day=0, hour=0, mins=0, sec=0): - if ":" in datepoint: - return datetime.datetime.strptime( - datepoint, - '%Y-%m-%d %H:%M:%S') + datetime.timedelta( - days=day, - hours=hour, - minutes=mins, - seconds=sec) - else: - return datetime.datetime.strptime(datepoint, - '%Y-%m-%d') + datetime.timedelta(days=day, - hours=hour, - minutes=mins, - seconds=sec) - - -def time_reduce(datepoint, day=0, hour=0, mins=0, sec=0): - if ":" in datepoint: - return datetime.datetime.strptime( - datepoint, - '%Y-%m-%d %H:%M:%S') - datetime.timedelta( - days=day, - hours=hour, - minutes=mins, - seconds=sec) - else: - return datetime.datetime.strptime(datepoint, - '%Y-%m-%d') - datetime.timedelta(days=day, - hours=hour, - minutes=mins, - seconds=sec) - - -def time_add_ymd(datepoint, day=0, hour=0, mins=0, sec=0): - t = time_add(datepoint, day, hour, mins, sec) - return t.strftime("%Y-%m-%d") - - -def time_reduce_ymd(datepoint, day=0, hour=0, mins=0, sec=0): - t = time_reduce(datepoint, day, hour, mins, sec) - return t.strftime("%Y-%m-%d") - - -def datetime_to_timestamp(the_datetime, is_float=False): - ms = time.mktime(the_datetime.timetuple()) - if not is_float: - ms = int(ms) - return ms - - -def timestamp_to_date_str(ts): - ''' - timestamp 转日期字符串 - :param ts: - :return: - ''' - return datetime.datetime.fromtimestamp(ts).strftime('%Y-%m-%d %H:%M:%S') - - -def is_time_str(date_str): - try: - if ":" in date_str: - datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S') - else: - datetime.datetime.strptime(date_str, '%Y-%m-%d') - return True - except: - return False - - -def str_to_time(date_str): - if ":" in date_str: - return datetime.datetime.strptime(date_str, '%Y-%m-%d %H:%M:%S') - else: - return datetime.datetime.strptime(date_str, 
'%Y-%m-%d') - - -# if __name__ == '__main__': -# print time_add_ymd(datepoint=get_date_ymd_str(), day=4) - -if __name__ == '__main__': - # import iso8601 - s = get_datetime_str() - print(s) - print(time_add(s, day=1)) - # print datetime.datetime.strptime('2012-11-01 04:16:13', '%Y-%m-%d %H:%M:%S') - # print datetime.datetime.fromisoformat("2020-05-12T08:00:00.000+0800") diff --git a/lib/encrypt_helper.py b/lib/encrypt_helper.py deleted file mode 100644 index 5f2c0919..00000000 --- a/lib/encrypt_helper.py +++ /dev/null @@ -1,34 +0,0 @@ -# coding: utf-8 - -from pyDes import * -from wecube_plugins_terraform.settings import ENCRYPT_SEED - -ENCRYPT_SEED = ENCRYPT_SEED[:16] - -if len(ENCRYPT_SEED) < 16: - ENCRYPT_SEED = ENCRYPT_SEED + 's' * (16 - len(ENCRYPT_SEED)) - - -def encrypt_str(text, key=ENCRYPT_SEED): - try: - from Crypto.Cipher import AES - except: - return text - import os - import base64 - - BS = AES.block_size - pad = lambda s: s + (BS - len(s) % BS) * chr(BS - len(s) % BS) - cipher = AES.new(key, IV='w' * len(key)) - return base64.b64encode(cipher.encrypt(pad(text))) - - -def decrypt_str(encryptstr, key=ENCRYPT_SEED): - try: - from Crypto.Cipher import AES - except: - return encryptstr - import base64 - unpad = lambda s: s[0:-ord(s[-1])] - cipher = AES.new(key, IV='w' * len(key)) - return unpad(cipher.decrypt(base64.b64decode(encryptstr))) diff --git a/lib/hashstr.py b/lib/hashstr.py deleted file mode 100644 index b4630deb..00000000 --- a/lib/hashstr.py +++ /dev/null @@ -1,8 +0,0 @@ -# coding:utf-8 - -import hashlib - - -def hash256str(str): - return hashlib.sha256(str).hexdigest() - diff --git a/lib/ip_helper.py b/lib/ip_helper.py deleted file mode 100644 index 5591a7ac..00000000 --- a/lib/ip_helper.py +++ /dev/null @@ -1,60 +0,0 @@ -# cording=utf8 -# author="rd" - -import IPy - - -def check_ip(ip): - if ip is None: - return False, "ip is null" - if not isinstance(ip, basestring): - return False, "not ip" - if ip.strip(): - if not len(ip.split("/")) == 1: 
- return False, "%s is invalid ip address" % ip - else: - try: - t_res = ip.split(".") - if len(t_res) != 4: - return False, "not ip" - IPy.IP(ip).strNormal() - return True, "ok" - except Exception, e: - return False, e - else: - return False, "ip is null" - - -def check_cidr(cidr): - if cidr is None: - return False, "cidr is null" - if not isinstance(cidr, basestring): - raise False, "not cinder" - if cidr.strip(): - if "/" not in cidr: - return False, "not cidr" - else: - try: - IPy.IP(cidr).strNormal() - return True, "ok" - except: - return False, "not cidr" - - else: - return False, "cidr is null" - - -def check_cidr(cidr, ip): - try: - return ip in IPy.IP(cidr) - except: - return False - - - - -if __name__ == '__main__': - pass -# ips = ' ,' -# status, result = check_ips(ips) -# print status, result diff --git a/lib/json_helper.py b/lib/json_helper.py deleted file mode 100644 index 38271a28..00000000 --- a/lib/json_helper.py +++ /dev/null @@ -1,30 +0,0 @@ -# coding=utf8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import json -from datetime import date, datetime - - -def __default(obj): - if isinstance(obj, datetime): - return obj.strftime('%Y-%m-%d %H:%M:%S') - elif isinstance(obj, date): - return obj.strftime('%Y-%m-%d') - else: - raise TypeError('%r is not JSON serializable' % obj) - - -def format_dict_data(data): - return json.loads(data, default=__default) - - -def format_json_dumps(data, ensure_ascii=False, indent=None): - return json.dumps(data, default=__default, ensure_ascii=ensure_ascii, indent=indent) - - -def format_to_json(data): - if isinstance(data, dict): - res = json.dumps(data, default=__default) - return json.loads(res) - else: - return json.loads(data) diff --git a/lib/logs.py b/lib/logs.py deleted file mode 100644 index 6cbb420a..00000000 --- a/lib/logs.py +++ /dev/null @@ -1,63 +0,0 @@ -# coding=utf8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import 
os -import logging -import logging.config -import logging.handlers -import threading -from wecube_plugins_terraform.settings import DEBUG -from wecube_plugins_terraform.settings import LOG_BACKUP -from wecube_plugins_terraform.settings import LOG_BASE_PATH -from wecube_plugins_terraform.settings import LOG_LEVEL -from wecube_plugins_terraform.settings import LOG_MAX_SIZE -from wecube_plugins_terraform.settings import LOG_NAME - -levelmap = { - 'DEBUG': logging.DEBUG, - 'INFO': logging.INFO, - 'WARNING': logging.WARNING, - 'ERROR': logging.ERROR, - 'CRITICAL': logging.CRITICAL -} - -if not os.path.exists(LOG_BASE_PATH): - os.makedirs(LOG_BASE_PATH) - - -def singleton(cls): - instances = {} - lock = threading.Lock() - - def _singleton(*args, **kwargs): - with lock: - fullkey = str((cls.__name__, tuple(args), tuple(kwargs.items()))) - if fullkey not in instances: - instances[fullkey] = cls(*args, **kwargs) - return instances[fullkey] - - return _singleton - - -@singleton -def logsetup(logname=None): - filename = os.path.join(LOG_BASE_PATH, logname) - handler = logging.handlers.RotatingFileHandler(filename=filename, maxBytes=LOG_MAX_SIZE, backupCount=LOG_BACKUP) - logging.getLogger(logname).setLevel(levelmap.get(LOG_LEVEL, logging.INFO)) - formatter = logging.Formatter("[%(asctime)s] [%(levelname)s] [%(filename)s-L%(lineno)d] - %(message)s") - handler.setFormatter(formatter) - logging.getLogger(logname).addHandler(handler) - - if DEBUG: - console = logging.StreamHandler() - handler.setFormatter(formatter) - logging.getLogger("").addHandler(console) - - -def get_logger(logname=None): - logname = logname or LOG_NAME - logsetup(logname) - return logging.getLogger(logname) - - -logger = get_logger() diff --git a/lib/md5str.py b/lib/md5str.py deleted file mode 100644 index be8dde18..00000000 --- a/lib/md5str.py +++ /dev/null @@ -1,10 +0,0 @@ -# -*- coding: utf-8 -*- -from __future__ import (absolute_import, division, print_function, unicode_literals) - -import hashlib - - 
-def Md5str(string): - _data = hashlib.md5() - _data.update(string.encode("utf-8")) - return _data.hexdigest() diff --git a/lib/mysql_client.py b/lib/mysql_client.py deleted file mode 100644 index c84486ed..00000000 --- a/lib/mysql_client.py +++ /dev/null @@ -1,146 +0,0 @@ -# coding:utf-8 - -import traceback -from lib.logs import logger -from sqlalchemy import create_engine -from sqlalchemy import desc -from sqlalchemy import text -from sqlalchemy.ext.declarative import declarative_base -from sqlalchemy.orm import sessionmaker -from wecube_plugins_terraform.settings import MYSQL_SERVER -from wecube_plugins_terraform.settings import MYSQL_USERNAME -from wecube_plugins_terraform.settings import MYSQL_PASSWORD -from wecube_plugins_terraform.settings import MYSQL_DATABASE - -Base = declarative_base() - -_Session = sessionmaker(bind=create_engine("mysql+pymysql://%(user)s:%(password)s@%(service)s/%(database)s?charset=utf8" - % ({"user": MYSQL_USERNAME, - "password": MYSQL_PASSWORD, - "service": MYSQL_SERVER, - "database": MYSQL_DATABASE}), - # echo=True, - pool_size=20, - encoding="utf-8", - pool_recycle=60 * 9 - ), - autocommit=True - ) - -session = _Session() - - -class Database(object): - def create(self, obj): - ''' - - :param obj: - :return: - ''' - - try: - session.begin() - session.add(obj) - session.commit() - except Exception, e: - logger.info(traceback.format_exc()) - session.rollback() - raise e - - def query(self, obj, filters=None, filter_string=None, params=None, - pageAt=0, pageSize=20000, orderby=None, **kwargs): - ''' - - :param obj: - :param filters: - :param filter_string: 需要与params一起使用 - :param pageAt: - :param pageSize: - :param orderby: - :param kwargs: - :return: - ''' - - filters = filters or {} - - _query_sql = session.query(obj).filter_by(**filters) - - if filter_string: - params = params or {} - _query_sql = _query_sql.filter(text(filter_string)).params(**params) - - count = _query_sql.count() - - if pageAt: - _query_sql = 
_query_sql.offset(pageAt * pageSize) - - if orderby: - if orderby[1] == "desc": - _query_sql = _query_sql.order_by(desc(orderby[0])) - else: - _query_sql = _query_sql.order_by(orderby[0]) - - return count, _query_sql.limit(pageSize).all() - - def get(self, obj, filters, filter_string=None, params=None): - ''' - - :param obj: - :param fileters: - :return: - ''' - filters = filters or {} - _query_sql = session.query(obj).filter_by(**filters) - if filter_string: - _query_sql = _query_sql.filter(filter_string).params(**params) - - return _query_sql.first() - - def delete(self, obj, filters): - ''' - - :param obj: - :param filters: - :return: - ''' - if not filters: - raise ValueError("delete filter not permit null") - - session.begin() - try: - session.query(obj).filter_by(**filters).delete() - session.commit() - except Exception, e: - logger.info(traceback.format_exc()) - session.rollback() - raise e - - def update(self, obj, filters, update_data): - ''' - - :param obj: - :param filters: - :param update_data: - :return: - ''' - if not filters: - raise ValueError("update filter not permit null") - - try: - session.begin() - session.query(obj).filter_by(**filters).update(update_data) - session.commit() - except Exception, e: - logger.info(traceback.format_exc()) - session.rollback() - raise e - - def excute(self, sql, bind=None): - ''' - - :param sql: - :return: - ''' - - ret = session.execute(sql, bind=bind) - return ret.fetchall() diff --git a/lib/randomStr.py b/lib/randomStr.py deleted file mode 100644 index 15edcf0e..00000000 --- a/lib/randomStr.py +++ /dev/null @@ -1,12 +0,0 @@ -# coding=utf-8 - -import random - - -def random_str(strlen=8): - src_str = "1234567890qwertyuiopasdfghjklzxcvbnmQWERTYUIOPASDFGHJKLZXCVBNM" - sa = [] - for i in range(strlen): - sa.append(random.choice(src_str)) - - return "".join(sa) diff --git a/lib/uuid_util.py b/lib/uuid_util.py deleted file mode 100644 index 9de70f5c..00000000 --- a/lib/uuid_util.py +++ /dev/null @@ -1,18 +0,0 @@ -# 
coding=utf8 -from __future__ import (absolute_import, division, print_function, unicode_literals) - -from uuid import uuid4 - - -def get_uuid(upper=False, HYPHEN=False): - ''' - :param upper: - :param HYPHEN: - :return: 32位的uuid - ''' - uuid = str(uuid4()) - if not HYPHEN: - uuid = uuid.replace('-', '') - if upper: - return uuid.upper() - return uuid diff --git a/manage.py b/manage.py deleted file mode 100644 index c7f49ca0..00000000 --- a/manage.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python -import os -import sys -if sys.getdefaultencoding() != 'utf-8': - reload(sys) - sys.setdefaultencoding('utf-8') - - -if __name__ == "__main__": - os.environ.setdefault("DJANGO_SETTINGS_MODULE", "wecube_plugins_terraform.settings") - try: - from django.core.management import execute_from_command_line - except ImportError: - # The above import may fail for some other reason. Ensure that the - # issue is really that Django is missing to avoid masking other - # exceptions on Python 2. - try: - import django - except ImportError: - raise ImportError( - "Couldn't import Django. Are you sure it's installed and " - "available on your PYTHONPATH environment variable? Did you " - "forget to activate a virtual environment?" - ) - raise - execute_from_command_line(sys.argv) diff --git a/plugins/alicloud/info.txt b/plugins/alicloud/info.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/plugins/aws/info.txt b/plugins/aws/info.txt deleted file mode 100644 index e69de29b..00000000 diff --git a/readme.md b/readme.md deleted file mode 100644 index 5f7d1858..00000000 --- a/readme.md +++ /dev/null @@ -1,216 +0,0 @@ - -### 一.简介 -Terraform 多云管理 - -支持的云厂商: 腾讯云(tencentcloud), 阿里云(alicloud)等 -

-整体架构如下图: - ----- - - - ---- - -依赖内容: -1. python 2.7 - -2. terraform (cli, 建议将terraform的provide 插件cache进行加速,具体可参考文档中(`terraform 插件配置`)) -3. 公有云厂商(例如腾讯云,阿里云), 用户自行进行开通, 并参考各云厂商申请API key, - - 若为子账号则需要足够的资源申请权限(包括但不限于: - 1). 使用的云资源的权限(创建,删除, 修改权限), - 2). 账户对资源的付费权限(没有该权限可能创建失败), - 3). 服务端ip对公有云厂商的访问权限) - -### 二. 安装 -运行环境依赖python环境,可按如下步骤进行初始化 - -1. 安装rpm包: -``` -yum install gcc -y -yum install -y python-virtualenv.noarch -``` - -2. 创建python独立环境: -``` -mkdir -p /wecube -cd /wecube -virtualenv terraform_env - . /wecube/terraform_env/bin/activate -``` - -3. 初始化依赖包: -``` -mkdir /apps/wecube_plugins_terraform -cd /apps/wecube_plugins_terraform -git clone https://github.com/WeBankPartners/wecube-plugins-terraform.git -pip install -r requirements.txt -``` - -4. 安装terraform: -(用户可在terraform官网进行[下载](https://www.terraform.io/downloads.html)安装, 参考`terraform 插件配置`配置加速) - -5. 更新conf: -更新conf目录下application.conf 配置文件,更新数据库连接配置及其他配置 - -6. 运行: -``` -sh bin/start.sh -``` - -### 三. terraform 插件配置 -在plugins 中创建provider name 目录, 如terraform插件有版本要求则写入对应的版本需求文件 versions.tf - -例如: 腾讯云 tencentcloud, -则在 plugins 下创建 tencentcloud 目录, 并创建version.tf 文件 - -注: 为加速terraform执行, 需要将对应的provider插件放在os cache目录中: -`/usr/local/share/terraform/plugins/registry.terraform.io` - -例如cache tencentcloud 的插件, -则将tencentcloud插件放入 `/usr/local/share/terraform/plugins/registry.terraform.io/tencentcloudstack` - -### 四. terraform docker镜像打包: - -1. 镜像打包需要python:2.7.18-slim为基础镜像 - -2. 镜像需要依赖terraform cli 以及cache的加速, 需要将对应的依赖包放入对应目录 - -1. 将terraform cli 软件包放入源码文件的bin目录下 - - 例如当前源码文件为/data/wecube_plugins_terraform, cli版本文件(例如:terraform_0.14.5_linux_amd64.zip), - 则放入/data/wecube_plugins_terraform/bin 并修改Makefile的`cd bin && unzip -o terraform_0.14.5_linux_amd64.zip` - -2. cache 加速: - -将对应插件的完整目录打包成registry.terraform.io.tar.gz 放入到源码文件的plugins目录下, -并修改Dockerfile的tar -xvf /app/wecube_plugins_terraform/plugins/registry.terraform.io.tar.gz - -### 五. terraform 接入云厂商配置信息 -转换规则: - -字段属性转换定义说明: -1. 
string 直接转换为对应的值, 若为空字符串,则不转换, 如 {"cidr": ''} - -2. json 定义约束, 类型为json - 1. type定义类型, 可定义: [string, int, float, json, list] - 2. allow_null 是否允许为空, 0 为不允许为空,反之则允许为空 - 3. convert 转换为对应的字段名称, 若不定义,则不转换 - 4. default 指定默认值 - 5. hint 指定资源转换, 由id转换为云上id, - 配置为: $zone转换可用区, $region转换区域, $instance.type 转换规格类型 - $resource表示为可能转换为任意的id, $resource.xxx 表示转换为xxx资源的resource id - $resource.xxx.yyy 表示转换为xxx资源的yyy属性值,例如arn, name, resource id等 - - 例如: "name": {"type": "string", "allow_null": 0, "convert": "name"} - name字段,定义type为string, 不运行为空, 转换为name - -3. 要求的关键字不使用, 则可使用减号移除, 如:{"tag": "-"} - -#### 如下以腾讯云为例, 其他云厂商接入类似, 可参考进行配置: - -1. 配置云厂商: -url: /terraform/v1/configer/provider - -``` -{ - "display_name":"腾讯云", - "name":"tencentcloud", - "zone":"", - "region":"", - "secret_id":"xxxx", # 云厂商提供的api key信息 - "secret_key":"xxxx", # 云厂商提供的api key信息 - "extend_info":{ - - }, - "provider_property":{ - "secret_id":"secret_id", - "secret_key":"secret_key", - "region":"region" - } -} -``` -secret_id 云厂商提供的api key信息 - -secret_key 云厂商提供的api key信息 - -provider_property配置字段的转换: 如secret_id 需要转换为api_id 则配置 "secret_id": "api_id" - -2. 
配置属性资源: - -例如: vpc 网段配置: - -url: /terraform/v1/configer/resource - -1).资源 -``` -{ -"resource_type": "vpc", -"resource_name": "tencentcloud_vpc", -"provider": "tencentcloud", -"extend_info": {"tags": {"type": "json"}, "is_multicast": false}, -"resource_property": { - "name": {"type": "string", "allow_null": 0, "convert": "name"}, - "cidr": {"type": "string", "allow_null": 0, "convert": "cidr_block"} -}, -"resource_output": {"resource_id": {"type": "string", "value": "id"}}, -"data_source_name": "tencentcloud_vpc_instances", -"data_source_argument": "instance_list", -"data_source": {"resource_id": "vpc_id"}, -"data_source_output": {"resource_id": "vpc_id"} -} -``` - -2).回刷云上已有资源信息 -``` -{ -"data_source_name": "tencentcloud_vpc_subnets", -"data_source_argument": "instance_list", -"data_source": {"resource_id": "subnet_id"}, -"data_source_output": {"resource_id": "vpc_id"} -} -``` -

-source_property回刷data名称 -data_source_argument 获取回刷数据的字段名称, 可使用层级, 如: instance.configs -data_source 设置查询的字段转换信息 -data_source_output 需要转换统一输出的字段 -

- -3).针对回刷资源需要先刷列表的情况 -``` -{ -"pre_action": "aws_vpcs", -"pre_action_output": "{'ids': 'id'}" -} -``` -

-pre_action 列表获取的资源名称, 例如vpc_list -pre_action_output 列表获取, 输入到资源的过滤参数, 仅支持最多一个 -

- -3. 配置通用值 - -用于将云厂商之间不同的值进行统一 - -例如image字段的 centos 7.2字段, -云厂商A为: centos-7.2 x64, -原厂商B为: linux centos 7.2 x64, -则可以统一命名为centos 7.2 进行转换 - -注: 若region/zone通用值配置后,在注册region/zone时, asset id(资产id需要与配置后的值一致) -url: /terraform/v1/configer/keyconfig - -``` -{ -"resource": "subnet", -"resource_type": "cidr", -"provider": "tencentcloud", -"value_config": { - "subnet_20": "10.0.20.0/24", - "subnet_1": "10.0.1.0/24" - } -} -``` - diff --git a/register.xml b/register.xml deleted file mode 100644 index 8b5b4d3c..00000000 --- a/register.xml +++ /dev/null @@ -1,2027 +0,0 @@ - - - - - - - - - - - /terraformIndex - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - next_hub - region_id - resource_id - next_type - route_table_id - destination - id - zone_id - gateway_id - nat_gateway_id - name - peer_connection_id - secret - local_gateway_id - instance_id - network_interface_id - extend_info - provider - vpc_id - - - asset_id - errorCode - errorMessage - id - - - - - asset_id - gateway_id - nat_gateway_id - region_id - peer_connection_id - secret - local_gateway_id - instance_id - network_interface_id - provider - route_table_id - cidr - type - id - - - gateway_id - next_hub - region_id - errorMessage - errorCode - next_type - route_table_id - destination - id - asset_id - nat_gateway_id - name - peer_connection_id - secret - local_gateway_id - instance_id - network_interface_id - provider - vpc_id - - - - - - - asset_id - name - resource_id - secret - extend_info - provider - region_id - mysql_id - id - zone_id - - - asset_id - errorCode - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - name - secret - zone_id - extend_info - provider - vpc_id - id - region_id - - - asset_id - errorCode - name - errorMessage - id - - - - - asset_id - name - subnet_id - secret - tag - provider - route_table_id - vpc_id - id - region_id - 
- - asset_id - name - errorMessage - errorCode - secret - provider - vpc_id - id - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - region_id - id - security_group_id - port - charge_type - disk_type - parameters - subnet_id - base_security_group_id - max_disk_size - secret - version - extend_info - slave_deploy_mode - second_slave_zone - zone - engine - asset_id - app_security_group_id - provider - user - subnet_group - password - zone_id - disk_size - name - force_delete - instance_type - vpc_id - first_slave_zone - - - asset_id - errorMessage - id - errorCode - user - password - ipaddress - port - arn - - - - - engine - asset_id - name - subnet_id - id - secret - tag - version - provider - vpc_id - ipaddress - port - region_id - - - region_id - errorCode - id - max_disk_size - port - charge_type - password - parameters - subnet_id - security_group_id - secret - version - provider - second_slave_zone - engine - asset_id - errorMessage - user - subnet_group - disk_type - ipaddress - zone_id - disk_size - name - force_delete - instance_type - vpc_id - first_slave_zone - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - region_id - zone_id - id - size - charge_type - name - zone - secret - extend_info - provider - type - - - asset_id - errorCode - name - errorMessage - id - - - - - asset_id - volume_ids - name - instance_id - secret - tag - provider - type - id - region_id - - - charge_type - asset_id - name - errorMessage - instance_id - secret - zone_id - errorCode - provider - region_id - type - id - size - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - name - provider - accepter.allow_vpc - requester.allow_vpc - secret - zone_id - extend_info - peer_vpc_id - peer_region - vpc_id - id - region_id - - - asset_id - errorCode - errorMessage - id - - - - - asset_id - name - provider - secret - peer_vpc_id - peer_region - vpc_id - cidr - id - region_id - - - asset_id - name - errorMessage - 
provider - errorCode - secret - peer_vpc_id - peer_region - vpc_id - id - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - name - zone - secret - zone_id - extend_info - provider - vpc_id - cidr - id - region_id - - - asset_id - errorCode - name - errorMessage - id - - - - - asset_id - name - zone - secret - tag - provider - region_id - vpc_id - cidr - id - zone_id - - - asset_id - zone_id - errorMessage - errorCode - secret - provider - region_id - vpc_id - cidr - id - name - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - secret - extend_info - region_id - provider - id - name - - - errorCode - asset_id - errorMessage - id - - - - - asset_id - id - region_id - name - provider - - - asset_id - name - errorMessage - errorCode - extend_info - provider - id - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - region_id - app_security_group_id - load_balancer_type - tag - ipaddress - id - zone_id - charge_type - name - subnet_id - base_security_group_id - secret - extend_info - provider - vpc_id - network_type - - - asset_id - errorCode - errorMessage - ipaddress - id - - - - - asset_id - name - subnet_id - secret - tag - provider - vpc_id - ipaddress - id - region_id - - - asset_id - region_id - errorMessage - errorCode - load_balancer_type - tag - ipaddress - id - charge_type - name - subnet_id - security_group_id - secret - provider - vpc_id - type - network_type - - - - - - - id - - - errorMessage - errorCode - id - - - - - engine - asset_id - region_id - app_security_group_id - id - password - port - name - charge_type - zone_id - subnet_id - type - base_security_group_id - security_group_id - instance_type - secret - version - extend_info - version_parameter - replicas_num - provider - vpc_id - cluster_num - - - asset_id - errorMessage - id - errorCode - ipaddress - port - - - - - asset_id - engine - zone_id - subnet_id - port - secret - tag - version - provider - 
region_id - vpc_id - ipaddress - id - name - - - region_id - errorCode - id - port - charge_type - subnet_id - security_group_id - secret - version - replicas_num - provider - type - engine - asset_id - errorMessage - password - ipaddress - name - zone_id - instance_type - vpc_id - cluster_num - - - - - - - asset_id - name - resource_id - disk_id - instance_id - secret - extend_info - provider - region_id - id - zone_id - - - asset_id - errorCode - name - errorMessage - disk_id - instance_id - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - protocol - name - lb_id - id - instance_id - secret - zone_id - provider - vpc_id - type - port - region_id - - - asset_id - errorCode - errorMessage - id - arn - - - - - asset_id - name - lb_id - instance_id - secret - provider - id - region_id - - - asset_id - instance_id - errorMessage - protocol - name - lb_id - id - errorCode - secret - zone_id - provider - vpc_id - type - port - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - nic_type - description - resource_id - end_port - ip_protocol - zone_id - id - cidr_ip - asset_id - region_id - ipaddress_group_id - name - begin_port - security_group_id - priority - secret - extend_info - provider - policy - vpc_id - type - ports - - - asset_id - errorCode - errorMessage - id - - - - - asset_id - name - security_group_id - secret - provider - vpc_id - group_id - id - region_id - - - nic_type - region_id - to_port - ip_protocol - errorCode - id - cidr_ip - asset_id - security_group_id - priority - secret - provider - ipaddress_group_id - policy - type - from_port - description - errorMessage - name - vpc_id - ports - - - - - - - id - - - errorMessage - errorCode - id - - - - - zone_id - id - asset_id - region_id - name - addresses - secret - extend_info - provider - - - asset_id - errorCode - errorMessage - id - - - - - asset_id - name - secret - provider - id - region_id - - - 
region_id - errorCode - id - addresses - asset_id - secret - provider - errorMessage - name - - - - - - - id - - - errorMessage - errorCode - id - - - - - region_id - image - get_password_data - power_action - id - charge_type - disk_type - zone - subnet_id - base_security_group_id - hostname - security_group_id - secret - extend_info - provider - asset_id - internet_charge_type - app_security_group_id - password - zone_id - disk_size - name - force_delete - instance_type - vpc_id - data_disks - - - asset_id - errorCode - name - errorMessage - cpu - public_ip - memory - password - ipaddress - id - - - - - asset_id - region_id - zone - subnet_id - get_password_data - public_ip - image_id - secret - tag - provider - vpc_id - ipaddress - id - - - region_id - image - public_ip - power_action - id - arn - charge_type - disk_type - subnet_id - get_password_data - hostname - security_group_id - secret - memory - asset_id - errorMessage - internet_charge_type - provider - password - ipaddress - zone_id - errorCode - disk_size - name - force_delete - cpu - instance_id - instance_type - vpc_id - data_disks - - - - - - - region_id - bandwidth - from_region - id - zone_id - asset_id - name - dest_region - ccn_id - secret - extend_info - provider - - - asset_id - errorCode - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - health_check - protocol - region_id - target_group_arn - port - backend_port - zone_id - redirect - name - health_check_uri - lb_id - id - secret - extend_info - provider - forward - default_action - type - - - asset_id - errorCode - errorMessage - id - arn - - - - - asset_id - region_id - lb_id - id - secret - provider - port - - - redirect - health_check - protocol - region_id - errorMessage - errorCode - target_group_arn - id - backend_port - asset_id - name - health_check_uri - lb_id - port - secret - provider - forward - default_action - type - - - - - - - 
username - asset_id - zone_id - resource_id - privileges - database - secret - extend_info - provider - mysql_id - id - region_id - - - errorCode - asset_id - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - name - app_security_group_id - subnet_id - base_security_group_id - security_group_id - secret - extend_info - provider - region_id - vpc_id - ipaddress - id - zone_id - - - asset_id - errorMessage - errorCode - mac - ipaddress - id - - - - - asset_id - name - subnet_id - public_ip - secret - tag - provider - vpc_id - ipaddress - id - region_id - - - asset_id - name - subnet_id - errorMessage - security_group_id - errorCode - secret - provider - vpc_id - ipaddress - id - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - region_id - bandwidth - id - zone_id - asset_id - eip - name - subnet_id - secret - extend_info - provider - vpc_id - - - asset_id - errorCode - errorMessage - public_ip - ipaddress - id - - - - - asset_id - name - subnet_id - secret - bandwidth - tag - provider - vpc_id - type - id - region_id - - - asset_id - eip - name - subnet_id - errorMessage - secret - public_ip - bandwidth - errorCode - provider - vpc_id - ipaddress - id - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - name - subnet_id - secret - tag - extend_info - provider - region_id - id - zone_id - - - asset_id - errorCode - errorMessage - id - arn - - - - - asset_id - secret - region_id - provider - id - - - asset_id - name - subnet_id - errorMessage - errorCode - secret - zone_id - provider - vpc_id - id - arn - region_id - - - - - - - region_id - resource_id - zone_id - id - instance_region - asset_id - name - ccn_id - instance_id - instance_type - secret - extend_info - provider - - - asset_id - errorCode - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - id - - - errorMessage - errorCode - id - - 
- - - listener_id - region_id - app_security_group_id - resource_id - host_header - frontend_port - id - condition - zone_id - asset_id - name - hint - base_security_group_id - lb_id - source_ip - security_group_id - server_group_id - secret - extend_info - provider - action - type - - - asset_id - errorCode - name - errorMessage - id - - - - - asset_id - secret - listener_id - region_id - provider - lb_id - id - - - asset_id - errorMessage - region_id - lb_id - security_group_id - errorCode - secret - listener_id - zone_id - provider - frontend_port - id - name - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - name - secret - tag - extend_info - provider - cidr - id - region_id - - - asset_id - errorCode - errorMessage - id - arn - - - - - asset_id - name - secret - tag - provider - cidr - id - region_id - - - asset_id - name - errorMessage - errorCode - secret - provider - cidr - id - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - charge_type - asset_id - name - secret - public_ipv4_pool - instance - bandwidth_limit_type - zone_id - extend_info - private_ip - vpc - provider - ipaddress - id - region_id - - - asset_id - errorMessage - errorCode - private_ip - ipaddress - id - - - - - asset_id - name - secret - tag - provider - ipaddress - id - region_id - - - asset_id - charge_type - name - errorMessage - secret - errorCode - bandwidth_limit_type - private_ip - provider - ipaddress - id - region_id - - - - - - - asset_id - name - resource_id - device_index - secret - instance_id - network_interface_id - extend_info - provider - region_id - id - zone_id - - - asset_id - errorCode - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - secret - extend_info - name - provider - id - - - errorCode - asset_id - errorMessage - id - - - - - asset_id - id - name - provider - - - asset_id - errorCode - extend_info - name - provider - 
errorMessage - id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - name - provider - acl - secret - extend_info - appid - region_id - id - zone_id - - - asset_id - url - errorMessage - errorCode - id - arn - - - - - asset_id - name - secret - tag - provider - id - region_id - - - asset_id - name - url - errorMessage - acl - errorCode - secret - provider - id - arn - region_id - - - - - - - id - - - errorMessage - errorCode - id - - - - - listener_id - region_id - weight - resource_id - id - targets - backend_servers - zone_id - asset_id - name - lb_id - port - instance_id - secret - extend_info - provider - group_id - - - asset_id - errorCode - errorMessage - id - - - - - asset_id - instance_id - secret - region_id - provider - lb_id - id - - - asset_id - instance_id - errorMessage - region_id - weight - lb_id - id - errorCode - secret - listener_id - provider - port - backend_servers - name - - - - - - - id - - - errorMessage - errorCode - id - - - - - charge_type - asset_id - name - secret - bandwidth_limit_type - zone_id - extend_info - provider - id - region_id - - - asset_id - errorCode - errorMessage - ipaddress - id - - - - - asset_id - secret - name - provider - id - region_id - - - asset_id - name - errorMessage - errorCode - secret - provider - id - region_id - - - - - - - asset_id - eip_id - name - resource_id - secret - allow_reassociation - instance_id - network_interface_id - extend_info - private_ip - provider - region_id - id - zone_id - - - asset_id - errorCode - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - asset_id - name - resource_id - secret - extend_info - provider - region_id - mysql_id - password - id - zone_id - - - asset_id - errorCode - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - - - id - - - errorMessage - errorCode - id - - - - - asset_id - zone_id - name - secret - tag - extend_info - provider - region_id - vpc_id - id - description - - - 
asset_id - errorCode - errorMessage - id - arn - - - - - asset_id - name - secret - tag - provider - vpc_id - id - region_id - - - asset_id - description - errorMessage - errorCode - secret - tag - provider - region_id - vpc_id - id - name - - - - - - - asset_id - zone_id - resource_id - name - secret - extend_info - provider - mysql_id - backup_model - id - backup_time - region_id - - - errorCode - asset_id - errorMessage - id - - - - - id - - - errorMessage - errorCode - id - - - - - diff --git a/requirements.txt b/requirements.txt deleted file mode 100644 index 52a647a4..00000000 --- a/requirements.txt +++ /dev/null @@ -1,19 +0,0 @@ -gunicorn==19.7.1 -Django -ipaddress==1.0.18 -requests -six==1.10.0 -kubernetes -IPy -configparser -retrying -apscheduler -Crypto -Werkzeug -PyJWT -python-terraform==0.10.1 -SQLalchemy==1.3.23 -PyMySQL==0.7.11 -pycrypto -pyDes -lxml diff --git a/terraform-server/api/api.go b/terraform-server/api/api.go new file mode 100644 index 00000000..859cbc0a --- /dev/null +++ b/terraform-server/api/api.go @@ -0,0 +1,215 @@ +package api + +import ( + "bytes" + "fmt" + "io/ioutil" + "net/http" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/interfaces" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/log_operation" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/parameter" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/plugin" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/provider" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/resource_data" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/source" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/template" + 
"github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/tf_argument" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/tfstate_attribute" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/gin-gonic/gin" +) + +type handlerFuncObj struct { + HandlerFunc func(c *gin.Context) + Method string + Url string + LogOperation bool +} + +var ( + httpHandlerFuncList []*handlerFuncObj +) + +func init() { + // declaration + httpHandlerFuncList = append(httpHandlerFuncList, + &handlerFuncObj{Url: "/plugins", Method: "POST", HandlerFunc: plugin.PluginBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/plugins", Method: "GET", HandlerFunc: plugin.PluginList}, + &handlerFuncObj{Url: "/plugins", Method: "DELETE", HandlerFunc: plugin.PluginBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/plugins", Method: "PUT", HandlerFunc: plugin.PluginBatchUpdate, LogOperation: true}, + + &handlerFuncObj{Url: "/interfaces", Method: "POST", HandlerFunc: interfaces.InterfaceBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/interfaces", Method: "GET", HandlerFunc: interfaces.InterfaceList}, + &handlerFuncObj{Url: "/interfaces", Method: "DELETE", HandlerFunc: interfaces.InterfaceBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/interfaces", Method: "PUT", HandlerFunc: interfaces.InterfaceBatchUpdate, LogOperation: true}, + + &handlerFuncObj{Url: "/templates", Method: "POST", HandlerFunc: template.TemplateBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/templates", Method: "GET", HandlerFunc: template.TemplateList}, + &handlerFuncObj{Url: "/templates", Method: "DELETE", HandlerFunc: template.TemplateBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/templates", Method: "PUT", HandlerFunc: template.TemplateBatchUpdate, LogOperation: true}, + &handlerFuncObj{Url: 
"/templates/:pluginId", Method: "GET", HandlerFunc: template.TemplateListByPlugin}, + + &handlerFuncObj{Url: "/template_values", Method: "POST", HandlerFunc: template.TemplateValueBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/template_values", Method: "GET", HandlerFunc: template.TemplateValueList}, + &handlerFuncObj{Url: "/template_values", Method: "DELETE", HandlerFunc: template.TemplateValueBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/template_values", Method: "PUT", HandlerFunc: template.TemplateValueBatchUpdate, LogOperation: true}, + &handlerFuncObj{Url: "/template_values/:parameterId", Method: "GET", HandlerFunc: template.TemplateValueListByParameter}, + + &handlerFuncObj{Url: "/parameters", Method: "POST", HandlerFunc: parameter.ParameterBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/parameters", Method: "GET", HandlerFunc: parameter.ParameterList}, + &handlerFuncObj{Url: "/parameters", Method: "DELETE", HandlerFunc: parameter.ParameterBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/parameters", Method: "PUT", HandlerFunc: parameter.ParameterBatchUpdate, LogOperation: true}, + ) + + // cloud config + httpHandlerFuncList = append(httpHandlerFuncList, + &handlerFuncObj{Url: "/providers", Method: "POST", HandlerFunc: provider.ProviderBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/providers", Method: "GET", HandlerFunc: provider.ProviderList}, + &handlerFuncObj{Url: "/providers", Method: "DELETE", HandlerFunc: provider.ProviderBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/providers", Method: "PUT", HandlerFunc: provider.ProviderBatchUpdate, LogOperation: true}, + + &handlerFuncObj{Url: "/provider_infos", Method: "POST", HandlerFunc: provider.ProviderInfoBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/provider_infos", Method: "GET", HandlerFunc: provider.ProviderInfoList}, + &handlerFuncObj{Url: "/provider_infos", Method: "DELETE", HandlerFunc: provider.ProviderInfoBatchDelete, 
LogOperation: true}, + &handlerFuncObj{Url: "/provider_infos", Method: "PUT", HandlerFunc: provider.ProviderInfoBatchUpdate, LogOperation: true}, + + &handlerFuncObj{Url: "/provider_template_values", Method: "POST", HandlerFunc: provider.ProviderTemplateValueBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/provider_template_values", Method: "GET", HandlerFunc: provider.ProviderTemplateValueList}, + &handlerFuncObj{Url: "/provider_template_values", Method: "DELETE", HandlerFunc: provider.ProviderTemplateValueBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/provider_template_values", Method: "PUT", HandlerFunc: provider.ProviderTemplateValueBatchUpdate, LogOperation: true}, + &handlerFuncObj{Url: "/provider_template_values/:templateId", Method: "GET", HandlerFunc: provider.ProviderTemplateValueListByTemplate}, + + &handlerFuncObj{Url: "/sources", Method: "POST", HandlerFunc: source.SourceBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/sources", Method: "GET", HandlerFunc: source.SourceList}, + &handlerFuncObj{Url: "/sources", Method: "DELETE", HandlerFunc: source.SourceBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/sources", Method: "PUT", HandlerFunc: source.SourceBatchUpdate, LogOperation: true}, + + &handlerFuncObj{Url: "/tf_arguments", Method: "POST", HandlerFunc: tf_argument.TfArgumentBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/tf_arguments", Method: "GET", HandlerFunc: tf_argument.TfArgumentList}, + &handlerFuncObj{Url: "/tf_arguments", Method: "DELETE", HandlerFunc: tf_argument.TfArgumentBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/tf_arguments", Method: "PUT", HandlerFunc: tf_argument.TfArgumentBatchUpdate, LogOperation: true}, + + &handlerFuncObj{Url: "/tfstate_attributes", Method: "POST", HandlerFunc: tfstate_attribute.TfstateAttributeBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/tfstate_attributes", Method: "GET", HandlerFunc: tfstate_attribute.TfstateAttributeList}, + 
&handlerFuncObj{Url: "/tfstate_attributes", Method: "DELETE", HandlerFunc: tfstate_attribute.TfstateAttributeBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/tfstate_attributes", Method: "PUT", HandlerFunc: tfstate_attribute.TfstateAttributeBatchUpdate, LogOperation: true}, + ) + + // resource_data + httpHandlerFuncList = append(httpHandlerFuncList, + &handlerFuncObj{Url: "/resource_datas", Method: "POST", HandlerFunc: resource_data.ResourceDataBatchCreate, LogOperation: true}, + &handlerFuncObj{Url: "/resource_datas", Method: "GET", HandlerFunc: resource_data.ResourceDataList}, + &handlerFuncObj{Url: "/resource_datas", Method: "DELETE", HandlerFunc: resource_data.ResourceDataBatchDelete, LogOperation: true}, + &handlerFuncObj{Url: "/resource_datas", Method: "PUT", HandlerFunc: resource_data.ResourceDataBatchUpdate, LogOperation: true}, + + // for resource_data_debug + &handlerFuncObj{Url: "/resource_data_debugs", Method: "GET", HandlerFunc: resource_data.ResourceDataDebugList}, + &handlerFuncObj{Url: "/terraform_debug/:plugin/:action", Method: "POST", HandlerFunc: resource_data.TerraformOperationDebug, LogOperation: true}, + + // &handlerFuncObj{Url: "/terraform/:plugin/:action", Method: "POST", HandlerFunc: resource_data.TerraformOperation, LogOperation: true}, + ) + + // export and import + httpHandlerFuncList = append(httpHandlerFuncList, + &handlerFuncObj{Url: "/provider_plugin_config/export", Method: "GET", HandlerFunc: provider.ProviderPluginExport, LogOperation: true}, + &handlerFuncObj{Url: "/provider_plugin_config/import", Method: "POST", HandlerFunc: provider.ProviderPluginImport, LogOperation: false}, + &handlerFuncObj{Url: "/plugin_xml/export", Method: "GET", HandlerFunc: plugin.PluginXmlExport, LogOperation: true}, + ) +} + +func InitHttpServer() { + urlPrefix := models.UrlPrefix + r := gin.New() + if !models.PluginRunningMode { + // reflect ui resource + r.LoadHTMLGlob("public/*.html") + r.Static(fmt.Sprintf("%s/js", urlPrefix), 
"public/js") + r.Static(fmt.Sprintf("%s/css", urlPrefix), "public/css") + r.Static(fmt.Sprintf("%s/img", urlPrefix), "public/img") + r.Static(fmt.Sprintf("%s/fonts", urlPrefix), "public/fonts") + r.GET(fmt.Sprintf("%s/", urlPrefix), func(c *gin.Context) { + c.HTML(http.StatusOK, "index.html", gin.H{}) + }) + // allow cross request + if models.Config.HttpServer.Cross { + crossHandler(r) + } + } + // access log + if models.Config.Log.AccessLogEnable { + r.Use(httpLogHandle()) + } + // const handler func + // r.POST(urlPrefix+"/api/v1/login", permission.Login) + // register handler func with auth + authRouter := r.Group(urlPrefix + "/api/v1") + // authRouter.GET("/refresh-token", permission.RefreshToken) + for _, funcObj := range httpHandlerFuncList { + switch funcObj.Method { + case "GET": + if funcObj.LogOperation { + authRouter.GET(funcObj.Url, funcObj.HandlerFunc, log_operation.HandleOperationLog) + } else { + authRouter.GET(funcObj.Url, funcObj.HandlerFunc) + } + break + case "POST": + if funcObj.LogOperation { + authRouter.POST(funcObj.Url, funcObj.HandlerFunc, log_operation.HandleOperationLog) + } else { + authRouter.POST(funcObj.Url, funcObj.HandlerFunc) + } + break + case "PUT": + if funcObj.LogOperation { + authRouter.PUT(funcObj.Url, funcObj.HandlerFunc, log_operation.HandleOperationLog) + } else { + authRouter.PUT(funcObj.Url, funcObj.HandlerFunc) + } + break + case "DELETE": + if funcObj.LogOperation { + authRouter.DELETE(funcObj.Url, funcObj.HandlerFunc, log_operation.HandleOperationLog) + } else { + authRouter.DELETE(funcObj.Url, funcObj.HandlerFunc) + } + break + } + } + /* + r.POST(urlPrefix+"/entities/:ciType/query", middleware.AuthToken(), ci.HandleCiModelRequest) + r.POST(urlPrefix+"/entities/:ciType/create", middleware.AuthCoreRequestToken(), ci.HandleCiModelRequest, ci.HandleOperationLog) + r.POST(urlPrefix+"/entities/:ciType/update", middleware.AuthCoreRequestToken(), ci.HandleCiModelRequest, ci.HandleOperationLog) + 
r.POST(urlPrefix+"/entities/:ciType/delete", middleware.AuthCoreRequestToken(), ci.HandleCiModelRequest, ci.HandleOperationLog) + r.GET(urlPrefix+"/data-model", middleware.AuthToken(), ci.GetAllDataModel) + r.POST(urlPrefix+"/plugin/ci-data/operation", middleware.AuthCorePluginToken(), ci.PluginCiDataOperationHandle, ci.HandleOperationLog) + r.POST(urlPrefix+"/plugin/ci-data/attr-value", middleware.AuthCorePluginToken(), ci.PluginCiDataAttrValueHandle, ci.HandleOperationLog) + */ + // r.POST(urlPrefix + "/api/v1/:plugin/:action", middleware.AuthCoreRequestToken(), resource_data.TerraformOperation, log_operation.HandleOperationLog) + r.POST(urlPrefix+"/api/v1/terraform/:plugin/:action", resource_data.TerraformOperation, log_operation.HandleOperationLog) + r.Run(":" + models.Config.HttpServer.Port) +} + +func crossHandler(r *gin.Engine) { + r.Use(func(c *gin.Context) { + if c.GetHeader("Origin") != "" { + c.Header("Access-Control-Allow-Origin", c.GetHeader("Origin")) + } + }) +} + +func httpLogHandle() gin.HandlerFunc { + return func(c *gin.Context) { + start := time.Now() + bodyBytes, _ := ioutil.ReadAll(c.Request.Body) + c.Request.Body.Close() + c.Request.Body = ioutil.NopCloser(bytes.NewReader(bodyBytes)) + c.Set("requestBody", string(bodyBytes)) + c.Next() + log.AccessLogger.Info("request", log.String("url", c.Request.RequestURI), log.String("method", c.Request.Method), log.Int("code", c.Writer.Status()), log.String("operator", c.GetString("user")), log.String("ip", middleware.GetRemoteIp(c)), log.Float64("cost_ms", time.Now().Sub(start).Seconds()*1000), log.String("body", string(bodyBytes))) + } +} diff --git a/terraform-server/api/middleware/request.go b/terraform-server/api/middleware/request.go new file mode 100644 index 00000000..5ba6cf3b --- /dev/null +++ b/terraform-server/api/middleware/request.go @@ -0,0 +1,11 @@ +package middleware + +import "github.com/gin-gonic/gin" + +func GetRemoteIp(c *gin.Context) string { + netIp, ok := c.RemoteIP() + if ok { + 
return netIp.String() + } + return c.ClientIP() +} diff --git a/terraform-server/api/middleware/response.go b/terraform-server/api/middleware/response.go new file mode 100644 index 00000000..0873cbf5 --- /dev/null +++ b/terraform-server/api/middleware/response.go @@ -0,0 +1,71 @@ +package middleware + +import ( + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/gin-gonic/gin" + "net/http" +) + +func ReturnPageData(c *gin.Context, pageInfo models.PageInfo, contents interface{}) { + if contents == nil { + contents = []string{} + } + c.JSON(http.StatusOK, models.ResponseJson{StatusCode: "OK", Data: models.ResponsePageData{PageInfo: pageInfo, Contents: contents}}) +} + +func ReturnEmptyPageData(c *gin.Context) { + c.JSON(http.StatusOK, models.ResponseJson{StatusCode: "OK", Data: models.ResponsePageData{PageInfo: models.PageInfo{StartIndex: 0, PageSize: 0, TotalRows: 0}, Contents: []string{}}}) +} + +func ReturnData(c *gin.Context, data interface{}) { + if data == nil { + data = []string{} + } + c.JSON(http.StatusOK, models.ResponseJson{StatusCode: "OK", Data: data}) +} + +func ReturnSuccess(c *gin.Context) { + c.JSON(http.StatusOK, models.ResponseJson{StatusCode: "OK", Data: []string{}}) +} + +func ReturnError(c *gin.Context, statusCode, statusMessage string, data interface{}) { + if data == nil { + data = []string{} + } + log.Logger.Error("Handle error", log.String("statusCode", statusCode), log.String("message", statusMessage)) + c.JSON(http.StatusOK, models.ResponseErrorJson{StatusCode: statusCode, StatusMessage: statusMessage, Data: data}) +} + +func ReturnBatchUpdateError(c *gin.Context, data []*models.ResponseErrorObj) { + ReturnError(c, "ERR_BATCH_CHANGE", "message", data) +} + +func ReturnParamValidateError(c *gin.Context, err error) { + ReturnError(c, "PARAM_VALIDATE_ERROR", err.Error(), nil) +} + +func ReturnParamEmptyError(c 
*gin.Context, paramName string) { + ReturnError(c, "PARAM_EMPTY_ERROR", paramName, nil) +} + +func ReturnServerHandleError(c *gin.Context, err error) { + log.Logger.Error("Request server handle error", log.Error(err)) + ReturnError(c, "SERVER_HANDLE_ERROR", err.Error(), nil) +} + +func ReturnTokenValidateError(c *gin.Context, err error) { + c.JSON(http.StatusUnauthorized, models.ResponseErrorJson{StatusCode: "TOKEN_VALIDATE_ERROR", StatusMessage: err.Error(), Data: nil}) +} + +func ReturnDataPermissionError(c *gin.Context, err error) { + ReturnError(c, "DATA_PERMISSION_ERROR", err.Error(), nil) +} + +func ReturnDataPermissionDenyError(c *gin.Context) { + ReturnError(c, "DATA_PERMISSION_DENY", "permission deny", nil) +} + +func ReturnApiPermissionError(c *gin.Context) { + ReturnError(c, "API_PERMISSION_ERROR", "api permission deny", nil) +} diff --git a/terraform-server/api/middleware/token.go b/terraform-server/api/middleware/token.go new file mode 100644 index 00000000..f43f9b6a --- /dev/null +++ b/terraform-server/api/middleware/token.go @@ -0,0 +1,114 @@ +package middleware + +import ( + "fmt" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/token" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/gin-gonic/gin" + "net/http" +) + +func GetRequestUser(c *gin.Context) string { + user := models.AdminUser + if models.Config.Auth.Enable { + user = c.GetString("user") + } + return user +} + +func GetRequestRoles(c *gin.Context) []string { + roles := []string{models.AdminUser} + if models.Config.Auth.Enable { + roles = c.GetStringSlice("roles") + } + return roles +} + + + +func authRequest(c *gin.Context) error { + if !models.Config.Auth.Enable { + return nil + } + authHeader := c.GetHeader("Authorization") + if authHeader == "" { + return fmt.Errorf("Can not find Request Header Authorization ") + } + authToken, 
err := token.DecodeJwtToken(authHeader, models.Config.Wecube.JwtSigningKey) + if err != nil { + return err + } + if authToken.User == "" { + return fmt.Errorf("Token content is illegal,main message is empty ") + } + c.Set("user", authToken.User) + c.Set("roles", authToken.Roles) + return nil +} + +func AuthCoreRequestToken() gin.HandlerFunc { + return func(c *gin.Context) { + err := authCoreRequest(c) + if err != nil { + c.JSON(http.StatusOK, models.EntityResponse{Status: "ERROR", Message: "Permission deny "}) + c.Abort() + } else { + c.Next() + } + } +} + +func AuthCorePluginToken() gin.HandlerFunc { + return func(c *gin.Context) { + err := authCoreRequest(c) + if err != nil { + c.JSON(http.StatusOK, pluginInterfaceResultObj{ResultCode: "1", ResultMessage: "Token authority validate fail", Results: pluginInterfaceResultOutput{Outputs: []string{}}}) + c.Abort() + } else { + c.Next() + } + } +} + +type pluginInterfaceResultObj struct { + ResultCode string `json:"resultCode"` + ResultMessage string `json:"resultMessage"` + Results pluginInterfaceResultOutput `json:"results"` +} + +type pluginInterfaceResultOutput struct { + Outputs []string `json:"outputs"` +} + +func authCoreRequest(c *gin.Context) error { + if !models.Config.Auth.Enable { + return nil + } + authHeader := c.GetHeader("Authorization") + if authHeader == "" { + return fmt.Errorf("Can not find Request Header Authorization ") + } + authToken, err := token.DecodeJwtToken(authHeader, models.Config.Wecube.JwtSigningKey) + if err != nil { + return err + } + if authToken.User == "" { + return fmt.Errorf("Token content is illegal,main message is empty ") + } + isSystemCall := false + log.Logger.Debug("core token", log.StringList("role", authToken.Roles), log.String("user", authToken.User), log.String("header", authHeader)) + for _, v := range authToken.Roles { + if v == models.SystemRole { + isSystemCall = true + break + } + } + if !isSystemCall { + return fmt.Errorf("Token authority validate fail ") + } else 
{ + c.Set("user", authToken.User) + c.Set("roles", authToken.Roles) + } + return nil +} diff --git a/terraform-server/api/v1/interfaces/interfaces.go b/terraform-server/api/v1/interfaces/interfaces.go new file mode 100644 index 00000000..401a44e3 --- /dev/null +++ b/terraform-server/api/v1/interfaces/interfaces.go @@ -0,0 +1,78 @@ +package interfaces + +import ( + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func InterfaceList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + plugin := c.Query("plugin") + if plugin != "" { + paramsMap["plugin"] = plugin + } + rowData, err := db.InterfaceList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.InterfaceTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func InterfaceBatchCreate(c *gin.Context) { + var param []*models.InterfaceTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + rowData, err := db.InterfaceBatchCreate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func InterfaceBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.InterfaceBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func InterfaceBatchUpdate(c *gin.Context) { + var param []*models.InterfaceTable + var err error + if err = 
c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.InterfaceBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} diff --git a/terraform-server/api/v1/log_operation/operation_log.go b/terraform-server/api/v1/log_operation/operation_log.go new file mode 100644 index 00000000..8d89fcee --- /dev/null +++ b/terraform-server/api/v1/log_operation/operation_log.go @@ -0,0 +1,71 @@ +package log_operation + +import ( + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +var operationLogChannel = make(chan *models.SysLogTable, 100) + +func StartConsumeOperationLog() { + log.Logger.Info("start consume operation log job") + for { + operationLogObj := <-operationLogChannel + db.SaveOperationLog(operationLogObj) + } +} + +func HandleOperationLog(c *gin.Context) { + var operationLogObj models.SysLogTable + operationLogObj.Operator = c.GetString("user") + operationLogObj.Operation = c.Param("operation") + if operationLogObj.Operation == "" { + operationLogObj.Operation = c.Request.Method + } + operationLogObj.Content = c.GetString("requestBody") + operationLogObj.RequestUrl = c.Request.RequestURI + for i, v := range strings.Split(operationLogObj.RequestUrl, "/") { + if i == 4 { + if v == "ci-data" { + operationLogObj.LogCat = "CI Data Management" + } else if v == "ci-types" || v == "ci-types-attr" { + operationLogObj.LogCat = "CI Type Management" + } else if v == "base-key" { + operationLogObj.LogCat = "Base Data Management" + } else if v == "permissions" { + 
operationLogObj.LogCat = "Permission Management" + } else { + operationLogObj.LogCat = v + } + break + } + } + operationLogObj.ClientHost = middleware.GetRemoteIp(c) + operationLogChannel <- &operationLogObj +} + +func QueryOperationLog(c *gin.Context) { + //Param validate + var param models.QueryRequestParam + if err := c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + //Query database + pageInfo, rowData, err := db.QueryOperationLog(¶m) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnPageData(c, pageInfo, rowData) + } +} + +func GetAllLogOperation(c *gin.Context) { + operationList := db.GetAllLogOperation() + middleware.ReturnData(c, operationList) +} diff --git a/terraform-server/api/v1/parameter/parameter.go b/terraform-server/api/v1/parameter/parameter.go new file mode 100644 index 00000000..b9725026 --- /dev/null +++ b/terraform-server/api/v1/parameter/parameter.go @@ -0,0 +1,79 @@ +package parameter + +import ( + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func ParameterList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + interfaceId := c.Query("interface") + if interfaceId != "" { + paramsMap["interface"] = interfaceId + } + rowData, err := db.ParameterList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.ParameterQuery{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func ParameterBatchCreate(c *gin.Context) { + var param []*models.ParameterTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + // 
rowData, err := db.ParameterBatchCreate(user, param) + rowData, err := db.ParameterBatchCreateUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func ParameterBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.ParameterBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func ParameterBatchUpdate(c *gin.Context) { + var param []*models.ParameterTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.ParameterBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} diff --git a/terraform-server/api/v1/plugin/plugin.go b/terraform-server/api/v1/plugin/plugin.go new file mode 100644 index 00000000..5662ce6e --- /dev/null +++ b/terraform-server/api/v1/plugin/plugin.go @@ -0,0 +1,143 @@ +package plugin + +import ( + "fmt" + "net/http" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func PluginCreate(c *gin.Context) { + var param models.PluginTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + param.CreateUser = user + rowData, err := db.PluginCreate(¶m) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, 
rowData) + } +} + +func PluginList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + rowData, err := db.PluginList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.PluginTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func PluginDelete(c *gin.Context) { + pluginId := c.Param("pluginId") + + if pluginId == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param pluginId can not be empty")) + return + } + err := db.PluginDelete(pluginId) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func PluginUpdate(c *gin.Context) { + var param models.PluginTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + pluginId := c.Param("pluginId") + if pluginId == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param pluginId can not be empty")) + return + } + user := middleware.GetRequestUser(c) + param.UpdateUser = user + err = db.PluginUpdate(pluginId, ¶m) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func PluginBatchCreate(c *gin.Context) { + var param []*models.PluginTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + rowData, err := db.PluginBatchCreate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func PluginBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.PluginBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } 
else { + middleware.ReturnSuccess(c) + } + return +} + +func PluginBatchUpdate(c *gin.Context) { + var param []*models.PluginTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.PluginBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func PluginXmlExport(c *gin.Context) { + result, err := db.PluginXmlExport() + if err != nil { + middleware.ReturnServerHandleError(c, err) + return + } + c.Writer.Header().Add("Content-Disposition", fmt.Sprintf("attachment; filename=%s_%s.xml", "terrform_xml", time.Now().Format("20060102150405"))) + c.Data(http.StatusOK, "application/octet-stream", result) +} diff --git a/terraform-server/api/v1/provider/provider.go b/terraform-server/api/v1/provider/provider.go new file mode 100644 index 00000000..527624d6 --- /dev/null +++ b/terraform-server/api/v1/provider/provider.go @@ -0,0 +1,131 @@ +package provider + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "net/http" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func ProviderList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + rowData, err := db.ProviderList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.ProviderTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func ProviderBatchCreate(c *gin.Context) { + var param []*models.ProviderTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := 
middleware.GetRequestUser(c) + rowData, err := db.ProviderBatchCreate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func ProviderBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.ProviderBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func ProviderBatchUpdate(c *gin.Context) { + var param []*models.ProviderTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.ProviderBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func ProviderPluginExport(c *gin.Context) { + providerId := c.Query("provider") + pluginId := c.Query("plugin") + if providerId == "" || pluginId == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Param provider and plugin can not emtpy ")) + return + } + result, err := db.ProviderPluginExport(strings.Split(providerId, ","), strings.Split(pluginId, ",")) + if err != nil { + middleware.ReturnServerHandleError(c, err) + return + } + b, err := json.Marshal(result) + if err != nil { + middleware.ReturnServerHandleError(c, fmt.Errorf("Export terrform config fail, json marshal object error:%s ", err.Error())) + return + } + c.Writer.Header().Add("Content-Disposition", fmt.Sprintf("attachment; filename=%s_%s.json", "terrform_config", time.Now().Format("20060102150405"))) + c.Data(http.StatusOK, "application/octet-stream", b) +} + +func ProviderPluginImport(c *gin.Context) { + file, err := c.FormFile("file") + if err != nil { + c.JSON(http.StatusInternalServerError, 
models.ResponseErrorJson{StatusCode: "PARAM_HANDLE_ERROR", StatusMessage: "Http read upload file fail:" + err.Error(), Data: nil}) + return + } + f, err := file.Open() + if err != nil { + c.JSON(http.StatusInternalServerError, models.ResponseErrorJson{StatusCode: "PARAM_HANDLE_ERROR", StatusMessage: "File open error:" + err.Error(), Data: nil}) + return + } + var paramObj models.ProviderPluginImportObj + b, err := ioutil.ReadAll(f) + defer f.Close() + if err != nil { + c.JSON(http.StatusInternalServerError, models.ResponseErrorJson{StatusCode: "PARAM_HANDLE_ERROR", StatusMessage: "Read content fail error:" + err.Error(), Data: nil}) + return + } + err = json.Unmarshal(b, ¶mObj) + if err != nil { + c.JSON(http.StatusInternalServerError, models.ResponseErrorJson{StatusCode: "PARAM_HANDLE_ERROR", StatusMessage: "Json unmarshal fail error:" + err.Error(), Data: nil}) + return + } + err = db.ProviderPluginImport(paramObj, middleware.GetRequestUser(c)) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } +} diff --git a/terraform-server/api/v1/provider/provider_info.go b/terraform-server/api/v1/provider/provider_info.go new file mode 100644 index 00000000..0cb0bb50 --- /dev/null +++ b/terraform-server/api/v1/provider/provider_info.go @@ -0,0 +1,104 @@ +package provider + +import ( + "fmt" + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/cipher" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func ProviderInfoList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + rowData, err := db.ProviderInfoList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = 
[]*models.ProviderInfoQuery{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func ProviderInfoBatchCreate(c *gin.Context) { + var param []*models.ProviderInfoTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + for i := range param { + enCodeSecretId, encodeErr := cipher.AesEnPasswordByGuid(models.PGuid, models.Config.Auth.PasswordSeed, param[i].SecretId, "") + if encodeErr != nil { + err = fmt.Errorf("Try to encode secretId fail,%s ", encodeErr.Error()) + return + } + enCodeSecretKey, encodeErr := cipher.AesEnPasswordByGuid(models.PGuid, models.Config.Auth.PasswordSeed, param[i].SecretKey, "") + if encodeErr != nil { + err = fmt.Errorf("Try to encode secretKey fail,%s ", encodeErr.Error()) + return + } + param[i].SecretId = enCodeSecretId + param[i].SecretKey = enCodeSecretKey + } + rowData, err := db.ProviderInfoBatchCreate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func ProviderInfoBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.ProviderInfoBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func ProviderInfoBatchUpdate(c *gin.Context) { + var param []*models.ProviderInfoTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + for i := range param { + enCodeSecretId, encodeErr := cipher.AesEnPasswordByGuid(models.PGuid, models.Config.Auth.PasswordSeed, param[i].SecretId, "") + if encodeErr != nil { + err = fmt.Errorf("Try to encode secretId fail,%s ", encodeErr.Error()) + return + 
} + enCodeSecretKey, encodeErr := cipher.AesEnPasswordByGuid(models.PGuid, models.Config.Auth.PasswordSeed, param[i].SecretKey, "") + if encodeErr != nil { + err = fmt.Errorf("Try to encode secretKey fail,%s ", encodeErr.Error()) + return + } + param[i].SecretId = enCodeSecretId + param[i].SecretKey = enCodeSecretKey + } + err = db.ProviderInfoBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} diff --git a/terraform-server/api/v1/provider/provider_template_value.go b/terraform-server/api/v1/provider/provider_template_value.go new file mode 100644 index 00000000..53275490 --- /dev/null +++ b/terraform-server/api/v1/provider/provider_template_value.go @@ -0,0 +1,94 @@ +package provider + +import ( + "fmt" + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func ProviderTemplateValueList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + rowData, err := db.ProviderTemplateValueList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.ProviderTemplateValueTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func ProviderTemplateValueBatchCreate(c *gin.Context) { + var param []*models.ProviderTemplateValueTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + // rowData, err := db.ProviderTemplateValueBatchCreate(user, param) + rowData, err := db.ProviderTemplateValueBatchCreateUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func 
ProviderTemplateValueBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.ProviderTemplateValueBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func ProviderTemplateValueBatchUpdate(c *gin.Context) { + var param []*models.ProviderTemplateValueTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.ProviderTemplateValueBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func ProviderTemplateValueListByTemplate(c *gin.Context) { + templateId := c.Param("templateId") + if templateId == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param templateId can not be empty")) + return + } + rowData, err := db.ProviderTemplateValueListByTemplate(templateId) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.TemplateValueQuery{} + } + middleware.ReturnData(c, rowData) + } + return +} \ No newline at end of file diff --git a/terraform-server/api/v1/resource_data/resource_data.go b/terraform-server/api/v1/resource_data/resource_data.go new file mode 100644 index 00000000..363ec04a --- /dev/null +++ b/terraform-server/api/v1/resource_data/resource_data.go @@ -0,0 +1,321 @@ +package resource_data + +import ( + "encoding/json" + "fmt" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" + "io/ioutil" + 
"net/http" + "reflect" + "strconv" + "strings" + "sync" +) + +func ResourceDataBatchCreate(c *gin.Context) { + var param []*models.ResourceDataTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + rowData, err := db.ResourceDataBatchCreate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func ResourceDataList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + rowData, err := db.ResourceDataList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.ResourceDataTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func ResourceDataBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.ResourceDataBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func ResourceDataBatchUpdate(c *gin.Context) { + var param []*models.ResourceDataTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.ResourceDataBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func TerraformOperation(c *gin.Context) { + rowData := models.PluginInterfaceResultObj{} + rowData.ResultCode = "0" + rowData.ResultMessage = "success" + defer func() { + if r := recover(); r != nil { + err := fmt.Errorf("TerraformOperation error: %v", r) + rowData.ResultCode = "1" + rowData.ResultMessage = err.Error() + c.JSON(http.StatusOK, rowData) + } + }() + + 
plugin := c.Param("plugin") + action := c.Param("action") + + if plugin == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param plugin can not be empty ")) + return + } + + if action == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param action can not be empty ")) + return + } + + var err error + bodyData, err := ioutil.ReadAll(c.Request.Body) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } + + var request_param map[string]interface{} + err = json.Unmarshal(bodyData, &request_param) + inputs := request_param["inputs"] + p := reflect.ValueOf(inputs) + params := []map[string]interface{}{} + for i := 0; i < p.Len(); i++ { + params = append(params, p.Index(i).Interface().(map[string]interface{})) + } + + // rowData := models.PluginInterfaceResultObj{} + // rowData.ResultCode = "0" + // rowData.ResultMessage = "success" + var curProviderData = models.ProviderTable{Name: ""} + count := len(params) + resultChan := make(chan []map[string]interface{}, count) + var wg sync.WaitGroup + wg.Add(count) + for i := range params { + go func(i int) { + defer wg.Done() + + if _, ok := request_param["operator"]; ok { + params[i]["operator_user"] = request_param["operator"] + } else { + params[i]["operator_user"] = "system" + } + params[i]["requestId"] = request_param["requestId"].(string) + "_" + strconv.Itoa(i + 1) + params[i]["requestSn"] = strconv.Itoa(i + 1) + debugFileContent := []map[string]interface{}{} + retData, _ := db.TerraformOperation(plugin, action, params[i], &debugFileContent, &curProviderData) + if _, ok := retData["errorCode"]; ok && retData["errorCode"] != "0" { + rowData.ResultCode = "1" + rowData.ResultMessage = "fail" + } + curResultOutputs := []map[string]interface{}{} + // handle one input, many output + if v, ok := retData[models.TerraformOutPutPrefix]; ok { + tmpData, _ := json.Marshal(v) + var resultList []map[string]interface{} + json.Unmarshal(tmpData, &resultList) + for i := range resultList { + 
tmpRetData := make(map[string]interface{}) + tmpRetData["callbackParameter"] = retData["callbackParameter"] + tmpRetData["errorCode"] = retData["errorCode"] + tmpRetData["errorMessage"] = retData["errorMessage"] + for k, v := range resultList[i] { + tmpRetData[k] = v + } + // rowData.Results.Outputs = append(rowData.Results.Outputs, tmpRetData) + curResultOutputs = append(curResultOutputs, tmpRetData) + } + } else { + // rowData.Results.Outputs = append(rowData.Results.Outputs, retData) + curResultOutputs = append(curResultOutputs, retData) + } + resultChan<-curResultOutputs + }(i) + } + wg.Wait() + close(resultChan) + for i := range resultChan { + curRes := i + rowData.Results.Outputs = append(rowData.Results.Outputs, curRes...) + } + + // clear the workpath + /* + if curProviderData.Name != "" { + db.DelDir(models.Config.TerraformFilePath + curProviderData.Name) + } + */ + + c.JSON(http.StatusOK, rowData) + return +} + +// for resource_data_debug +func ResourceDataDebugList (c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + queryIds := strings.Split(trimIds, ",") + queryIdsStr := strings.Join(queryIds, "','") + rowData, err := db.ResourceDataDebugList(queryIdsStr) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.ResourceDataQuery{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func TerraformOperationDebug (c *gin.Context) { + rowData := models.PluginInterfaceResultObjDebug{} + rowData.StatusCode = "OK" + rowData.ResultCode = "0" + rowData.ResultMessage = "success" + defer func() { + if r := recover(); r != nil { + err := fmt.Errorf("TerraformOperationDebug error: %v", r) + rowData.ResultCode = "1" + rowData.ResultMessage = err.Error() + c.JSON(http.StatusOK, rowData) + } + }() + + plugin := c.Param("plugin") + action := c.Param("action") + + if plugin == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param plugin can not be empty ")) + 
return + } + + if action == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param action can not be empty ")) + return + } + + var err error + bodyData, err := ioutil.ReadAll(c.Request.Body) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } + + var request_param map[string]interface{} + err = json.Unmarshal(bodyData, &request_param) + inputs := request_param["inputs"] + p := reflect.ValueOf(inputs) + params := []map[string]interface{}{} + for i := 0; i < p.Len(); i++ { + params = append(params, p.Index(i).Interface().(map[string]interface{})) + } + + // rowData := models.PluginInterfaceResultObjDebug{} + // rowData.StatusCode = "OK" + // rowData.ResultCode = "0" + // rowData.ResultMessage = "success" + var curProviderData = models.ProviderTable{Name: ""} + count := len(params) + resultChan := make(chan map[string]interface{}, count) + var wg sync.WaitGroup + wg.Add(count) + for i := range params { + go func(i int) { + defer wg.Done() + + if _, ok := request_param["operator"]; ok { + params[i]["operator_user"] = request_param["operator"] + } else { + params[i]["operator_user"] = "system" + } + params[i]["requestId"] = request_param["requestId"].(string) + "_" + strconv.Itoa(i+1) + params[i]["requestSn"] = strconv.Itoa(i + 1) + params[i][models.ResourceDataDebug] = true + debugFileContent := []map[string]interface{}{} + retData, _ := db.TerraformOperation(plugin, action, params[i], &debugFileContent, &curProviderData) + if _, ok := retData["errorCode"]; ok && retData["errorCode"] != "0" { + rowData.ResultCode = "1" + rowData.ResultMessage = "fail" + } + + // handle one input, many output + curResultOutputs := []map[string]interface{}{} + if v, ok := retData[models.TerraformOutPutPrefix]; ok { + tmpData, _ := json.Marshal(v) + var resultList []map[string]interface{} + json.Unmarshal(tmpData, &resultList) + for i := range resultList { + tmpRetData := make(map[string]interface{}) + tmpRetData["callbackParameter"] = 
retData["callbackParameter"] + tmpRetData["errorCode"] = retData["errorCode"] + tmpRetData["errorMessage"] = retData["errorMessage"] + for k, v := range resultList[i] { + tmpRetData[k] = v + } + curResultOutputs = append(curResultOutputs, tmpRetData) + } + if len(resultList) == 0 { + delete(retData, models.TerraformOutPutPrefix) + curResultOutputs = append(curResultOutputs, retData) + } + } else { + curResultOutputs = append(curResultOutputs, retData) + } + curCombineResult := make(map[string]interface{}) + curCombineResult["result_data"] = curResultOutputs + + curCombineResult["resource_results"] = debugFileContent + resultChan<-curCombineResult + }(i) + + // rowData.Results.Outputs = append(rowData.Results.Outputs, curCombineResult) + } + wg.Wait() + close(resultChan) + for i := range resultChan { + curRes := i + rowData.Results.Outputs = append(rowData.Results.Outputs, curRes) + } + + // c.JSON(http.StatusOK, rowData) + tmpRetVal, _ := json.Marshal(rowData) + c.Data(http.StatusOK, "application/json", tmpRetVal) + return +} \ No newline at end of file diff --git a/terraform-server/api/v1/source/source.go b/terraform-server/api/v1/source/source.go new file mode 100644 index 00000000..4218445b --- /dev/null +++ b/terraform-server/api/v1/source/source.go @@ -0,0 +1,85 @@ +package source + +import ( + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func SourceList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + + interfaceId := c.Query("interfaceId") + if interfaceId != "" { + paramsMap["interface"] = interfaceId + } + + providerId := c.Query("providerId") + if providerId != "" { + paramsMap["provider"] = providerId + } + + rowData, err := db.SourceList(paramsMap) + if err != nil { + 
middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.SourceTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func SourceBatchCreate(c *gin.Context) { + var param []*models.SourceTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + rowData, err := db.SourceBatchCreate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func SourceBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.SourceBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func SourceBatchUpdate(c *gin.Context) { + var param []*models.SourceTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.SourceBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} diff --git a/terraform-server/api/v1/template/template.go b/terraform-server/api/v1/template/template.go new file mode 100644 index 00000000..645f00ff --- /dev/null +++ b/terraform-server/api/v1/template/template.go @@ -0,0 +1,93 @@ +package template + +import ( + "fmt" + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func TemplateList(c *gin.Context) { + paramsMap := 
make(map[string]interface{}) + rowData, err := db.TemplateList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.TemplateTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func TemplateBatchCreate(c *gin.Context) { + var param []*models.TemplateTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + rowData, err := db.TemplateBatchCreate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func TemplateBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.TemplateBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func TemplateBatchUpdate(c *gin.Context) { + var param []*models.TemplateTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.TemplateBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func TemplateListByPlugin(c *gin.Context) { + pluginId := c.Param("pluginId") + if pluginId == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param pluginId can not be empty")) + return + } + rowData, err := db.TemplateListByPlugin(pluginId) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.TemplateTable{} + } + middleware.ReturnData(c, rowData) + } + return +} \ No newline at end of file diff --git 
a/terraform-server/api/v1/template/template_value.go b/terraform-server/api/v1/template/template_value.go new file mode 100644 index 00000000..d5553f85 --- /dev/null +++ b/terraform-server/api/v1/template/template_value.go @@ -0,0 +1,94 @@ +package template + +import ( + "fmt" + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func TemplateValueList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + rowData, err := db.TemplateValueList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.TemplateValueTable{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func TemplateValueBatchCreate(c *gin.Context) { + var param []*models.TemplateValueTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + // rowData, err := db.TemplateValueBatchCreate(user, param) + rowData, err := db.TemplateValueBatchCreateUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func TemplateValueBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.TemplateValueBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func TemplateValueBatchUpdate(c *gin.Context) { + var param []*models.TemplateValueTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, 
err) + return + } + user := middleware.GetRequestUser(c) + err = db.TemplateValueBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func TemplateValueListByParameter(c *gin.Context) { + parameterId := c.Param("parameterId") + if parameterId == "" { + middleware.ReturnParamValidateError(c, fmt.Errorf("Url param pluginId can not be empty")) + return + } + rowData, err := db.TemplateValueListByParameter(parameterId) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.TemplateValueTable{} + } + middleware.ReturnData(c, rowData) + } + return +} \ No newline at end of file diff --git a/terraform-server/api/v1/tf_argument/tf_argument.go b/terraform-server/api/v1/tf_argument/tf_argument.go new file mode 100644 index 00000000..dc034f61 --- /dev/null +++ b/terraform-server/api/v1/tf_argument/tf_argument.go @@ -0,0 +1,79 @@ +package tf_argument + +import ( + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func TfArgumentList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + sourceId := c.Query("sourceId") + if sourceId != "" { + paramsMap["source"] = sourceId + } + rowData, err := db.TfArgumentList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = []*models.TfArgumentQuery{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func TfArgumentBatchCreate(c *gin.Context) { + var param []*models.TfArgumentTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + // rowData, 
err := db.TfArgumentBatchCreate(user, param) + rowData, err := db.TfArgumentBatchCreateUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func TfArgumentBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.TfArgumentBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func TfArgumentBatchUpdate(c *gin.Context) { + var param []*models.TfArgumentTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.TfArgumentBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} diff --git a/terraform-server/api/v1/tfstate_attribute/tfstate_attribute.go b/terraform-server/api/v1/tfstate_attribute/tfstate_attribute.go new file mode 100644 index 00000000..c615f649 --- /dev/null +++ b/terraform-server/api/v1/tfstate_attribute/tfstate_attribute.go @@ -0,0 +1,79 @@ +package tfstate_attribute + +import ( + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/middleware" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" + "github.com/gin-gonic/gin" +) + +func TfstateAttributeList(c *gin.Context) { + paramsMap := make(map[string]interface{}) + sourceId := c.Query("sourceId") + if sourceId != "" { + paramsMap["source"] = sourceId + } + rowData, err := db.TfstateAttributeList(paramsMap) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + if len(rowData) == 0 { + rowData = 
[]*models.TfstateAttributeQuery{} + } + middleware.ReturnData(c, rowData) + } + return +} + +func TfstateAttributeBatchCreate(c *gin.Context) { + var param []*models.TfstateAttributeTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + // rowData, err := db.TfstateAttributeBatchCreate(user, param) + rowData, err := db.TfstateAttributeBatchCreateUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnData(c, rowData) + } +} + +func TfstateAttributeBatchDelete(c *gin.Context) { + ids := c.Query("ids") + trimIds := strings.Trim(ids, ",") + param := strings.Split(trimIds, ",") + if len(param) == 0 { + middleware.ReturnParamEmptyError(c, "ids") + return + } + err := db.TfstateAttributeBatchDelete(param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} + +func TfstateAttributeBatchUpdate(c *gin.Context) { + var param []*models.TfstateAttributeTable + var err error + if err = c.ShouldBindJSON(¶m); err != nil { + middleware.ReturnParamValidateError(c, err) + return + } + user := middleware.GetRequestUser(c) + err = db.TfstateAttributeBatchUpdate(user, param) + if err != nil { + middleware.ReturnServerHandleError(c, err) + } else { + middleware.ReturnSuccess(c) + } + return +} diff --git a/terraform-server/common-lib/cipher/password.go b/terraform-server/common-lib/cipher/password.go new file mode 100644 index 00000000..5ae8beed --- /dev/null +++ b/terraform-server/common-lib/cipher/password.go @@ -0,0 +1,153 @@ +package cipher + +import ( + "bytes" + "crypto/aes" + "crypto/cipher" + "crypto/md5" + "encoding/hex" + "fmt" + "math/rand" + "strings" + "time" +) + +var ( + passwordLength = 12 + digitalBytes = []byte("0123456789") + lettersBytes = []byte("abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ") + DEFALT_CIPHER = "CIPHER_A" + 
CIPHER_MAP = map[string]string{"CIPHER_A": "{cipher_a}"} + // CIPHER_MAP = map[string]string{"CIPHER_A": "a"} +) + +func Md5Encode(rawData string) string { + data := []byte(rawData) + return fmt.Sprintf("%x", md5.Sum(data)) +} + +func PKCS7Padding(ciphertext []byte, blockSize int) []byte { + padding := blockSize - len(ciphertext)%blockSize + padtext := bytes.Repeat([]byte{byte(padding)}, padding) + return append(ciphertext, padtext...) +} + +func PKCS7UnPadding(origData []byte) []byte { + length := len(origData) + unpadding := int(origData[length-1]) + if length > unpadding { + return origData[:(length - unpadding)] + } + return []byte{} +} + +func AesEncode(key string, rawData string) (string, error) { + bytesRawKey := []byte(key) + block, err := aes.NewCipher(bytesRawKey) + if err != nil { + return "", err + } + blockSize := block.BlockSize() + origData := PKCS7Padding([]byte(rawData), blockSize) + blockMode := cipher.NewCBCEncrypter(block, bytesRawKey[:blockSize]) + crypted := make([]byte, len([]byte(origData))) + blockMode.CryptBlocks(crypted, origData) + return hex.EncodeToString(crypted), nil +} + +func AesDecode(key string, encryptData string) (password string, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + } + }() + + bytesRawKey := []byte(key) + bytesRawData, _ := hex.DecodeString(encryptData) + block, err := aes.NewCipher(bytesRawKey) + if err != nil { + return + } + blockSize := block.BlockSize() + blockMode := cipher.NewCBCDecrypter(block, bytesRawKey[:blockSize]) + origData := make([]byte, len(bytesRawData)) + blockMode.CryptBlocks(origData, bytesRawData) + origData = PKCS7UnPadding(origData) + if len(origData) == 0 { + err = fmt.Errorf("password wrong") + return + } + + password = string(origData) + return +} + +func CreateRandomPassword() string { + var result []byte + r := rand.New(rand.NewSource(time.Now().UnixNano())) + for i := 0; i < passwordLength-4; i++ { + result = append(result, 
lettersBytes[r.Intn(len(lettersBytes))]) + } + for i := 0; i < 4; i++ { + result = append(result, digitalBytes[r.Intn(len(digitalBytes))]) + } + return string(result) +} + +func AesEnPassword(seed, password string) (string, error) { + md5sum := Md5Encode(seed) + enPassword, err := AesEncode(md5sum[0:16], password) + if err != nil { + return "", err + } + return enPassword, nil +} + +func AesDePassword(seed, password string) (string, error) { + md5sum := Md5Encode(seed) + dePassword, err := AesDecode(md5sum[0:16], password) + if err != nil { + return "", err + } + return dePassword, nil +} + +func AesEnPasswordByGuid(guid, seed, password, cipher string) (string, error) { + if seed == "" { + return password, nil + } + for _, _cipher := range CIPHER_MAP { + if strings.HasPrefix(password, _cipher) { + return password, nil + } + } + if cipher == "" { + cipher = DEFALT_CIPHER + } + md5sum := Md5Encode(guid + seed) + enPassword, err := AesEncode(md5sum[0:16], password) + if err != nil { + return "", err + } + return CIPHER_MAP[cipher] + enPassword, nil +} + +func AesDePasswordByGuid(guid, seed, password string) (string, error) { + var cipher string + for _, _cipher := range CIPHER_MAP { + if strings.HasPrefix(password, _cipher) { + cipher = _cipher + break + } + } + if cipher == "" { + return password, nil + } + password = password[len(cipher):] + md5sum := Md5Encode(guid + seed) + dePassword, err := AesDecode(md5sum[0:16], password) + if err != nil { + return "", err + } + return dePassword, nil +} \ No newline at end of file diff --git a/terraform-server/common-lib/cipher/rsa.go b/terraform-server/common-lib/cipher/rsa.go new file mode 100644 index 00000000..c1e45f18 --- /dev/null +++ b/terraform-server/common-lib/cipher/rsa.go @@ -0,0 +1,110 @@ +package cipher + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/x509" + "encoding/base64" + "encoding/pem" + "fmt" + "io/ioutil" + "log" + "math/big" + "strings" +) + +func DecryptRsa(inputString, rsaPemPath string) string 
{ + if !strings.HasPrefix(strings.ToLower(inputString), "rsa@") { + return inputString + } + inputString = inputString[4:] + result := inputString + inputBytes, err := base64.StdEncoding.DecodeString(inputString) + if err != nil { + log.Printf("Input string format to base64 fail,%s \n", err.Error()) + return inputString + } + fileContent, err := ioutil.ReadFile(rsaPemPath) + if err != nil { + log.Printf("Read file %s fail,%s \n", rsaPemPath, err.Error()) + return result + } + block, _ := pem.Decode(fileContent) + privateKeyInterface, err := x509.ParsePKCS8PrivateKey(block.Bytes) + if err != nil { + log.Printf("Parse private key fail,%s \n", err.Error()) + return result + } + privateKey := privateKeyInterface.(*rsa.PrivateKey) + decodeBytes, err := rsa.DecryptPKCS1v15(rand.Reader, privateKey, inputBytes) + if err != nil { + log.Printf("Decode fail,%s \n", err.Error()) + return result + } + result = string(decodeBytes) + return result +} + +func RSAEncryptByPrivate(orgidata []byte, privatekey string) ([]byte, error) { + decodeBytes, err := base64.StdEncoding.DecodeString(privatekey) + if err != nil { + return nil, fmt.Errorf("RSASign private key is bad") + } + + privInterface, err := x509.ParsePKCS8PrivateKey(decodeBytes) + if err != nil { + return nil, err + } + + priv := privInterface.(*rsa.PrivateKey) + + k := (priv.N.BitLen() + 7) / 8 + tLen := len(orgidata) + em := make([]byte, k) + em[1] = 1 + for i := 2; i < k-tLen-1; i++ { + em[i] = 0xff + } + copy(em[k-tLen:k], orgidata) + c := new(big.Int).SetBytes(em) + if c.Cmp(priv.N) > 0 { + return nil, nil + } + var m *big.Int + var ir *big.Int + if priv.Precomputed.Dp == nil { + m = new(big.Int).Exp(c, priv.D, priv.N) + } else { + // We have the precalculated values needed for the CRT. 
+ m = new(big.Int).Exp(c, priv.Precomputed.Dp, priv.Primes[0]) + m2 := new(big.Int).Exp(c, priv.Precomputed.Dq, priv.Primes[1]) + m.Sub(m, m2) + if m.Sign() < 0 { + m.Add(m, priv.Primes[0]) + } + m.Mul(m, priv.Precomputed.Qinv) + m.Mod(m, priv.Primes[0]) + m.Mul(m, priv.Primes[1]) + m.Add(m, m2) + + for i, values := range priv.Precomputed.CRTValues { + prime := priv.Primes[2+i] + m2.Exp(c, values.Exp, prime) + m2.Sub(m2, m) + m2.Mul(m2, values.Coeff) + m2.Mod(m2, prime) + if m2.Sign() < 0 { + m2.Add(m2, prime) + } + m2.Mul(m2, values.R) + m.Add(m, m2) + } + } + + if ir != nil { + // Unblind. + m.Mul(m, ir) + m.Mod(m, priv.N) + } + return m.Bytes(), nil +} diff --git a/terraform-server/common-lib/guid/rand.go b/terraform-server/common-lib/guid/rand.go new file mode 100644 index 00000000..67cc117e --- /dev/null +++ b/terraform-server/common-lib/guid/rand.go @@ -0,0 +1,37 @@ +package guid + +import ( + "crypto/rand" + "fmt" + "sort" + "time" +) + +func CreateGuid() string { + b := make([]byte, 16) + rand.Read(b) + return fmt.Sprintf("%x%x", uint32(time.Now().Unix()), b[4:]) +} + +func CreateGuidList(num int) []string { + var guidList guidSortList + for i := 0; i < num; i++ { + guidList = append(guidList, CreateGuid()) + } + sort.Sort(guidList) + return guidList +} + +type guidSortList []string + +func (l guidSortList) Len() int { + return len(l) +} + +func (l guidSortList) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l guidSortList) Less(i, j int) bool { + return l[i] < l[j] +} diff --git a/terraform-server/common-lib/logger/zap.go b/terraform-server/common-lib/logger/zap.go new file mode 100644 index 00000000..d5d893cd --- /dev/null +++ b/terraform-server/common-lib/logger/zap.go @@ -0,0 +1,60 @@ +package logger + +import ( + "go.uber.org/zap" + "go.uber.org/zap/zapcore" + "gopkg.in/natefinch/lumberjack.v2" + "strings" + "time" +) + +var levelStringList = []string{"debug", "info", "warn", "error"} + +type LogConfig struct { + Name string + FilePath string + 
LogLevel string + ArchiveMaxSize int + ArchiveMaxBackup int + ArchiveMaxDay int + Compress bool +} + +func InitArchiveZapLogger(config LogConfig) *zap.Logger { + config.LogLevel = strings.ToLower(config.LogLevel) + var level int + for i, v := range levelStringList { + if v == config.LogLevel { + level = i - 1 + break + } + } + zapLevel := zap.NewAtomicLevel() + zapLevel.SetLevel(zapcore.Level(level)) + hook := lumberjack.Logger{ + Filename: config.FilePath, + MaxSize: config.ArchiveMaxSize, + MaxBackups: config.ArchiveMaxBackup, + MaxAge: config.ArchiveMaxDay, + Compress: config.Compress, + } + encoderConfig := zapcore.EncoderConfig{ + TimeKey: "time", + LevelKey: "level", + NameKey: "logger", + CallerKey: "caller", + MessageKey: "msg", + StacktraceKey: "stacktrace", + LineEnding: zapcore.DefaultLineEnding, + EncodeLevel: zapcore.LowercaseLevelEncoder, + EncodeTime: func(t time.Time, enc zapcore.PrimitiveArrayEncoder) { + enc.AppendString(t.Format("2006-01-02 15:04:05")) + }, + EncodeDuration: zapcore.SecondsDurationEncoder, + EncodeCaller: zapcore.ShortCallerEncoder, + } + core := zapcore.NewCore(zapcore.NewJSONEncoder(encoderConfig), zapcore.NewMultiWriteSyncer(zapcore.AddSync(&hook)), zapLevel) + zapLogger := zap.New(core, zap.AddCaller(), zap.Development()) + zapLogger.Info("Success init " + config.Name + " log !!") + return zapLogger +} diff --git a/terraform-server/common-lib/token/wecube.go b/terraform-server/common-lib/token/wecube.go new file mode 100644 index 00000000..c2567786 --- /dev/null +++ b/terraform-server/common-lib/token/wecube.go @@ -0,0 +1,225 @@ +package token + +import ( + "bytes" + "encoding/base64" + "encoding/json" + "fmt" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/cipher" + "github.com/dgrijalva/jwt-go" + "io/ioutil" + "log" + "net/http" + "strconv" + "strings" + "time" +) + +var ( + coreRefreshToken string + coreRefreshTokenExpTime time.Time + coreRequestToken string + coreRequestTokenExpTime 
time.Time + requestCoreNonce = "monitor" + defaultSignedKey = "Platform+Auth+Server+Secret" +) + +type requestToken struct { + Password string `json:"password"` + Username string `json:"username"` + Nonce string `json:"nonce"` + ClientType string `json:"clientType"` +} + +type responseObj struct { + Status string `json:"status"` + Message string `json:"message"` + Data []*responseDataObj `json:"data"` +} + +type responseDataObj struct { + Expiration string `json:"expiration"` + Token string `json:"token"` + TokenType string `json:"tokenType"` +} + +type CoreToken struct { + BaseUrl string + JwtSigningKey string + SubSystemCode string + SubSystemKey string +} + +func (c *CoreToken) refreshToken() error { + req, err := http.NewRequest(http.MethodGet, c.BaseUrl+"/auth/v1/api/token", strings.NewReader("")) + if err != nil { + return fmt.Errorf("http new request fail,%s ", err.Error()) + } + req.Header.Set("Authorization", coreRefreshToken) + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("http response fail,%s ", err.Error()) + } + var respObj responseObj + bodyBytes, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + err = json.Unmarshal(bodyBytes, &respObj) + if err != nil { + return fmt.Errorf("http response body json unmarshal fail,%s ", err.Error()) + } + for _, v := range respObj.Data { + if len(v.Expiration) > 10 { + v.Expiration = v.Expiration[:10] + } + expInt, _ := strconv.ParseInt(v.Expiration, 10, 64) + if v.TokenType == "refreshToken" { + coreRefreshToken = "Bearer " + v.Token + coreRefreshTokenExpTime = time.Unix(expInt, 0) + } + if v.TokenType == "accessToken" { + coreRequestToken = "Bearer " + v.Token + coreRequestTokenExpTime = time.Unix(expInt, 0) + } + } + return nil +} + +func (c *CoreToken) requestCoreToken(rsaKey string) error { + encryptBytes, err := cipher.RSAEncryptByPrivate([]byte(fmt.Sprintf("%s:%s", c.SubSystemCode, requestCoreNonce)), rsaKey) + encryptString := base64.StdEncoding.EncodeToString(encryptBytes) 
+ if err != nil { + return err + } + postParam := requestToken{Username: c.SubSystemCode, Nonce: requestCoreNonce, ClientType: "SUB_SYSTEM", Password: encryptString} + postBytes, _ := json.Marshal(postParam) + fmt.Printf("param: %s \n", string(postBytes)) + req, err := http.NewRequest(http.MethodPost, c.BaseUrl+"/auth/v1/api/login", bytes.NewReader(postBytes)) + if err != nil { + return fmt.Errorf("http new request fail,%s ", err.Error()) + } + resp, err := http.DefaultClient.Do(req) + if err != nil { + return fmt.Errorf("http response fail, %s ", err.Error()) + } + var respObj responseObj + bodyBytes, _ := ioutil.ReadAll(resp.Body) + resp.Body.Close() + err = json.Unmarshal(bodyBytes, &respObj) + if err != nil { + return fmt.Errorf("http response body read fail,%s ", err.Error()) + } + for _, v := range respObj.Data { + if len(v.Expiration) > 10 { + v.Expiration = v.Expiration[:10] + } + expInt, _ := strconv.ParseInt(v.Expiration, 10, 64) + if v.TokenType == "refreshToken" { + coreRefreshToken = "Bearer " + v.Token + coreRefreshTokenExpTime = time.Unix(expInt, 0) + } + if v.TokenType == "accessToken" { + coreRequestToken = "Bearer " + v.Token + coreRequestTokenExpTime = time.Unix(expInt, 0) + } + } + return nil +} + +func (c *CoreToken) InitCoreToken() { + err := c.requestCoreToken(c.SubSystemKey) + if err != nil { + log.Printf("Init core token fail,error: %s ", err.Error()) + } else { + log.Println("Init core token success") + } +} + +func (c *CoreToken) GetCoreToken() string { + if c.BaseUrl == "" { + return "" + } + if coreRequestTokenExpTime.Unix() > time.Now().Unix() && coreRequestToken != "" { + return coreRequestToken + } + if coreRefreshTokenExpTime.Unix() > time.Now().Unix() && coreRefreshToken != "" { + err := c.refreshToken() + if err != nil { + log.Printf("Refresh token fail,%s ", err.Error()) + } else { + return coreRequestToken + } + } + err := c.requestCoreToken(c.SubSystemKey) + if err != nil { + log.Printf("Try to init core token fail,%s ", 
err.Error()) + } + return coreRefreshToken +} + +type JwtToken struct { + User string `json:"user"` + Expire int64 `json:"expire"` + Roles []string `json:"roles"` +} + +func DecodeJwtToken(token, key string) (result JwtToken, err error) { + if strings.HasPrefix(token, "Bearer") { + token = token[7:] + } + if key == "" || strings.HasPrefix(key, "{{") { + key = defaultSignedKey + } + keyBytes, err := ioutil.ReadAll(base64.NewDecoder(base64.RawStdEncoding, bytes.NewBufferString(key))) + if err != nil { + return result, fmt.Errorf("Decode core token fail,base64 decode error,%s ", err.Error()) + } + pToken, err := jwt.Parse(token, func(*jwt.Token) (interface{}, error) { + return keyBytes, nil + }) + if err != nil { + return result, fmt.Errorf("Decode core token fail,jwt parse error,%s ", err.Error()) + } + claimMap, ok := pToken.Claims.(jwt.MapClaims) + if !ok { + return result, fmt.Errorf("Decode core token fail,claims to map error,%s ", err.Error()) + } + result.User = fmt.Sprintf("%s", claimMap["sub"]) + result.Expire, err = strconv.ParseInt(fmt.Sprintf("%.0f", claimMap["exp"]), 10, 64) + if err != nil { + return result, fmt.Errorf("Decode core token fail,parse expire to int64 error,%s ", err.Error()) + } + roleListString := fmt.Sprintf("%s", claimMap["authority"]) + roleListString = roleListString[1 : len(roleListString)-1] + if strings.Contains(roleListString, ",") { + result.Roles = strings.Split(roleListString, ",") + } else if strings.Contains(roleListString, " ") { + result.Roles = strings.Split(roleListString, " ") + } else { + result.Roles = []string{roleListString} + } + return result, nil +} + +type AuthClaims struct { + Authority []string `json:"authority"` + jwt.StandardClaims +} + +func CreateJwtToken(user, key string, expire int64, permission []string) (signedToken string, err error) { + var keyBytes []byte + if key == "" || strings.HasPrefix(key, "{{") { + key = defaultSignedKey + } + keyBytes, err = 
ioutil.ReadAll(base64.NewDecoder(base64.RawStdEncoding, bytes.NewBufferString("Platform+Auth+Server+Secret"))) + if err != nil { + err = fmt.Errorf("Create token error with decode sign key:%s \n", err.Error()) + return + } + newClaims := AuthClaims{StandardClaims: jwt.StandardClaims{ExpiresAt: expire, Subject: user}, Authority: permission} + newToken := jwt.NewWithClaims(jwt.SigningMethodHS256, newClaims) + signedToken, err = newToken.SignedString(keyBytes) + if err != nil { + err = fmt.Errorf("Create token error with sign in key:%s \n", err.Error()) + } + return +} diff --git a/terraform-server/common/log/log.go b/terraform-server/common/log/log.go new file mode 100644 index 00000000..ecc54c21 --- /dev/null +++ b/terraform-server/common/log/log.go @@ -0,0 +1,87 @@ +package log + +import ( + "encoding/json" + "fmt" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/logger" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "go.uber.org/zap" + "strings" +) + +var ( + Logger *zap.Logger + AccessLogger *zap.Logger + DatabaseLogger *zap.Logger +) + +func InitLogger() { + baseLogDir := models.Config.Log.LogDir + if strings.HasSuffix(models.Config.Log.LogDir, "/") { + baseLogDir = baseLogDir[:len(baseLogDir)-1] + } + Logger = logger.InitArchiveZapLogger(logger.LogConfig{ + Name: "server", + FilePath: fmt.Sprintf("%s/terraform.log", baseLogDir), + LogLevel: models.Config.Log.Level, + ArchiveMaxSize: models.Config.Log.ArchiveMaxSize, + ArchiveMaxBackup: models.Config.Log.ArchiveMaxBackup, + ArchiveMaxDay: models.Config.Log.ArchiveMaxDay, + Compress: models.Config.Log.Compress, + }) + if models.Config.Log.AccessLogEnable { + AccessLogger = logger.InitArchiveZapLogger(logger.LogConfig{ + Name: "access", + FilePath: fmt.Sprintf("%s/terraform-access.log", baseLogDir), + LogLevel: models.Config.Log.Level, + ArchiveMaxSize: models.Config.Log.ArchiveMaxSize, + ArchiveMaxBackup: models.Config.Log.ArchiveMaxBackup, + 
ArchiveMaxDay: models.Config.Log.ArchiveMaxDay, + Compress: models.Config.Log.Compress, + }) + } + if models.Config.Log.DbLogEnable { + DatabaseLogger = logger.InitArchiveZapLogger(logger.LogConfig{ + Name: "database", + FilePath: fmt.Sprintf("%s/terraform-db.log", baseLogDir), + LogLevel: models.Config.Log.Level, + ArchiveMaxSize: models.Config.Log.ArchiveMaxSize, + ArchiveMaxBackup: models.Config.Log.ArchiveMaxBackup, + ArchiveMaxDay: models.Config.Log.ArchiveMaxDay, + Compress: models.Config.Log.Compress, + }) + } +} + +func Error(err error) zap.Field { + return zap.Error(err) +} + +func String(k, v string) zap.Field { + return zap.String(k, v) +} + +func Int(k string, v int) zap.Field { + return zap.Int(k, v) +} + +func Int64(k string, v int64) zap.Field { + return zap.Int64(k, v) +} + +func Float64(k string, v float64) zap.Field { + return zap.Float64(k, v) +} + +func JsonObj(k string, v interface{}) zap.Field { + b, err := json.Marshal(v) + if err == nil { + return zap.String(k, string(b)) + } else { + return zap.Error(err) + } +} + +func StringList(k string, v []string) zap.Field { + return zap.Strings(k, v) +} diff --git a/terraform-server/conf/default.json b/terraform-server/conf/default.json new file mode 100644 index 00000000..c6aadb43 --- /dev/null +++ b/terraform-server/conf/default.json @@ -0,0 +1,41 @@ +{ + "default_language": "en", + "http_server": { + "port": "8999", + "cross": true + }, + "log": { + "level": "debug", + "log_dir": "logs", + "access_log_enable": true, + "db_log_enable": true, + "archive_max_size": 64, + "archive_max_backup": 10, + "archive_max_day": 15, + "compress": true + }, + "database": { + "server": "127.0.0.1", + "port": "3306", + "user": "root", + "password": "root", + "database": "terraform", + "maxOpen": 50, + "maxIdle": 10, + "timeout": 60 + }, + "rsa_key_path": "/data/certs/rsa_key", + "wecube": { + "base_url": "", + "jwt_signing_key": "", + "sub_system_code": "", + "sub_system_key": "" + }, + "auth": { + "password_seed": 
"" + }, + "terraform_file_path": "/data/terraform/", + "terraform_cmd_path": "/usr/local/bin/terraform", + "terraform_provider_os_arch": "linux_amd64", + "version": "" +} diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/CHANGELOG.md b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/CHANGELOG.md new file mode 100644 index 00000000..63242f2e --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/CHANGELOG.md @@ -0,0 +1,1740 @@ +## 1.56.16 (Unreleased) + +## 1.56.15 (July 07, 2021) + +BUG FIXES + +* Resource `tencentcloud_tc_kubernetes_cluster` filter the request field of *bandwidth_package_id* when it is null +* Resource `tencentcloud_tc_kubernetes_node_pool` filter the request field of *bandwidth_package_id* when it is null + +## 1.56.14 (July 06, 2021) + +BUG FIXES + +* Resource `tencentcloud_tc_clb_listener` exec the plan will lead the resource rebuild. + +ENHANCEMENTS: + +* Resource `tencentcloud_elasticsearch_instance` create **ES** cluster add new parametes of *web_node_type_info*. 
+* Resource `tencentcloud_tc_instance` add *instance_count* to support create multiple consecutive name of instance +* Resource `tencentcloud_tc_kubernetes_cluster` supports change *internet_max_bandwidth_out* +* Resource `tencentcloud_tc_instance` create cvm instance add *bandwidth_package_id* + + +## 1.56.13 (July 02, 2021) + +BUG FIXES + +* Resource `TkeCvmCreateInfo.data_disk.disk_type` support CLOUD_HSSD and CLOUD_TSSD + +## 1.56.12 (July 02, 2021) + +BUG FIXES + +* Resource `TkeCvmCreateInfo.data_disk.disk_type` support CLOUD_HSSD + +## 1.56.11 + +BUG FIXES + +* Resource `tencentcloud_kubernetes_cluster` fix create cluster without *desired_pod_num* in tf, then crash +* Resource `tencentcloud_kubernetes_cluster` fix when upgrade terraform-provider-tencentcloud from v1.56.1 to newer, cluster_os force replacement +* Resource `tencentcloud_kubernetes_cluster` fix when upgrade terraform-provider-tencentcloud from v1.56.1 to newer, enable_customized_pod_cidr force replace + +## 1.56.10 + +BUG FIXES + +* Resource `tencentcloud_tcr_namespace` fix create two namespace and one name is substring of another, then got an error about more than 1 +* Resource `tencentcloud_tcr_namespace` fix create two repositories and one name is substring of another, then got an error about more than 1 + + +## 1.56.9 (Jun 09, 2021) + +BUG FIXES: + +* Resource `tencentcloud_instance` fix words spell, in tencendcloud/resource_tc_instance.go L45, data.tencentcloud_availability_zones.my_favorate_zones.zones.0.name change to data.tencentcloud_availability_zones.my_favorite_zones.zones.0.name". +* Resource `tencentcloud_kubernetes_clusters` fix the description of is_non_static_ip_mode + +ENHANCEMENTS: + +* Resource `tencentcloud_clb_target_group` add create target group. +* Resource `tencentcloud_clb_instance` add internal CLB supports security group. +* Resource `tencentcloud_clb_instance` add supports open and close CLB security group, default is open. 
+* Resource `tencentcloud_clb_instance` add external CLB create multi AZ instance. +* Resource `tencentcloud_kubernetes_cluster` add supports params of img_id to assign image. +* Resource `tencentcloud_as_scaling_group` add MultiZoneSubnetPolicy. + +## 1.56.8 (May 26, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_cluster_attachment.worker_config` add `desired_pod_num`. +* Resource `tencentcloud_kubernetes_cluster_attachment` add `worker_config_overrides`. +* Resource `tencentcloud_kubernetes_scale_worker` add `desired_pod_num`. +* Resource `tencentcloud_kubernetes_cluster` add `enable_customized_pod_cidr`, `base_pod_num`, `globe_desired_pod_num`, and `exist_instance`. +* Resource `tencentcloud_kubernetes_cluster` update available value of `cluster_os`. +* Resource `tencentcloud_as_lifecycle_hook` update `heartbeat_timeout` value ranges. + +## 1.56.7 (May 12, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_as_scaling_config` add `disk_type_policy`. +* Data Source `tencentcloud_as_scaling_configs` add `disk_type_policy` as result. + +## 1.56.6 (May 7, 2021) + +BUG FIXES: + +* Resource: `tencentcloud_scf_function` filed `cls_logset_id` and `cls_logset_id` change to Computed. + +## 1.56.5 (April 26, 2021) + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster` upgrade cluster timeout from 3 to 9 minutes. + +## 1.56.4 (April 26, 2021) + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster` upgrade instances timeout depend on instance number. + +## 1.56.3 (April 25, 2021) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add `upgrade_instances_follow_cluster` for upgrade all instances of cluster. + +## 1.56.2 (April 19, 2021) + +BUG FIXES: + +* Remove `ResourceInsufficient` from `retryableErrorCode`. + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` upgrade `cluster_version` will send old `cluster_extra_args` to tke. + +## 1.56.1 (April 6,2021) + +BUG FIXES: + +* Fix release permission denied. 
+ +## 1.56.0 (April 2,2021) + +FEATURES: + +* **New Resource**: `tencentcloud_cdh_instance` +* **New Data Source**: `tencentcloud_cdh_instances` + +ENHANCEMENTS: + +* Resource: `tencentcloud_instance` add `cdh_instance_type` and `cdh_host_id` to support create instance based on cdh. + +## 1.55.2 (March 29, 2021) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add `node_pool_global_config` to support node pool global config setting. + +## 1.55.1 (March 26, 2021) + +ENHANCEMENTS: + +* Resource: `tencentcloud_tcr_vpc_attachment` add more time for retry. + +## 1.55.0 (March 26, 2021) + +FEATURES: + +* **New Resource**: `tencentcloud_ssm_secret` +* **New Resource**: `tencentcloud_ssm_secret_version` +* **New Data Source**: `tencentcloud_ssm_secrets` +* **New Data Source**: `tencentcloud_ssm_secret_versions` + +ENHANCEMENTS: + +* Resource: `tencentcloud_ssl_certificate` refactor logic with api3.0 . +* Data Source: `tencentcloud_ssl_certificates` refactor logic with api3.0 . +* Resource `tencentcloud_kubernetes_cluster` add `disaster_recover_group_ids` to set disaster recover group ID. +* Resource `tencentcloud_kubernetes_scale_worker` add `disaster_recover_group_ids` to set disaster recover group ID. + +## 1.54.1 (March 24, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_tcr_vpc_attachment` add `enable_public_domain_dns`, `enable_vpc_domain_dns` to set whether to enable dns. +* Data Source `tencentcloud_tcr_vpc_attachments` add `enable_public_domain_dns`, `enable_vpc_domain_dns`. + +## 1.54.0 (March 22, 2021) + +FEATURES: + +* **New Resource**: `tencentcloud_kms_key` +* **New Resource**: `tencentcloud_kms_external_key` +* **New Data Source**: `tencentcloud_kms_keys` + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_cluster_attachment` add `unschedulable` to set whether the joining node participates in the schedule. +* Resource `tencentcloud_kubernetes_cluster` add `unschedulable` to set whether the joining node participates in the schedule. 
+* Resource `tencentcloud_kubernetes_node_pool` add `unschedulable` to set whether the joining node participates in the schedule. +* Resource `tencentcloud_kubernetes_scale_worker` add `unschedulable` to set whether the joining node participates in the schedule. + +## 1.53.9 (March 19, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_tcr_instance` add `open_public_network` to control public network access. +* Resource `tencentcloud_cfs_file_system` add `storage_type` to change file service StorageType. + +## 1.53.8 (March 15, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_instance` add `cam_role_name` to support binding role to cvm instance. + +BUG FIXES: + +* Resource `tencentcloud_instance` fix bug that waiting 5 minutes when cloud disk sold out. +* Resource: `tencentcloud_tcr_instance` fix bug that only one tag is effective when setting multiple tags. + +## 1.53.7 (March 10, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_node_pool` add `internet_max_bandwidth_out`, `public_ip_assigned` to support internet traffic setting. +* Resource `tencentcloud_instance` remove limit of `data_disk_size`. + +## 1.53.6 (March 09, 2021) + +ENHANCEMENTS: +* Resource `tencentcloud_eip` support `internet_max_bandwidth_out` modification. +* Resource `tencentcloud_kubernetes_cluster` add `hostname` to support node hostname setting. +* Resource `tencentcloud_kubernetes_scale_worker` add `hostname` to support node hostname setting. + +## 1.53.5 (March 01, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_clb_instance` add `internet_charge_type`, `internet_bandwidth_max_out` to support internet traffic setting with OPEN CLB instance. +* Resource `tencentcloud_clb_rule` add `http2_switch` to support HTTP2 protocol setting. +* Resource `tencentcloud_kubernetes_cluster` add `lan_ip` to show node LAN IP. +* Resource `tencentcloud_kubernetes_scale_worker` add `lan_ip` to show node LAN IP. 
+* Resource `tencentcloud_kubernetes_cluster_attachment` add `state` to show node state. +* Resource `tencentcloud_clb_rule` support certificate modifying. +* Data Source `tencentcloud_clb_instances` add `internet_charge_type`, `internet_bandwidth_max_out`. +* Data Source `tencentcloud_clb_rules` add `http2_switch`. + +BUG FIXES: + +* Resource: `tencentcloud_clb_attachment` fix bug that attach more than 20 targets will failed. + +## 1.53.4 (February 08, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_scale_worker` add `data_disk`, `docker_graph_path` to support advanced instance setting. +* Resource `tencentcloud_instance` add tags to the disks created with the instance. + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster_attachment` fix bug that only one extra argument set successfully. +* Resource: `tencentcloud_as_scaling_policy` fix bug that missing required parameters error happened when update metric parameters. + +## 1.53.3 (February 02, 2021) + +ENHANCEMENTS: + +* Data Source `tencentcloud_cbs_storages` add `throughput_performance` to support adding extra performance to the cbs resources. +* Resource `tencentcloud_kubernetes_cluster_attachment` add `hostname` to support setting hostname with the attached instance. + +## 1.53.2 (February 01, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_cbs_storage` add `throughput_performance` to support adding extra performance to the cbs resources. + +BUG FIXES: + +* Resource: `tencentcloud_cos_bucket` fix bug that error happens when applying unsupported logging region. +* Resource: `tencentcloud_as_scaling_policy` fix bug that missing required parameters error happened when update metric parameters. + +## 1.53.1 (January 23, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_instance` add `throughput_performance` to support adding extra performance to the data disks. 
+* Resource `tencentcloud_kubernetes_cluster_attachment` add `file_system`, `auto_format_and_mount` and `mount_target` to support advanced instance setting. +* Resource `tencentcloud_kubernetes_node_pool` add `file_system`, `auto_format_and_mount` and `mount_target` to support advanced instance setting. +* Resource `tencentcloud_kubernetes_node_pool` add `scaling_mode` to support scaling mode setting. +* Resource `tencentcloud_kubernetes` support version upgrade. + +BUG FIXES: + +* Resource: `tencentcloud_gaap_http_rule` fix bug that exception happens when create more than one rule. + +## 1.53.0 (January 15, 2021) + +FEATURES: + +* **New Resource**: `tencentcloud_ssl_pay_certificate` to support ssl pay certificate. + +ENHANCEMENTS: + +* Resource `tencentcloud_ccn` add `charge_type` to support billing mode setting. +* Resource `tencentcloud_ccn` add `bandwidth_limit_type` to support the speed limit type setting. +* Resource `tencentcloud_ccn_bandwidth_limit` add `dst_region` to support destination area restriction setting. +* Resource `tencentcloud_cdn_domain` add `range_origin_switch` to support range back to source configuration. +* Resource `tencentcloud_cdn_domain` add `rule_cache` to support advanced path cache configuration. +* Resource `tencentcloud_cdn_domain` add `request_header` to support request header configuration. +* Data Source `tencentcloud_ccn_instances` add `charge_type` to support billing mode. +* Data Source `tencentcloud_ccn_instances` add `bandwidth_limit_type` to support the speed limit type. +* Data Source `tencentcloud_ccn_bandwidth_limit` add `dst_region` to support destination area restriction. +* Data Source `tencentcloud_cdn_domains` add `range_origin_switch` to support range back to source configuration. +* Data Source `tencentcloud_cdn_domains` add `rule_cache` to support advanced path cache configuration. +* Data Source `tencentcloud_cdn_domains` add `request_header` to support request header configuration. 
+ +## 1.52.0 (December 28, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_kubernetes_node_pool` to support node management. + +DEPRECATED: + +* Resource: `tencentcloud_kubernetes_as_scaling_group` replaced by `tencentcloud_kubernetes_node_pool`. + +## 1.51.1 (December 22, 2020) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_cluster_attachment` add `extra_args` to support node extra arguments setting. +* Resource `tencentcloud_cos_bucket` add `log_enbale`, `log_target_bucket` and `log_prefix` to support log status setting. + +## 1.51.0 (December 15, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_tcr_vpc_attachment` +* **New Data Source**: `tencentcloud_tcr_vpc_attachments` + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` support `name`, `project_id` and `description` modification. +* Doc: optimize document. + +## 1.50.0 (December 08, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_address_template` +* **New Resource**: `tencentcloud_address_template_group` +* **New Resource**: `tencentcloud_protocol_template` +* **New Resource**: `tencentcloud_protocol_template_group` +* **New Data Source**: `tencentcloud_address_templates` +* **New Data Source**: `tencentcloud_address_template_groups` +* **New Data Source**: `tencentcloud_protocol_templates` +* **New Data Source**: `tencentcloud_protocol_template_groups` + +ENHANCEMENTS: + +* Resource `tencentcloud_sercurity_group_rule` add `address_template` and `protocol_template` to support building new security group rule with resource `tencentcloud_address_template` and `tencentcloud_protocol_template`. +* Doc: optimize document directory. + +BUG FIXES: + +* Resource: `tencentcloud_cos_bucket` fix bucket name validator. + +## 1.49.1 (December 01, 2020) + +ENHANCEMENTS: + +* Doc: Update directory of document. 
+ +## 1.49.0 (November 27, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_tcr_instance` +* **New Resource**: `tencentcloud_tcr_token` +* **New Resource**: `tencentcloud_tcr_namespace` +* **New Resource**: `tencentcloud_tcr_repository` +* **New Data Source**: `tencentcloud_tcr_instances` +* **New Data Source**: `tencentcloud_tcr_tokens` +* **New Data Source**: `tencentcloud_tcr_namespaces` +* **New Data Source**: `tencentcloud_tcr_repositories` +* **New Resource**: `tencentcloud_cos_bucket_policy` + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_as_scaling_group` support `max_size` and `min_size` modification. + +## 1.48.0 (November 20, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_sqlserver_basic_instance` +* **New Data Source**: `tencentcloud_sqlserver_basic_instances` + +ENHANCEMENTS: + +* Resource: `tencentcloud_clb_listener` support configure HTTP health check for TCP listener([#539](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/539)). +* Resource: `tencentcloud_clb_listener` add computed argument `target_type`. +* Data Source: `tencentcloud_clb_listeners` support getting HTTP health check config for TCP listener. + +DEPRECATED: +* Resource: `tencentcloud_clb_target_group_attachment`: optional argument `targrt_group_id` is no longer supported, replace by `target_group_id`. + +## 1.47.0 (November 13, 2020) + +ENHANCEMENTS: +* Resource: `tencentcloud_clb_listener` support import. +* Resource: `tencentcloud_clb_listener` add computed argument `listener_id`. +* Resource: `tencentcloud_clb_listener_rule` support import. +* Resource: `tencentcloud_cdn_domain` add example that use COS bucket url as origin. +* Resource: `tencentcloud_sqlserver_instance` add new argument `tags`. +* Resource: `tencentcloud_sqlserver_readonly_instance` add new argument `tags`. +* Resource: `tencentcloud_elasticsearch_instance` support `node_type` and `disk_size` modification. 
+* Data Source: `tencentcloud_instance_types` add argument `exclude_sold_out` to support filtering sold out instance types. +* Data Source: `tencentcloud_sqlserver_instances` add new argument `tags`. +* Data Source: `tencentcloud_instance_types` add argument `exclude_sold_out` to support filtering sold out instance types. + +BUG FIXES: + +* Resource: `tencentcloud_elasticsearch_instance` fix inconsistent bug. +* Resource: `tencentcloud_redis_instance` fix incorrect number when updating `mem_size`. +* Data Source: `tencentcloud_redis_instances` fix incorrect number for `mem_size`. + +## 1.46.4 (November 6, 2020) + +BUG FIXES: +* Resource: `tencentcloud_kubernetes_cluster` fix force replacement when updating `docker_graph_path`. + +## 1.46.3 (November 6, 2020) + +ENHANCEMENTS: +* Resource: `tencentcloud_kubernetes_cluster` add more values with argument `cluster_os` to support linux OS system. + +## 1.46.2 (November 5, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add new argument `kube_config`. +* Resource: `tencentcloud_kubernetes_cluster` add value `tlinux2.4x86_64` with argument `cluster_os` to support linux OS system. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `mount_target` to support set disk mount path. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `docker_graph_path` to support set docker graph path. +* Resource: `tencentcloud_clb_redirection` add new argument `delete_all_auto_rewirte` to delete all auto-associated redirection when destroying the resource. +* Resource: `tencentcloud_kubernetes_scale_worker` add new argument `labels` to support scale worker labels. +* Data Source: `tencentcloud_kubernetes_clusters` add new argument `kube_config`. +* Data Source: `tencentcloud_availability_regions` support getting local region info by setting argument `name` with value `default`. +* Docs: update argument description. 
+ +BUG FIXES: + +* Resource: `tencentcloud_clb_redirection` fix inconsistent bug when creating more than one auto redirection. +* Resource: `tencentcloud_redis_instance` fix updating issue when redis `type_id` is set `5`. + +## 1.46.1 (October 29, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_cos_bucket` add new argument `cos_bucket_url`. +* Resource: `tencentcloud_nat_gateway` add new argument `tags`. +* Resource: `tencentcloud_postgresql_instance` add new argument `tags`. +* Data Source: `tencentcloud_cos_buckets` add new argument `cos_bucket_url`. +* Data Source: `tencentcloud_nat_gateways` add new argument `tags`. +* Data Source: `tencentcloud_postgresql_instances` add new argument `tags`. + +## 1.46.0 (October 26, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_api_gateway_api` +* **New Resource**: `tencentcloud_api_gateway_service` +* **New Resource**: `tencentcloud_api_gateway_custom_domain` +* **New Resource**: `tencentcloud_api_gateway_usage_plan` +* **New Resource**: `tencentcloud_api_gateway_usage_plan_attachment` +* **New Resource**: `tencentcloud_api_gateway_ip_strategy` +* **New Resource**: `tencentcloud_api_gateway_strategy_attachment` +* **New Resource**: `tencentcloud_api_gateway_api_key` +* **New Resource**: `tencentcloud_api_gateway_api_key_attachment` +* **New Resource**: `tencentcloud_api_gateway_service_release` +* **New Data Source**: `tencentcloud_api_gateway_apis` +* **New Data Source**: `tencentcloud_api_gateway_services` +* **New Data Source**: `tencentcloud_api_gateway_throttling_apis` +* **New Data Source**: `tencentcloud_api_gateway_throttling_services` +* **New Data Source**: `tencentcloud_api_gateway_usage_plans` +* **New Data Source**: `tencentcloud_api_gateway_ip_strategies` +* **New Data Source**: `tencentcloud_api_gateway_customer_domains` +* **New Data Source**: `tencentcloud_api_gateway_usage_plan_environments` +* **New Data Source**: `tencentcloud_api_gateway_api_keys` + +## 1.45.3 (October 21, 2020) + +BUG 
FIXES: + +* Resource: `tencentcloud_sqlserver_instance` Fix the error of releasing associated resources when destroying sqlserver postpaid instance. +* Resource: `tencentcloud_sqlserver_readonly_instance` Fix the bug that the instance cannot be recycled when destroying sqlserver postpaid instance. +* Resource: `tencentcloud_clb_instance` fix force new when updating tags. +* Resource: `tencentcloud_redis_backup_config` fix doc issues. +* Resource: `tencentcloud_instance` fix `keep_image_login` force new issue when updating terraform version. +* Resource: `tencentcloud_clb_instance` fix tag creation bug. + +## 1.45.2 (October 19, 2020) + +BUG FIXES: +* Resource: `tencentcloud_mysql_instance` fix creating prepaid instance error. + +## 1.45.1 (October 16, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_clb_target_group_instance_attachment` update doc. +* Resource: `tencentcloud_clb_target_group_attachment` update doc. + +## 1.45.0 (October 15, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_clb_target_group_attachment` +* **New Resource**: `tencentcloud_clb_target_group` +* **New Resource**: `tencentcloud_clb_target_group_instance_attachment` +* **New Resource**: `tencentcloud_sqlserver_publish_subscribe` +* **New Resource**: `tencentcloud_vod_adaptive_dynamic_streaming_template` +* **New Resource**: `tencentcloud_vod_procedure_template` +* **New Resource**: `tencentcloud_vod_snapshot_by_time_offset_template` +* **New Resource**: `tencentcloud_vod_image_sprite_template` +* **New Resource**: `tencentcloud_vod_super_player_config` +* **New Data Source**: `tencentcloud_clb_target_groups` +* **New Data Source**: `tencentcloud_sqlserver_publish_subscribes` +* **New Data Source**: `tencentcloud_vod_adaptive_dynamic_streaming_templates` +* **New Data Source**: `tencentcloud_vod_image_sprite_templates` +* **New Data Source**: `tencentcloud_vod_procedure_templates` +* **New Data Source**: `tencentcloud_vod_snapshot_by_time_offset_templates` +* **New Data Source**: 
`tencentcloud_vod_super_player_configs` + +ENHANCEMENTS: + +* Resource: `tencentcloud_clb_listener_rule` add new argument `target_type` to support backend target type with rule. +* Resource: `tencentcloud_mysql_instance` modify argument `engine_version` to support mysql 8.0. +* Resource: `tencentcloud_clb_listener_rule` add new argument `forward_type` to support backend protocol([#522](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/522)). +* Resource: `tencentcloud_instance` add new argument `keep_image_login` to support keeping image login. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `extra_args` to support Kubelet. +* Resource: `tencentcloud_kubernetes_scale_worker` add new argument `extra_args` to support Kubelet. +* Resource: `tencentcloud_kubernetes_as_scaling_group` add new argument `extra_args` to support Kubelet. + +## 1.44.0 (September 25, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_cynosdb_cluster` +* **New Resource**: `tencentcloud_cynosdb_readonly_instance`. +* **New Data Source**: `tencentcloud_cynosdb_clusters` +* **New Data Source**: `tencentcloud_cynosdb_readonly_instances`. + +ENHANCEMENTS: + +* Resource: `tencentcloud_mongodb_standby_instance` change example type to `POSTPAID`. +* Resource: `tencentcloud_instance` add new argument `encrypt` to support data disk with encrypt. +* Resource: `tencentcloud_elasticsearch` add new argument `encrypt` to support disk with encrypt. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `cam_role_name` to support authorization with instances. 
+ +## 1.43.0 (September 18, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_image` +* **New Resource**: `tencentcloud_audit` +* **New Data Source**: `tencentcloud_audits` +* **New Data Source**: `tencentcloud_audit_cos_regions` +* **New Data Source**: `tencentcloud_audit_key_alias` + +ENHANCEMENTS: + +* Resource: `tencentcloud_instance` add new argument `data_disk_snapshot_id` to support data disk with `SnapshotId`([#469](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/469)) +* Data Source: `tencentcloud_instances` support filter by tags. + +## 1.42.2 (September 14, 2020) + +BUG FIXES: +* Resource: `tencentcloud_instance` fix `key_name` update error([#515](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/515)). + +## 1.42.1 (September 10, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_mongodb_instance` Fix the error of releasing associated resources when destroying mongodb postpaid instance. +* Resource: `tencentcloud_mongodb_sharding_instance` Fix the error of releasing associated resources when destroying mongodb postpaid sharding instance. +* Resource: `tencentcloud_mongodb_standby_instance` Fix the error of releasing associated resources when destroying mongodb postpaid standby instance. + +## 1.42.0 (September 8, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_ckafka_topic` +* **New Data Source**: `tencentcloud_ckafka_topics` + +ENHANCEMENTS: + +* Doc: optimize document directory. +* Resource: `tencentcloud_mongodb_instance`, `tencentcloud_mongodb_sharding_instance` and `tencentcloud_mongodb_standby_instance` remove system reserved tag `project`. + +## 1.41.3 (September 3, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_vpc_acl_attachment` perfect example field `subnet_ids` to `subnet_id`([#505](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/505)). +* Resource: `tencentcloud_cbs_storage_attachment` support import. 
+* Resource: `tencentcloud_eip_association` support import. +* Resource: `tencentcloud_route_table_entry` support import. +* Resource: `tencentcloud_acl_attachment` support import. + +## 1.41.2 (August 28, 2020) + +BUG FIXES: +* Resource: `tencentcloud_vpn_connection` fix `security_group_policy` update issue when apply repeatedly. +* Resource: `tencentcloud_vpn_connection` fix inconsistent state when deleted on console. + +## 1.41.1 (August 27, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_vpn_gateway` fix force new issue when apply repeatedly. +* Resource: `tencentcloud_vpn_connection` fix force new issue when apply repeatedly. +* Resource: `tencentcloud_instance` support for adjusting `internet_max_bandwidth_out` without forceNew when attribute `internet_charge_type` within `TRAFFIC_POSTPAID_BY_HOUR`,`BANDWIDTH_POSTPAID_BY_HOUR`,`BANDWIDTH_PACKAGE` ([#498](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/498)). + +## 1.41.0 (August 17, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_sqlserver_instance` +* **New Resource**: `tencentcloud_sqlserver_readonly_instance` +* **New Resource**: `tencentcloud_sqlserver_db` +* **New Resource**: `tencentcloud_sqlserver_account` +* **New Resource**: `tencentcloud_sqlserver_db_account_attachment` +* **New Resource**: `tencentcloud_vpc_acl` +* **New Resource**: `tencentcloud_vpc_acl_attachment` +* **New Resource**: `tencentcloud_ckafka_acl` +* **New Resource**: `tencentcloud_ckafka_user` +* **New Data Source**: `tencentcloud_sqlserver_instance` +* **New Data Source**: `tencentcloud_sqlserver_readonly_groups` +* **New Data Source**: `tencentcloud_vpc_acls` +* **New Data Source**: `tencentcloud_ckafka_acls` +* **New Data Source**: `tencentcloud_ckafka_users` + +DEPRECATED: + +* Data Source: `tencentcloud_cdn_domains` optional argument `offset` is no longer supported. 
+ +ENHANCEMENTS: + +* Resource: `tencentcloud_mongodb_instance`, `tencentcloud_mongodb_sharding_instance` and `tencentcloud_mongodb_standby_instance` remove spec update validation. + +## 1.40.3 (August 11, 2020) + +ENHANCEMENTS: + +* Data Source: `tencentcloud_kubernetes_clusters`add new attributes `cluster_as_enabled`,`node_name_type`,`cluster_extra_args`,`network_type`,`is_non_static_ip_mode`,`kube_proxy_mode`,`service_cidr`,`eni_subnet_ids`,`claim_expired_seconds` and `deletion_protection`. + +BUG FIXES: + +* Resource: `tencentcloud_vpn_gateway` fix creation of instance when `vpc_id` is specified. +* Resource: `tencentcloud_vpn_connection` fix creation of instance when `vpc_id` is specified. +* Resource: `tencentcloud_instance` fix `internet_charge_type` inconsistency when public ip is not allocated. + +## 1.40.2 (August 08, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix accidentally fail to delete prepaid instance ([#485](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/485)). + +## 1.40.1 (August 05, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_vpn_connection` fix mulit `security_group_policy` is not supported ([#487](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/487)). + +## 1.40.0 (July 31, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_mongodb_standby_instance` + +ENHANCEMENTS: + +* Resource: `tencentcloud_gaap_http_rule` argument `realservers` now is optional. +* Resource: `tencentcloud_kubernetes_cluster` supports multiple `availability_zone`. +* Data Source: `tencentcloud_mongodb_instances` add new argument `charge_type` and `auto_renew_flag` to support prepaid type. +* Resource: `tencentcloud_mongodb_instance` supports prepaid type, new mongodb SDK version `2019-07-25` and standby instance. +* Resource: `tencentcloud_mongodb_sharding_instance` supports prepaid type, new mongodb SDK version `2019-07-25` and standby instance. 
+* Resource: `tencentcloud_security_group_lite_rule` refine update process and doc. + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix set `key_name` error. + +## 1.39.0 (July 18, 2020) + +ENHANCEMENTS: + +* upgrade terraform 0.13 +* update readme to new repository + +## 1.38.3 (July 13, 2020) + +ENHANCEMENTS: + +* Data Source: `tencentcloud_images` supports list of snapshots. +* Resource: `tencentcloud_kubernetes_cluster_attachment` add new argument `worker_config` to support config with existing instances. +* Resource: `tencentcloud_ccn` add new argument `tags` to support tags settings. +* Resource: `tencentcloud_cfs_file_system` add new argument `tags` to support tags settings. + +BUG FIXES: + +* Resource: `tencentcloud_gaap_layer4_listener` fix error InvalidParameter when destroy resource. +* Resource: `tencentcloud_gaap_layer7_listener` fix error InvalidParameter when destroy resource. +* Resource: `tencentcloud_cdn_domain` fix incorrect setting `server_certificate_config`, `client_certificate_config` caused the program to crash. + +## 1.38.2 (July 03, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix `allocate_public_ip` inconsistency when eip is attached to the cvm. +* Resource: `tencentcloud_mysql_instance` fix auto-forcenew on `charge_type` and `pay_type` when upgrading terraform version. ([#459](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/pull/459)). + +## 1.38.1 (June 30, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_cos_bucket` fix creation failure. + +## 1.38.0 (June 29, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_cdn_domains` + +BUG FIXES: + +* Resource: `tencentcloud_gaap_http_domain` fix a condition for setting client certificate ids([#454](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/pull/454)). 
+ +## 1.37.0 (June 23, 2020) + +FEATURES: +* **New Resource**: `tencentcloud_postgresql_instance` +* **New Data Source**: `tencentcloud_postgresql_instances` +* **New Data Source**: `tencentcloud_postgresql_speccodes` +* **New Data Source**: `tencentcloud_sqlserver_zone_config` + +ENHANCEMENTS: + +* Resource: `tencentcloud_mongodb_instance` support more machine type. + +## 1.36.1 (June 12, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add new argument `labels`. +* Resource: `tencentcloud_kubernetes_as_scaling_group` add new argument `labels`. +* Resource: `tencentcloud_cos_bucket` add new arguments `encryption_algorithm` and `versioning_enable`. + +## 1.36.0 (June 08, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_availability_regions` + +ENHANCEMENTS: + +* Data Source: `tencentcloud_redis_instances` add new argument `charge_type` to support prepaid type. +* Resource: `tencentcloud_redis_instance` add new argument `charge_type`, `prepaid_period` and `force_delete` to support prepaid type. +* Resource: `tencentcloud_mysql_instance` add new argument `force_delete` to support soft deletion. +* Resource: `tencentcloud_mysql_readonly_instance` add new argument `force_delete` to support soft deletion. + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix `allocate_public_ip` inconsistency when eip is attached to the cvm. + +DEPRECATED: +* Data Source: `tencentcloud_mysql_instances`: optional argument `pay_type` is no longer supported, replace by `charge_type`. +* Resource: `tencentcloud_mysql_instance`: optional arguments `pay_type` and `period` are no longer supported, replace by `charge_type` and `prepaid_period`. +* Resource: `tencentcloud_mysql_readonly_instance`: optional arguments `pay_type` and `period` are no longer supported, replace by `charge_type` and `prepaid_period`. 
+* Resource: `tencentcloud_tcaplus_group` replace by `tencentcloud_tcaplus_tablegroup` +* Data Source: `tencentcloud_tcaplus_groups` replace by `tencentcloud_tcaplus_tablegroups` +* Resource: `tencentcloud_tcaplus_tablegroup`,`tencentcloud_tcaplus_idl` and `tencentcloud_tcaplus_table` arguments `group_id`/`group_name` replace by `tablegroup_id`/`tablegroup_name` +* Data Source: `tencentcloud_tcaplus_groups`,`tencentcloud_tcaplus_idls` and `tencentcloud_tcaplus_tables` arguments `group_id`/`group_name` replace by `tablegroup_id`/`tablegroup_name` + +## 1.35.1 (June 02, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_as_scaling_config`, `tencentcloud_eip` and `tencentcloud_kubernetes_cluster` remove the validate function of `internet_max_bandwidth_out`. +* Resource: `tencentcloud_vpn_gateway` update available value of `bandwidth`. + +## 1.35.0 (June 01, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_elasticsearch_instances` +* **New Resource**: `tencentcloud_elasticsearch_instance` + +## 1.34.0 (May 28, 2020) + +ENHANCEMENTS: + +* upgrade terraform-plugin-sdk + +## 1.33.2 (May 25, 2020) + +DEPRECATED: +* Data Source: `tencentcloud_tcaplus_applications` replace by `tencentcloud_tcaplus_clusters`,optional arguments `app_id` and `app_name` are no longer supported, replace by `cluster_id` and `cluster_name` +* Data Source: `tencentcloud_tcaplus_zones` replace by `tencentcloud_tcaplus_groups`,optional arguments `app_id`,`zone_id` and `zone_name` are no longer supported, replace by `cluster_id`,`group_id` and `cluster_name` +* Data Source: `tencentcloud_tcaplus_tables` optional arguments `app_id` and `zone_id` are no longer supported, replace by `cluster_id` and `group_id` +* Data Source: `tencentcloud_tcaplus_idls`: optional argument `app_id` is no longer supported, replace by `cluster_id`. 
+* Resource: `tencentcloud_tcaplus_application` replace by `tencentcloud_tcaplus_cluster`, input argument `app_name` is no longer supported, replace by `cluster_name`
+* Resource: `tencentcloud_tcaplus_zone` replace by `tencentcloud_tcaplus_group`, input arguments `app_id` and `zone_name` are no longer supported, replace by `cluster_id` and `group_name`
+* Resource: `tencentcloud_tcaplus_idl` input arguments `app_id` and `zone_id` are no longer supported, replace by `cluster_id` and `group_id`
+* Resource: `tencentcloud_tcaplus_table` input arguments `app_id` and `zone_id` are no longer supported, replace by `cluster_id` and `group_id`
+* Resource: `tencentcloud_redis_instance`: optional argument `type` is no longer supported, replace by `type_id`.
+* Data Source: `tencentcloud_redis_instances`: output argument `type` is no longer supported, replace by `type_id`.
+* Data Source: `tencentcloud_redis_zone_config`: output argument `type` is no longer supported, replace by `type_id`.
+
+## 1.33.1 (May 22, 2020)
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_redis_instances` add new argument `type_id`, `redis_shard_num`, `redis_replicas_num`
+* Data Source: `tencentcloud_redis_zone_config` add new output arguments `type_id`, `redis_shard_nums`, `redis_replicas_nums`
+* Data Source: `tencentcloud_ccn_instances` add new type `VPNGW` for field `instance_type`
+* Data Source: `tencentcloud_vpn_gateways` add new type `CCN` for field `type`
+* Resource: `tencentcloud_redis_instance` add new argument `type_id`, `redis_shard_num`, `redis_replicas_num`
+* Resource: `tencentcloud_ccn_attachment` add new type `CNN_INSTANCE_TYPE_VPNGW` for field `instance_type`
+* Resource: `tencentcloud_vpn_gateway` add new type `CCN` for field `type`
+
+BUG FIXES:
+
+* Resource: `tencentcloud_cdn_domain` fix `https_config` inconsistency after apply([#413](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/413)).
+ +DEPRECATED: + +* Resource: `tencentcloud_redis_instance`: optional argument `type` is no longer supported, replace by `type_id`. +* Data Source: `tencentcloud_redis_instances`: output argument `type` is no longer supported, replace by `type_id`. +* Data Source: `tencentcloud_redis_zone_config`: output argument `type` is no longer supported, replace by `type_id`. + +## 1.33.0 (May 18, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_monitor_policy_conditions` +* **New Data Source**: `tencentcloud_monitor_data` +* **New Data Source**: `tencentcloud_monitor_product_event` +* **New Data Source**: `tencentcloud_monitor_binding_objects` +* **New Data Source**: `tencentcloud_monitor_policy_groups` +* **New Data Source**: `tencentcloud_monitor_product_namespace` +* **New Resource**: `tencentcloud_monitor_policy_group` +* **New Resource**: `tencentcloud_monitor_binding_object` +* **New Resource**: `tencentcloud_monitor_binding_receiver` + +ENHANCEMENTS: + +* Data Source: `tencentcloud_instances` add new output argument `instance_charge_type_prepaid_renew_flag`. +* Data Source: `tencentcloud_cbs_storages` add new output argument `prepaid_renew_flag`. +* Data Source: `tencentcloud_cbs_storages` add new output argument `charge_type`. +* Resource: `tencentcloud_instance` support update with argument `instance_charge_type_prepaid_renew_flag`. +* Resource: `tencentcloud_cbs_storage` add new argument `force_delete`. +* Resource: `tencentcloud_cbs_storage` add new argument `charge_type`. +* Resource: `tencentcloud_cbs_storage` add new argument `prepaid_renew_flag`. +* Resource: `tencentcloud_cdn_domain` add new argument `full_url_cache`([#405](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/405)). + +DEPRECATED: + +* Resource: `tencentcloud_cbs_storage`: optional argument `period` is no longer supported. + +## 1.32.1 (April 30, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_ccn_attachment` add new argument `ccn_uin`. 
+* Resource: `tencentcloud_instance` add new argument `force_delete`.
+
+BUG FIXES:
+
+* Resource: `tencentcloud_scf_function` fix update `zip_file`.
+
+## 1.32.0 (April 20, 2020)
+
+FEATURES:
+
+* **New Resource**: `tencentcloud_kubernetes_cluster_attachment`([#285](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/285)).
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_cdn_domain` add new attribute `cname`([#395](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/395)).
+
+BUG FIXES:
+
+* Resource: `tencentcloud_cos_bucket_object` mark the object as destroyed when the object does not exist.
+
+## 1.31.2 (April 17, 2020)
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_cbs_storage` support modify `tags`.
+
+## 1.31.1 (April 14, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_key_pair` fix bug when trying to destroy resources containing CVM and key pair([#375](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/375)).
+* Resource: `tencentcloud_clb_attachment` fix bug when trying to destroy multiple attachments in the array.
+* Resource: `tencentcloud_cam_group_membership` fix bug when trying to destroy multiple users in the array.
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_mysql_account` add new argument `host`([#372](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/372)).
+* Resource: `tencentcloud_mysql_account_privilege` add new argument `account_host`([#372](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/372)).
+* Resource: `tencentcloud_mysql_privilege` add new argument `account_host`([#372](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/372)).
+* Resource: `tencentcloud_mysql_readonly_instance` check master monitor data before create([#379](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/379)).
+* Resource: `tencentcloud_tcaplus_application` remove the pull password from server.
+* Resource: `tencentcloud_instance` support import `allocate_public_ip`([#382](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/382)). +* Resource: `tencentcloud_redis_instance` add two redis types. +* Data Source: `tencentcloud_vpc_instances` add new argument `cidr_block`,`tag_key` ([#378](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/378)). +* Data Source: `tencentcloud_vpc_route_tables` add new argument `tag_key`,`vpc_id`,`association_main` ([#378](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/378)). +* Data Source: `tencentcloud_vpc_subnets` add new argument `cidr_block`,`tag_key`,`is_remote_vpc_snat` ([#378](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/378)). +* Data Source: `tencentcloud_mysql_zone_config` and `tencentcloud_redis_zone_config` remove region check. + +## 1.31.0 (April 07, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_cdn_domain` + +ENHANCEMENTS: + +* Data Source: `tencentcloud_cam_users` add new argument `user_id`. +* Resource: `tencentcloud_vpc` add retry logic. + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix timeout error when modify password. + +## 1.30.7 (March 31, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_as_scaling_group` set a value to argument `key_ids` cause error . + +## 1.30.6 (March 30, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_tcaplus_idl` add new argument `zone_id`. +* Resource: `tencentcloud_cam_user` add new argument `force_delete`.([#354](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/354)) +* Data Source: `tencentcloud_vpc_subnets` add new argument `vpc_id`. + +## 1.30.5 (March 19, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_key_pair` will be replaced when `public_key` contains comment. +* Resource: `tencentcloud_scf_function` upload local file error. 
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_scf_function` runtime support nodejs8.9 and nodejs10.15.
+
+## 1.30.4 (March 10, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_cam_policy` fix read nil issue when the resource does not exist ([#344](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/344)).
+* Resource: `tencentcloud_key_pair` will be replaced when the end of `public_key` contains spaces([#343](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/343)).
+* Resource: `tencentcloud_scf_function` fix trigger does not support cos_region.
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_kubernetes_cluster` add new attributes `cluster_os_type`,`cluster_internet`,`cluster_intranet`,`managed_cluster_internet_security_policies` and `cluster_intranet_subnet_id`.
+
+
+## 1.30.3 (February 24, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_instance` fix that classic network is not supported([#339](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/339)).
+
+## 1.30.2 (February 17, 2020)
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_cam_policies` add new attribute `policy_id`.
+* Data Source: `tencentcloud_cam_groups` add new attribute `group_id`.
+
+## 1.30.1 (January 21, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_dnat` fix `elastic_port` and `internal_port` type error.
+* Resource: `tencentcloud_vpn_gateway` fix `state` type error.
+* Resource: `tencentcloud_dayu_ddos_policy` fix that `white_ips` and `black_ips` can not be updated.
+* Resource: `tencentcloud_dayu_l4_rule` fix that rule parameters can not be updated.
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_key_pairs` support regular expression search by name.
+ +## 1.30.0 (January 14, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_dayu_cc_http_policies` +* **New Data Source**: `tencentcloud_dayu_cc_https_policies` +* **New Data Source**: `tencentcloud_dayu_ddos_policies` +* **New Data Source**: `tencentcloud_dayu_ddos_policy_attachments` +* **New Data Source**: `tencentcloud_dayu_ddos_policy_cases` +* **New Data Source**: `tencentcloud_dayu_l4_rules` +* **New Data Source**: `tencentcloud_dayu_l7_rules` +* **New Resource**: `tencentcloud_dayu_cc_http_policy` +* **New Resource**: `tencentcloud_dayu_cc_https_policy` +* **New Resource**: `tencentcloud_dayu_ddos_policy` +* **New Resource**: `tencentcloud_dayu_ddos_policy_attachment` +* **New Resource**: `tencentcloud_dayu_ddos_policy_case` +* **New Resource**: `tencentcloud_dayu_l4_rule` +* **New Resource**: `tencentcloud_dayu_l7_rule` + +BUG FIXES: + +* gaap: optimize gaap describe: when describe resource by id but get more than 1 resources, return error directly instead of using the first result +* Resource: `tencentcloud_eni_attachment` fix detach may failed. +* Resource: `tencentcloud_instance` remove the tag that be added by as attachment automatically([#300](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/300)). +* Resource: `tencentcloud_clb_listener` fix `sni_switch` type error([#297](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/297)). +* Resource: `tencentcloud_vpn_gateway` shows argument `prepaid_renew_flag` has changed when applied again([#298](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/298)). +* Resource: `tencentcloud_clb_instance` fix the bug that instance id is not set in state file([#303](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/303)). +* Resource: `tencentcloud_vpn_gateway` that is postpaid charge type cannot be deleted normally([#312](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/312)). 
+* Resource: `tencentcloud_vpn_gateway` add `InternalError` SDK error to trigger the retry process.
+* Resource: `tencentcloud_vpn_gateway` fix read nil issue when the resource does not exist.
+* Resource: `tencentcloud_clb_listener_rule` fix unclear error message of SSL type error.
+* Resource: `tencentcloud_ha_vip_attachment` fix read nil issue when the resource does not exist.
+* Data Source: `tencentcloud_security_group` fix `project_id` type error.
+* Data Source: `tencentcloud_security_groups` fix `project_id` filter not working([#314](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/314)).
+
+## 1.29.0 (January 06, 2020)
+
+FEATURES:
+
+* **New Data Source**: `tencentcloud_gaap_domain_error_pages`
+* **New Resource**: `tencentcloud_gaap_domain_error_page`
+
+ENHANCEMENTS:
+* Data Source: `tencentcloud_vpc_instances` add new optional argument `is_default`.
+* Data Source: `tencentcloud_vpc_subnets` add new optional argument `availability_zone`,`is_default`.
+
+BUG FIXES:
+* Resource: `tencentcloud_redis_instance` field `security_groups` is an id list, not a name list([#291](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/291)).
+
+## 1.28.0 (December 25, 2019)
+
+FEATURES:
+
+* **New Data Source**: `tencentcloud_cbs_snapshot_policies`
+* **New Resource**: `tencentcloud_cbs_snapshot_policy_attachment`
+
+ENHANCEMENTS:
+
+* doc: rewrite website index
+* Resource: `tencentcloud_instance` support modifying instance type([#251](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/251)).
+* Resource: `tencentcloud_gaap_http_domain` add new optional argument `realserver_certificate_ids`.
+* Data Source: `tencentcloud_gaap_http_domains` add new output argument `realserver_certificate_ids`.
+
+DEPRECATED:
+
+* Resource: `tencentcloud_gaap_http_domain`: optional argument `realserver_certificate_id` is no longer supported.
+* Data Source: `tencentcloud_gaap_http_domains`: output argument `realserver_certificate_id` is no longer supported. + +## 1.27.0 (December 17, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_tcaplus_applications` +* **New Data Source**: `tencentcloud_tcaplus_zones` +* **New Data Source**: `tencentcloud_tcaplus_tables` +* **New Data Source**: `tencentcloud_tcaplus_idls` +* **New Resource**: `tencentcloud_tcaplus_application` +* **New Resource**: `tencentcloud_tcaplus_zone` +* **New Resource**: `tencentcloud_tcaplus_idl` +* **New Resource**: `tencentcloud_tcaplus_table` + +ENHANCEMENTS: + +* Resource: `tencentcloud_mongodb_instance` support more instance type([#241](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/241)). +* Resource: `tencentcloud_kubernetes_cluster` support more instance type([#237](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/237)). + +BUG FIXES: + +* Fix bug that resource `tencentcloud_instance` delete error when instance launch failed. +* Fix bug that resource `tencentcloud_security_group` read error when response is InternalError. +* Fix bug that the type of `cluster_type` is wrong in data source `tencentcloud_mongodb_instances`([#242](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/242)). +* Fix bug that resource `tencentcloud_eip` unattach error([#233](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/233)). +* Fix bug that terraform read nil attachment resource when the attached resource of attachment resource is removed of resource CLB and CAM. +* Fix doc example error of resource `tencentcloud_nat_gateway`. + +DEPRECATED: + +* Resource: `tencentcloud_eip`: optional argument `applicable_for_clb` is no longer supported. + +## 1.26.0 (December 09, 2019) + +FEATURES: + +* **New Resource**: `tencentcloud_mysql_privilege`([#223](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/223)). 
+* **New Resource**: `tencentcloud_kubernetes_as_scaling_group`([#202](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/202)). + +ENHANCEMENTS: + +* Resource: `tencentcloud_gaap_layer4_listener` support import. +* Resource: `tencentcloud_gaap_http_rule` support import. +* Resource: `tencentcloud_gaap_security_rule` support import. +* Resource: `tencentcloud_gaap_http_domain` add new optional argument `client_certificate_ids`. +* Resource: `tencentcloud_gaap_layer7_listener` add new optional argument `client_certificate_ids`. +* Data Source: `tencentcloud_gaap_http_domains` add new output argument `client_certificate_ids`. +* Data Source: `tencentcloud_gaap_layer7_listeners` add new output argument `client_certificate_ids`. + +DEPRECATED: + +* Resource: `tencentcloud_gaap_http_domain`: optional argument `client_certificate_id` is no longer supported. +* Resource: `tencentcloud_gaap_layer7_listener`: optional argument `client_certificate_id` is no longer supported. +* Resource: `tencentcloud_mysql_account_privilege` replaced by `tencentcloud_mysql_privilege`. +* Data Source: `tencentcloud_gaap_http_domains`: output argument `client_certificate_id` is no longer supported. +* Data Source: `tencentcloud_gaap_layer7_listeners`: output argument `client_certificate_id` is no longer supported. + +BUG FIXES: + +* Fix bug that resource `tencentcloud_clb_listener` 's unchangeable `health_check_switch`([#235](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/235)). +* Fix bug that resource `tencentcloud_clb_instance` read nil and report error. +* Fix example errors of resource `tencentcloud_cbs_snapshot_policy` and data source `tencentcloud_dnats`. + +## 1.25.2 (December 04, 2019) + +BUG FIXES: +* Fixed bug that the validator of cvm instance type is incorrect. + +## 1.25.1 (December 03, 2019) + +ENHANCEMENTS: +* Optimized error message of validators. 
+ +BUG FIXES: +* Fixed bug that the type of `state` is incorrect in data source `tencentcloud_nat_gateways`([#226](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/226)). +* Fixed bug that the value of `cluster_max_pod_num` is incorrect in resource `tencentcloud_kubernetes_cluster`([#228](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/228)). + + +## 1.25.0 (December 02, 2019) + +ENHANCEMENTS: + +* Resource: `tencentcloud_instance` support `SPOTPAID` instance. Thanks to @LipingMao ([#209](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/209)). +* Resource: `tencentcloud_vpn_gateway` add argument `prepaid_renew_flag` and `prepaid_period` to support prepaid VPN gateway instance creation. + +BUG FIXES: +* Fixed bugs that update operations on `tencentcloud_cam_policy` do not work. +* Fixed bugs that filters on `tencentcloud_cam_users` do not work. + +DEPRECATED: + * Data Source: `tencentcloud_cam_user_policy_attachments`:`policy_type` is no longer supported. + * Data Source: `tencentcloud_cam_group_policy_attachments`:`policy_type` is no longer supported. + +## 1.24.1 (November 26, 2019) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add support for `PREPAID` instance type. Thanks to @woodylic ([#204](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/204)). 
+* Resource: `tencentcloud_cos_bucket` add optional argument tags +* Data Source: `tencentcloud_cos_buckets` add optional argument tags + +BUG FIXES: +* Fixed docs issues of `tencentcloud_nat_gateway` + +## 1.24.0 (November 20, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_ha_vips` +* **New Data Source**: `tencentcloud_ha_vip_eip_attachments` +* **New Resource**: `tencentcloud_ha_vip` +* **New Resource**: `tencentcloud_ha_vip_eip_attachment` + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` cluster_os add new support: `centos7.6x86_64` and `ubuntu18.04.1 LTSx86_64` +* Resource: `tencentcloud_nat_gateway` add computed argument `created_time`. + +BUG FIXES: + +* Fixed docs issues of CAM, DNAT and NAT_GATEWAY +* Fixed query issue that paged-query was not supported in data source `tencentcloud_dnats` +* Fixed query issue that filter `address_ip` was set incorrectly in data source `tencentcloud_eips` + +## 1.23.0 (November 14, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_images` +* **New Data Source**: `tencentcloud_vpn_gateways` +* **New Data Source**: `tencentcloud_customer_gateways` +* **New Data Source**: `tencentcloud_vpn_connections` +* **New Resource**: `tencentcloud_vpn_gateway` +* **New Resource**: `tencentcloud_customer_gateway` +* **New Resource**: `tencentcloud_vpn_connection` +* **Provider TencentCloud**: add `security_token` argument + +ENHANCEMENTS: + +* All api calls now using api3.0 +* Resource: `tencentcloud_eip` add optional argument `tags`. +* Data Source: `tencentcloud_eips` add optional argument `tags`. 
+ +BUG FIXES: + +* Fixed docs of CAM + +## 1.22.0 (November 05, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_cfs_file_systems` +* **New Data Source**: `tencentcloud_cfs_access_groups` +* **New Data Source**: `tencentcloud_cfs_access_rules` +* **New Data Source**: `tencentcloud_scf_functions` +* **New Data Source**: `tencentcloud_scf_namespaces` +* **New Data Source**: `tencentcloud_scf_logs` +* **New Resource**: `tencentcloud_cfs_file_system` +* **New Resource**: `tencentcloud_cfs_access_group` +* **New Resource**: `tencentcloud_cfs_access_rule` +* **New Resource**: `tencentcloud_scf_function` +* **New Resource**: `tencentcloud_scf_namespace` + +## 1.21.2 (October 29, 2019) + +BUG FIXES: + +* Resource: `tencentcloud_gaap_realserver` add ip/domain exists check +* Resource: `tencentcloud_kubernetes_cluster` add error handling logic and optional argument `tags`. +* Resource: `tencentcloud_kubernetes_scale_worker` add error handling logic. +* Data Source: `tencentcloud_kubernetes_clusters` add optional argument `tags`. 
+ +## 1.21.1 (October 23, 2019) + +ENHANCEMENTS: + +* Updated golang to version 1.13.x + +BUG FIXES: + +* Fixed docs of CAM + +## 1.21.0 (October 15, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_cam_users` +* **New Data Source**: `tencentcloud_cam_groups` +* **New Data Source**: `tencentcloud_cam_policies` +* **New Data Source**: `tencentcloud_cam_roles` +* **New Data Source**: `tencentcloud_cam_user_policy_attachments` +* **New Data Source**: `tencentcloud_cam_group_policy_attachments` +* **New Data Source**: `tencentcloud_cam_role_policy_attachments` +* **New Data Source**: `tencentcloud_cam_group_memberships` +* **New Data Source**: `tencentcloud_cam_saml_providers` +* **New Data Source**: `tencentcloud_reserved_instance_configs` +* **New Data Source**: `tencentcloud_reserved_instances` +* **New Resource**: `tencentcloud_cam_user` +* **New Resource**: `tencentcloud_cam_group` +* **New Resource**: `tencentcloud_cam_role` +* **New Resource**: `tencentcloud_cam_policy` +* **New Resource**: `tencentcloud_cam_user_policy_attachment` +* **New Resource**: `tencentcloud_cam_group_policy_attachment` +* **New Resource**: `tencentcloud_cam_role_policy_attachment` +* **New Resource**: `tencentcloud_cam_group_membership` +* **New Resource**: `tencentcloud_cam_saml_provider` +* **New Resource**: `tencentcloud_reserved_instance` + +ENHANCEMENTS: + +* Resource: `tencentcloud_gaap_http_domain` support import +* Resource: `tencentcloud_gaap_layer7_listener` support import + +BUG FIXES: + +* Resource: `tencentcloud_gaap_http_domain` fix sometimes can't enable realserver auth + +## 1.20.1 (October 08, 2019) + +ENHANCEMENTS: + +* Data Source: `tencentcloud_availability_zones` refactor logic with api3.0 . +* Data Source: `tencentcloud_as_scaling_groups` add optional argument `tags` and attribute `tags` for `scaling_group_list`. +* Resource: `tencentcloud_eip` add optional argument `type`, `anycast_zone`, `internet_service_provider`, etc. 
+* Resource: `tencentcloud_as_scaling_group` add optional argument `tags`. + +BUG FIXES: + +* Data Source: `tencentcloud_gaap_http_domains` set response `certificate_id`, `client_certificate_id`, `realserver_auth`, `basic_auth` and `gaap_auth` default value when they are nil. +* Resource: `tencentcloud_gaap_http_domain` set response `certificate_id`, `client_certificate_id`, `realserver_auth`, `basic_auth` and `gaap_auth` default value when they are nil. + +## 1.20.0 (September 24, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_eips` +* **New Data Source**: `tencentcloud_instances` +* **New Data Source**: `tencentcloud_key_pairs` +* **New Data Source**: `tencentcloud_placement_groups` +* **New Resource**: `tencentcloud_placement_group` + +ENHANCEMENTS: + +* Data Source: `tencentcloud_redis_instances` add optional argument `tags`. +* Data Source: `tencentcloud_mongodb_instances` add optional argument `tags`. +* Data Source: `tencentcloud_instance_types` add optional argument `availability_zone` and `gpu_core_count`. +* Data Source: `tencentcloud_gaap_http_rules` add optional argument `forward_host` and attributes `forward_host` in `rules`. +* Resource: `tencentcloud_redis_instance` add optional argument `tags`. +* Resource: `tencentcloud_mongodb_instance` add optional argument `tags`. +* Resource: `tencentcloud_mongodb_sharding_instance` add optional argument `tags`. +* Resource: `tencentcloud_instance` add optional argument `placement_group_id`. +* Resource: `tencentcloud_eip` refactor logic with api3.0 . +* Resource: `tencentcloud_eip_association` refactor logic with api3.0 . +* Resource: `tencentcloud_key_pair` refactor logic with api3.0 . +* Resource: `tencentcloud_gaap_http_rule` add optional argument `forward_host`. + +BUG FIXES: +* Resource: `tencentcloud_mysql_instance`: miss argument `availability_zone` causes the instance to be recreated. + +DEPRECATED: + +* Data Source: `tencentcloud_eip` replaced by `tencentcloud_eips`. 
+ +## 1.19.0 (September 19, 2019) + +FEATURES: + +* **New Resource**: `tencentcloud_security_group_lite_rule`. + +ENHANCEMENTS: + +* Data Source: `tencentcloud_security_groups`: add optional argument `tags`. +* Data Source: `tencentcloud_security_groups`: add optional argument `result_output_file` and new attributes `ingress`, `egress` for list `security_groups`. +* Resource: `tencentcloud_security_group`: add optional argument `tags`. +* Resource: `tencentcloud_as_scaling_config`: internet charge type support `BANDWIDTH_PREPAID`, `TRAFFIC_POSTPAID_BY_HOUR` and `BANDWIDTH_PACKAGE`. + +BUG FIXES: +* Resource: `tencentcloud_clb_listener_rule`: fix unclear description and errors in example. +* Resource: `tencentcloud_instance`: fix hostname is not work. + +## 1.18.1 (September 17, 2019) + +FEATURES: + +* **Update Data Source**: `tencentcloud_vpc_instances` add optional argument `tags` +* **Update Data Source**: `tencentcloud_vpc_subnets` add optional argument `tags` +* **Update Data Source**: `tencentcloud_route_tables` add optional argument `tags` +* **Update Resource**: `tencentcloud_vpc` add optional argument `tags` +* **Update Resource**: `tencentcloud_subnet` add optional argument `tags` +* **Update Resource**: `tencentcloud_route_table` add optional argument `tags` + +ENHANCEMENTS: + +* Data Source:`tencentcloud_kubernetes_clusters` support pull out authentication information for cluster access too. +* Resource:`tencentcloud_kubernetes_cluster` support pull out authentication information for cluster access. + +BUG FIXES: + +* Resource: `tencentcloud_mysql_instance`: when the mysql is abnormal state, read the basic information report error + +DEPRECATED: + +* Data Source: `tencentcloud_kubernetes_clusters`:`container_runtime` is no longer supported. 
+ +## 1.18.0 (September 10, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_ssl_certificates` +* **New Data Source**: `tencentcloud_dnats` +* **New Data Source**: `tencentcloud_nat_gateways` +* **New Resource**: `tencentcloud_ssl_certificate` +* **Update Resource**: `tencentcloud_clb_redirection` add optional argument `is_auto_rewrite` +* **Update Resource**: `tencentcloud_nat_gateway` , add more configurable items. +* **Update Resource**: `tencentcloud_nat` , add more configurable items. + +DEPRECATED: +* Data Source: `tencentcloud_nats` replaced by `tencentcloud_nat_gateways`. + +## 1.17.0 (September 04, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_gaap_proxies` +* **New Data Source**: `tencentcloud_gaap_realservers` +* **New Data Source**: `tencentcloud_gaap_layer4_listeners` +* **New Data Source**: `tencentcloud_gaap_layer7_listeners` +* **New Data Source**: `tencentcloud_gaap_http_domains` +* **New Data Source**: `tencentcloud_gaap_http_rules` +* **New Data Source**: `tencentcloud_gaap_security_policies` +* **New Data Source**: `tencentcloud_gaap_security_rules` +* **New Data Source**: `tencentcloud_gaap_certificates` +* **New Resource**: `tencentcloud_gaap_proxy` +* **New Resource**: `tencentcloud_gaap_realserver` +* **New Resource**: `tencentcloud_gaap_layer4_listener` +* **New Resource**: `tencentcloud_gaap_layer7_listener` +* **New Resource**: `tencentcloud_gaap_http_domain` +* **New Resource**: `tencentcloud_gaap_http_rule` +* **New Resource**: `tencentcloud_gaap_certificate` +* **New Resource**: `tencentcloud_gaap_security_policy` +* **New Resource**: `tencentcloud_gaap_security_rule` + +## 1.16.3 (August 30, 2019) + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster`: cgi error retry. +* Resource: `tencentcloud_kubernetes_scale_worker`: cgi error retry. + +## 1.16.2 (August 28, 2019) + +BUG FIXES: + +* Resource: `tencentcloud_instance`: fixed cvm data disks missing computed. 
+* Resource: `tencentcloud_mysql_backup_policy`: `backup_model` remove logical backup support. +* Resource: `tencentcloud_mysql_instance`: `tags` adapt to the new official api. + +## 1.16.1 (August 27, 2019) + +ENHANCEMENTS: +* `tencentcloud_instance`: refactor logic with api3.0 . + +## 1.16.0 (August 20, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_kubernetes_clusters` +* **New Resource**: `tencentcloud_kubernetes_scale_worker` +* **New Resource**: `tencentcloud_kubernetes_cluster` + +DEPRECATED: +* Data Source: `tencentcloud_container_clusters` replaced by `tencentcloud_kubernetes_clusters`. +* Data Source: `tencentcloud_container_cluster_instances` replaced by `tencentcloud_kubernetes_clusters`. +* Resource: `tencentcloud_container_cluster` replaced by `tencentcloud_kubernetes_cluster`. +* Resource: `tencentcloud_container_cluster_instance` replaced by `tencentcloud_kubernetes_scale_worker`. + +## 1.15.2 (August 14, 2019) + +ENHANCEMENTS: + +* `tencentcloud_as_scaling_group`: fixed issue that binding scaling group to load balancer does not work. +* `tencentcloud_clb_attachements`: rename `rewrite_source_rule_id` with `source_rule_id` and rename `rewrite_target_rule_id` with `target_rule_id`. 
+ +## 1.15.1 (August 13, 2019) + +ENHANCEMENTS: + +* `tencentcloud_instance`: changed `image_id` property to ForceNew ([#78](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/78)) +* `tencentcloud_instance`: improved with retry ([#82](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/82)) +* `tencentcloud_cbs_storages`: improved with retry ([#82](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/82)) +* `tencentcloud_clb_instance`: bug fixed and improved with retry ([#37](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/37)) + +## 1.15.0 (August 07, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_clb_instances` +* **New Data Source**: `tencentcloud_clb_listeners` +* **New Data Source**: `tencentcloud_clb_listener_rules` +* **New Data Source**: `tencentcloud_clb_attachments` +* **New Data Source**: `tencentcloud_clb_redirections` +* **New Resource**: `tencentcloud_clb_instance` +* **New Resource**: `tencentcloud_clb_listener` +* **New Resource**: `tencentcloud_clb_listener_rule` +* **New Resource**: `tencentcloud_clb_attachment` +* **New Resource**: `tencentcloud_clb_redirection` + +DEPRECATED: +* Resource: `tencentcloud_lb` replaced by `tencentcloud_clb_instance`. +* Resource: `tencentcloud_alb_server_attachment` replaced by `tencentcloud_clb_attachment`. + +## 1.14.1 (August 05, 2019) + +BUG FIXES: + +* resource/tencentcloud_security_group_rule: fixed security group rule id is not compatible with previous version. 
+ +## 1.14.0 (July 30, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_security_groups` +* **New Data Source**: `tencentcloud_mongodb_instances` +* **New Data Source**: `tencentcloud_mongodb_zone_config` +* **New Resource**: `tencentcloud_mongodb_instance` +* **New Resource**: `tencentcloud_mongodb_sharding_instance` +* **Update Resource**: `tencentcloud_security_group_rule` add optional argument `description` + +DEPRECATED: +* Data Source: `tencentcloud_security_group` replaced by `tencentcloud_security_groups` + +ENHANCEMENTS: +* Refactoring security_group logic with api3.0 + +## 1.13.0 (July 23, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_dc_gateway_instances` +* **New Data Source**: `tencentcloud_dc_gateway_ccn_routes` +* **New Resource**: `tencentcloud_dc_gateway` +* **New Resource**: `tencentcloud_dc_gateway_ccn_route` + +## 1.12.0 (July 16, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_dc_instances` +* **New Data Source**: `tencentcloud_dcx_instances` +* **New Resource**: `tencentcloud_dcx` +* **UPDATE Resource**:`tencentcloud_mysql_instance` and `tencentcloud_mysql_readonly_instance` completely delete instance. + +BUG FIXES: + +* resource/tencentcloud_instance: fixed issue when data disks set as delete_with_instance not works. +* resource/tencentcloud_instance: if managed public_ip manually, please don't define `allocate_public_ip` ([#62](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/62)). +* resource/tencentcloud_eip_association: fixed issue when instances were manually deleted ([#60](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/60)). 
+* resource/tencentcloud_mysql_readonly_instance:remove an unsupported property `gtid` + +## 1.11.0 (July 02, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_ccn_instances` +* **New Data Source**: `tencentcloud_ccn_bandwidth_limits` +* **New Resource**: `tencentcloud_ccn` +* **New Resource**: `tencentcloud_ccn_attachment` +* **New Resource**: `tencentcloud_ccn_bandwidth_limit` + +## 1.10.0 (June 27, 2019) + +ENHANCEMENTS: + +* Refactoring vpc logic with api3.0 +* Refactoring cbs logic with api3.0 + +FEATURES: +* **New Data Source**: `tencentcloud_vpc_instances` +* **New Data Source**: `tencentcloud_vpc_subnets` +* **New Data Source**: `tencentcloud_vpc_route_tables` +* **New Data Source**: `tencentcloud_cbs_storages` +* **New Data Source**: `tencentcloud_cbs_snapshots` +* **New Resource**: `tencentcloud_route_table_entry` +* **New Resource**: `tencentcloud_cbs_snapshot_policy` +* **Update Resource**: `tencentcloud_vpc` , add more configurable items. +* **Update Resource**: `tencentcloud_subnet` , add more configurable items. +* **Update Resource**: `tencentcloud_route_table`, add more configurable items. +* **Update Resource**: `tencentcloud_cbs_storage`, add more configurable items. +* **Update Resource**: `tencentcloud_instance`: add optional argument `tags`. +* **Update Resource**: `tencentcloud_security_group_rule`: add optional argument `source_sgid`. + +DEPRECATED: +* Data Source: `tencentcloud_vpc` replaced by `tencentcloud_vpc_instances`. +* Data Source: `tencentcloud_subnet` replaced by `tencentcloud_vpc_subnets`. +* Data Source: `tencentcloud_route_table` replaced by `tencentcloud_vpc_route_tables`. +* Resource: `tencentcloud_route_entry` replaced by `tencentcloud_route_table_entry`. + +## 1.9.1 (June 24, 2019) + +BUG FIXES: + +* data/tencentcloud_instance: fixed vpc ip is in use error when re-creating with private ip ([#46](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/46)). 
+ +## 1.9.0 (June 18, 2019) + +ENHANCEMENTS: + +* update to `v0.12.1` Terraform SDK version + +BUG FIXES: + +* data/tencentcloud_security_group: `project_id` remote API return sometime is string type. +* resource/tencentcloud_security_group: just like `data/tencentcloud_security_group` + +## 1.8.0 (June 11, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_as_scaling_configs` +* **New Data Source**: `tencentcloud_as_scaling_groups` +* **New Data Source**: `tencentcloud_as_scaling_policies` +* **New Resource**: `tencentcloud_as_scaling_config` +* **New Resource**: `tencentcloud_as_scaling_group` +* **New Resource**: `tencentcloud_as_attachment` +* **New Resource**: `tencentcloud_as_scaling_policy` +* **New Resource**: `tencentcloud_as_schedule` +* **New Resource**: `tencentcloud_as_lifecycle_hook` +* **New Resource**: `tencentcloud_as_notification` + +## 1.7.0 (May 23, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_redis_zone_config` +* **New Data Source**: `tencentcloud_redis_instances` +* **New Resource**: `tencentcloud_redis_instance` +* **New Resource**: `tencentcloud_redis_backup_config` + +ENHANCEMENTS: + +* resource/tencentcloud_instance: Add `hostname`, `project_id`, `delete_with_instance` argument. +* Update tencentcloud-sdk-go to better support redis api. 
+ +## 1.6.0 (May 15, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_cos_buckets` +* **New Data Source**: `tencentcloud_cos_bucket_object` +* **New Resource**: `tencentcloud_cos_bucket` +* **New Resource**: `tencentcloud_cos_bucket_object` + +ENHANCEMENTS: + +* Add the framework of auto generating terraform docs + +## 1.5.0 (April 26, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_mysql_backup_list` +* **New Data Source**: `tencentcloud_mysql_zone_config` +* **New Data Source**: `tencentcloud_mysql_parameter_list` +* **New Data Source**: `tencentcloud_mysql_instance` +* **New Resource**: `tencentcloud_mysql_backup_policy` +* **New Resource**: `tencentcloud_mysql_account` +* **New Resource**: `tencentcloud_mysql_account_privilege` +* **New Resource**: `tencentcloud_mysql_instance` +* **New Resource**: `tencentcloud_mysql_readonly_instance` + +ENHANCEMENTS: + +* resource/tencentcloud_subnet: `route_table_id` now is an optional argument + +## 1.4.0 (April 12, 2019) + +ENHANCEMENTS: + +* data/tencentcloud_image: add `image_name` attribute to this data source. +* resource/tencentcloud_instance: data disk count limit now is upgrade from 1 to 10, as API has supported more disks. +* resource/tencentcloud_instance: PREPAID instance now can be deleted, but still have some limit in API. + +BUG FIXES: + +* resource/tencentcloud_instance: `allocate_public_ip` doesn't work properly when it is set to false. 
+ +## 1.3.0 (March 12, 2019) + +FEATURES: + +* **New Resource**: `tencentcloud_lb` ([#3](https://github.com/terraform-providers/terraform-provider-scaffolding/issues/3)) + +ENHANCEMENTS: + +* resource/tencentcloud_instance: Add `user_data_raw` argument ([#4](https://github.com/terraform-providers/terraform-provider-scaffolding/issues/4)) + +## 1.2.2 (September 28, 2018) + +BUG FIXES: + +* resource/tencentcloud_cbs_storage: make name to be required ([#25](https://github.com/tencentyun/terraform-provider-tencentcloud/issues/25)) +* resource/tencentcloud_instance: support user data and private ip + +## 1.2.0 (April 3, 2018) + +FEATURES: + +* **New Resource**: `tencentcloud_container_cluster` +* **New Resource**: `tencentcloud_container_cluster_instance` +* **New Data Source**: `tencentcloud_container_clusters` +* **New Data Source**: `tencentcloud_container_cluster_instances` + +## 1.1.0 (March 9, 2018) + +FEATURES: + +* **New Resource**: `tencentcloud_eip` +* **New Resource**: `tencentcloud_eip_association` +* **New Data Source**: `tencentcloud_eip` +* **New Resource**: `tencentcloud_nat_gateway` +* **New Resource**: `tencentcloud_dnat` +* **New Data Source**: `tencentcloud_nats` +* **New Resource**: `tencentcloud_cbs_snapshot` +* **New Resource**: `tencentcloud_alb_server_attachment` + +## 1.0.0 (January 19, 2018) + +FEATURES: + +### CVM + +RESOURCES: + +* instance create +* instance read +* instance update + * reset instance + * reset password + * update instance name + * update security groups +* instance delete +* key pair create +* key pair read +* key pair delete + +DATA SOURCES: + +* image read +* instance\_type read +* zone read + +### VPC + +RESOURCES: + +* vpc create +* vpc read +* vpc update (update name) +* vpc delete +* subnet create +* subnet read +* subnet update (update name) +* subnet delete +* security group create +* security group read +* security group update (update name, description) +* security group delete +* security group rule create +* 
security group rule read +* security group rule delete +* route table create +* route table read +* route table update (update name) +* route table delete +* route entry create +* route entry read +* route entry delete + +DATA SOURCES: + +* vpc read +* subnet read +* security group read +* route table read + +### CBS + +RESOURCES: + +* storage create +* storage read +* storage update (update name) +* storage attach +* storage detach diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/LICENSE b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/README.md b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/README.md new file mode 100644 index 00000000..2e7239d8 --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/README.md @@ -0,0 +1,98 @@ +# terraform-provider-tencentcloud + +## Requirements + +* [Terraform](https://www.terraform.io/downloads.html) 0.13.x +* [Go](https://golang.org/doc/install) 1.13.x (to build the provider plugin) + +## Usage + +### Build from source code + +Clone repository to: `$GOPATH/src/github.com/tencentcloudstack/terraform-provider-tencentcloud` + +```sh +$ mkdir -p $GOPATH/src/github.com/tencentcloudstack +$ cd $GOPATH/src/github.com/tencentcloudstack +$ git clone https://github.com/tencentcloudstack/terraform-provider-tencentcloud.git +$ cd terraform-provider-tencentcloud +$ go build . +``` + +If you're building the provider, follow the instructions to [install it as a plugin.](https://www.terraform.io/docs/plugins/basics.html#installing-a-plugin) After placing it into your plugins directory, run `terraform init` to initialize it. + +## Configuration + +### Configure credentials + +You will need to have a pair of secret id and secret key to access Tencent Cloud resources, configure it in the provider arguments or export it in environment variables. If you don't have it yet, please access [Tencent Cloud Management Console](https://console.cloud.tencent.com/cam/capi) to create one. 
+
+```
+export TENCENTCLOUD_SECRET_ID=AKID9HH4OpqLJ5f6LPr4iIm5GF2s-EXAMPLE
+export TENCENTCLOUD_SECRET_KEY=72pQp14tWKUglrnX5RbaNEtN-EXAMPLE
+```
+
+### Configure proxy info (optional)
+
+If you are behind a proxy, for example, in a corporate network, you must set the proxy environment variables correctly. For example:
+
+```
+export http_proxy=http://your-proxy-host:your-proxy-port # This is just an example, use your real proxy settings!
+export https_proxy=$http_proxy
+export HTTP_PROXY=$http_proxy
+export HTTPS_PROXY=$http_proxy
+```
+
+## Run demo
+
+You can edit your own terraform configuration files. Learn examples from the examples directory.
+
+### Terraform it
+
+Now you can try your terraform demo:
+
+```
+terraform init
+terraform plan
+terraform apply
+```
+
+If you want to destroy the resource, make sure the instance is already in ``running`` status, otherwise the destroy might fail.
+
+```
+terraform destroy
+```
+
+## Developer Guide
+
+### DEBUG
+
+You will need to set an environment variable named ``TF_LOG``, for more info please refer to [Terraform official doc](https://www.terraform.io/docs/internals/debugging.html):
+
+```
+export TF_LOG=DEBUG
+```
+
+In your source file, import the standard package ``log`` and print the message such as:
+
+```
+log.Println("[DEBUG] the message and some import values: %v", importantValues)
+```
+
+### Test
+
+The quicker way for development and debug is writing test cases.
+How to trigger running the test cases, please refer to the `test.sh` script.
+How to write test cases, check the `xxx_test.go` files.
+
+### Avoid ``terraform init``
+
+```
+export TF_SKIP_PROVIDER_VERIFY=1
+```
+
+This will disable the verify steps, so after you update this provider, you won't need to create new resources, but use previously saved state.
+
+### Document
+
+Keep in mind that document changes are also needed when resources, data sources, attributes changed in code.
diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/terraform-provider-tencentcloud_v1.56.15 b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/terraform-provider-tencentcloud_v1.56.15 new file mode 100755 index 00000000..aa62e482 Binary files /dev/null and b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64/terraform-provider-tencentcloud_v1.56.15 differ diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64_hcl/.terraform.lock.hcl b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64_hcl/.terraform.lock.hcl new file mode 100644 index 00000000..c782b870 --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/darwin_amd64_hcl/.terraform.lock.hcl @@ -0,0 +1,22 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/tencentcloudstack/tencentcloud" { + version = "1.56.15" + hashes = [ + "h1:dywt0JaLAhGQOhOWZJmxe5/Nu97v0RIr2jqwayA/aHs=", + "zh:27e11e1af3447b803ab7b79de32bf8a9970b39b8da5909c367607ad647d350d5", + "zh:2b1827cb0707130c3370052da658e979c156ca5c7b836fe9297382d5a93b9a25", + "zh:32094f8d878aab92055b828e86acac1cf84468f0e9407077a68b38df2e268f9d", + "zh:4d2e07904f3a11579fb818ed186e735f2a8ee3073587cc3a7803a2c14a2beaa4", + "zh:5298589208aa6a6af110951fcee6073c98f3126eea5a9860062df8d975ee1d0b", + "zh:9116269ab905f5de483c7bc149f1696f251c7c2bb79ad8773499e5e01c8c5e1f", + "zh:92f44bfd80d1a9cc5af6c82371c99cf2dfaff56dee30cc10a9cd0a753881cd1a", + "zh:9d7a1d0add38c8b1295e6e1edba4bf4591bdfe531ee4956db1a41ccba3877c9f", + "zh:9fbe25a6575de44e8a2f716d9c41a0a2e5ccec2914a3f824b1fbf5118ea4fead", + "zh:a652486f5ef22c3c8f9eca217ec85978aa1c9dd321e6a7317834ae75292e64e0", + "zh:b1edab515278f67cffbda6a53750d5ce97e2f278d6273dd9ddf9e60601da0255", + "zh:c983bd114899dfe6689d29b3d9e93fe5e0e49d54315460b2a2968a65c5ee39d1", + 
"zh:ca94a5624069dbf18d011f3a648a38d4b4518259bafe2013d3a065acf71f8778", + ] +} diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/CHANGELOG.md b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/CHANGELOG.md new file mode 100644 index 00000000..63242f2e --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/CHANGELOG.md @@ -0,0 +1,1740 @@ +## 1.56.16 (Unreleased) + +## 1.56.15 (July 07, 2021) + +BUG FIXES + +* Resource `tencentcloud_tc_kubernetes_cluster` filter the request field of *bandwidth_package_id* when it is null +* Resource `tencentcloud_tc_kubernetes_node_pool` filter the request field of *bandwidth_package_id* when it is null + +## 1.56.14 (July 06, 2021) + +BUG FIXES + +* Resource `tencentcloud_tc_clb_listener` exec the plan will lead the resource rebuild. + +ENHANCEMENTS: + +* Resource `tencentcloud_elasticsearch_instance` create **ES** cluster add new parametes of *web_node_type_info*. 
+* Resource `tencentcloud_tc_instance` add *instance_count* to support create multiple consecutive name of instance +* Resource `tencentcloud_tc_kubernetes_cluster` supports change *internet_max_bandwidth_out* +* Resource `tencentcloud_tc_instance` create cvm instance add *bandwidth_package_id* + + +## 1.56.13 (July 02, 2021) + +BUG FIXES + +* Resource `TkeCvmCreateInfo.data_disk.disk_type` support CLOUD_HSSD and CLOUD_TSSD + +## 1.56.12 (July 02, 2021) + +BUG FIXES + +* Resource `TkeCvmCreateInfo.data_disk.disk_type` support CLOUD_HSSD + +## 1.56.11 + +BUG FIXES + +* Resource `tencentcloud_kubernetes_cluster` fix create cluster without *desired_pod_num* in tf, then crash +* Resource `tencentcloud_kubernetes_cluster` fix when upgrade terraform-provider-tencentclod from v1.56.1 to newer, cluster_os force replacement +* Resource `tencentcloud_kubernetes_cluster` fix when upgrade terraform-provider-tencentclod from v1.56.1 to newer, enable_customized_pod_cidr force replace + +## 1.56.10 + +BUG FIXES + +* Resource `tencentcloud_tcr_namespace` fix create two namespace and one name is substring of another, then got an error about more than 1 +* Resource `tencentcloud_tcr_namespace` fix create two repositories and one name is substring of another, then got an error about more than 1 + + +## 1.56.9 (Jun 09, 2021) + +BUG FIXES: + +* Resource `tencentcloud_instance` fix words spell, in tencendcloud/resource_tc_instance.go L45, data.tencentcloud_availability_zones.my_favorate_zones.zones.0.name change to data.tencentcloud_availability_zones.my_favorite_zones.zones.0.name". +* Resource `tencentcloud_kubernetes_clusters` fix the description of is_non_static_ip_mode + +ENHANCEMENTS: + +* Resource `tencentcloud_clb_target_group` add create target group. +* Resource `tencentcloud_clb_instance` add internal CLB supports security group. +* Resource `tencentcloud_clb_instance` add supports open and close CLB security group, default is open. 
+* Resource `tencentcloud_clb_instance` add external CLB create multi AZ instance. +* Resource `tencentcloud_kubernetes_cluster` add supports params of img_id to assign image. +* Resource `tencentcloud_as_scaling_group` add MultiZoneSubnetPolicy. + +## 1.56.8 (May 26, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_cluster_attachment.worker_config` add `desired_pod_num`. +* Resource `tencentcloud_kubernetes_cluster_attachment` add `worker_config_overrides`. +* Resource `tencentcloud_kubernetes_scale_worker` add `desired_pod_num`. +* Resource `tencentcloud_kubernetes_cluster` add `enable_customized_pod_cidr`, `base_pod_num`, `globe_desired_pod_num`, and `exist_instance`. +* Resource `tencentcloud_kubernetes_cluster` update available value of `cluster_os`. +* Resource `tencentcloud_as_lifecycle_hook` update `heartbeat_timeout` value ranges. + +## 1.56.7 (May 12, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_as_scaling_config` add `disk_type_policy`. +* Data Source `tencentcloud_as_scaling_configs` add `disk_type_policy` as result. + +## 1.56.6 (May 7, 2021) + +BUG FIXES: + +* Resource: `tencentcloud_scf_function` filed `cls_logset_id` and `cls_logset_id` change to Computed. + +## 1.56.5 (April 26, 2021) + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster` upgrade cluster timeout from 3 to 9 minutes. + +## 1.56.4 (April 26, 2021) + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster` upgrade instances timeout depend on instance number. + +## 1.56.3 (April 25, 2021) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add `upgrade_instances_follow_cluster` for upgrade all instances of cluster. + +## 1.56.2 (April 19, 2021) + +BUG FIXES: + +* Remove `ResourceInsufficient` from `retryableErrorCode`. + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` upgrade `cluster_version` will send old `cluster_extra_args` to tke. + +## 1.56.1 (April 6,2021) + +BUG FIXES: + +* Fix release permission denied. 
+ +## 1.56.0 (April 2,2021) + +FEATURES: + +* **New Resource**: `tencentcloud_cdh_instance` +* **New Data Source**: `tencentcloud_cdh_instances` + +ENHANCEMENTS: + +* Resource: `tencentcloud_instance` add `cdh_instance_type` and `cdh_host_id` to support create instance based on cdh. + +## 1.55.2 (March 29, 2021) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add `node_pool_global_config` to support node pool global config setting. + +## 1.55.1 (March 26, 2021) + +ENHANCEMENTS: + +* Resource: `tencentcloud_tcr_vpc_attachment` add more time for retry. + +## 1.55.0 (March 26, 2021) + +FEATURES: + +* **New Resource**: `tencentcloud_ssm_secret` +* **New Resource**: `tencentcloud_ssm_secret_version` +* **New Data Source**: `tencentcloud_ssm_secrets` +* **New Data Source**: `tencentcloud_ssm_secret_versions` + +ENHANCEMENTS: + +* Resource: `tencentcloud_ssl_certificate` refactor logic with api3.0 . +* Data Source: `tencentcloud_ssl_certificates` refactor logic with api3.0 . +* Resource `tencentcloud_kubernetes_cluster` add `disaster_recover_group_ids` to set disaster recover group ID. +* Resource `tencentcloud_kubernetes_scale_worker` add `disaster_recover_group_ids` to set disaster recover group ID. + +## 1.54.1 (March 24, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_tcr_vpc_attachment` add `enable_public_domain_dns`, `enable_vpc_domain_dns` to set whether to enable dns. +* Data Source `tencentcloud_tcr_vpc_attachments` add `enable_public_domain_dns`, `enable_vpc_domain_dns`. + +## 1.54.0 (March 22, 2021) + +FEATURES: + +* **New Resource**: `tencentcloud_kms_key` +* **New Resource**: `tencentcloud_kms_external_key` +* **New Data Source**: `tencentcloud_kms_keys` + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_cluster_attachment` add `unschedulable` to set whether the joining node participates in the schedule. +* Resource `tencentcloud_kubernetes_cluster` add `unschedulable` to set whether the joining node participates in the schedule. 
+* Resource `tencentcloud_kubernetes_node_pool` add `unschedulable` to set whether the joining node participates in the schedule. +* Resource `tencentcloud_kubernetes_scale_worker` add `unschedulable` to set whether the joining node participates in the schedule. + +## 1.53.9 (March 19, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_tcr_instance` add `open_public_network` to control public network access. +* Resource `tencentcloud_cfs_file_system` add `storage_type` to change file service StorageType. + +## 1.53.8 (March 15, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_instance` add `cam_role_name` to support binding role to cvm instance. + +BUG FIXES: + +* Resource `tencentcloud_instance` fix bug that waiting 5 minutes when cloud disk sold out. +* Resource: `tencentcloud_tcr_instance` fix bug that only one tag is effective when setting multiple tags. + +## 1.53.7 (March 10, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_node_pool` add `internet_max_bandwidth_out`, `public_ip_assigned` to support internet traffic setting. +* Resource `tencentcloud_instance` remove limit of `data_disk_size`. + +## 1.53.6 (March 09, 2021) + +ENHANCEMENTS: +* Resource `tencentcloud_eip` support `internet_max_bandwidth_out` modification. +* Resource `tencentcloud_kubernetes_cluster` add `hostname` to support node hostname setting. +* Resource `tencentcloud_kubernetes_scale_worker` add `hostname` to support node hostname setting. + +## 1.53.5 (March 01, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_clb_instance` add `internet_charge_type`, `internet_bandwidth_max_out` to support internet traffic setting with OPEN CLB instance. +* Resource `tencentcloud_clb_rule` add `http2_switch` to support HTTP2 protocol setting. +* Resource `tencentcloud_kubernetes_cluster` add `lan_ip` to show node LAN IP. +* Resource `tencentcloud_kubernetes_scale_worker` add `lan_ip` to show node LAN IP. 
+* Resource `tencentcloud_kubernetes_cluster_attachment` add `state` to show node state. +* Resource `tencentcloud_clb_rule` support certificate modifying. +* Data Source `tencentcloud_clb_instances` add `internet_charge_type`, `internet_bandwidth_max_out`. +* Data Source `tencentcloud_clb_rules` add `http2_switch`. + +BUG FIXES: + +* Resource: `tencentcloud_clb_attachment` fix bug that attach more than 20 targets will failed. + +## 1.53.4 (February 08, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_scale_worker` add `data_disk`, `docker_graph_path` to support advanced instance setting. +* Resource `tencentcloud_instance` add tags to the disks created with the instance. + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster_attachment` fix bug that only one extra argument set successfully. +* Resource: `tencentcloud_as_scaling_policy` fix bug that missing required parameters error happened when update metric parameters. + +## 1.53.3 (February 02, 2021) + +ENHANCEMENTS: + +* Data Source `tencentcloud_cbs_storages` add `throughput_performance` to support adding extra performance to the cbs resources. +* Resource `tencentcloud_kubernetes_cluster_attachment` add `hostname` to support setting hostname with the attached instance. + +## 1.53.2 (February 01, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_cbs_storage` add `throughput_performance` to support adding extra performance to the cbs resources. + +BUG FIXES: + +* Resource: `tencentcloud_cos_bucket` fix bug that error happens when applying unsupported logging region. +* Resource: `tencentcloud_as_scaling_policy` fix bug that missing required parameters error happened when update metric parameters. + +## 1.53.1 (January 23, 2021) + +ENHANCEMENTS: + +* Resource `tencentcloud_instance` add `throughput_performance` to support adding extra performance to the data disks. 
+* Resource `tencentcloud_kubernetes_cluster_attachment` add `file_system`, `auto_format_and_mount` and `mount_target` to support advanced instance setting. +* Resource `tencentcloud_kubernetes_node_pool` add `file_system`, `auto_format_and_mount` and `mount_target` to support advanced instance setting. +* Resource `tencentcloud_kubernetes_node_pool` add `scaling_mode` to support scaling mode setting. +* Resource `tencentcloud_kubernetes` support version upgrade. + +BUG FIXES: + +* Resource: `tencentcloud_gaap_http_rule` fix bug that exception happens when create more than one rule. + +## 1.53.0 (January 15, 2021) + +FEATURES: + +* **New Resource**: `tencentcloud_ssl_pay_certificate` to support ssl pay certificate. + +ENHANCEMENTS: + +* Resource `tencentcloud_ccn` add `charge_type` to support billing mode setting. +* Resource `tencentcloud_ccn` add `bandwidth_limit_type` to support the speed limit type setting. +* Resource `tencentcloud_ccn_bandwidth_limit` add `dst_region` to support destination area restriction setting. +* Resource `tencentcloud_cdn_domain` add `range_origin_switch` to support range back to source configuration. +* Resource `tencentcloud_cdn_domain` add `rule_cache` to support advanced path cache configuration. +* Resource `tencentcloud_cdn_domain` add `request_header` to support request header configuration. +* Data Source `tencentcloud_ccn_instances` add `charge_type` to support billing mode. +* Data Source `tencentcloud_ccn_instances` add `bandwidth_limit_type` to support the speed limit type. +* Data Source `tencentcloud_ccn_bandwidth_limit` add `dst_region` to support destination area restriction. +* Data Source `tencentcloud_cdn_domains` add `range_origin_switch` to support range back to source configuration. +* Data Source `tencentcloud_cdn_domains` add `rule_cache` to support advanced path cache configuration. +* Data Source `tencentcloud_cdn_domains` add `request_header` to support request header configuration. 
+ +## 1.52.0 (December 28, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_kubernetes_node_pool` to support node management. + +DEPRECATED: + +* Resource: `tencentcloud_kubernetes_as_scaling_group` replaced by `tencentcloud_kubernetes_node_pool`. + +## 1.51.1 (December 22, 2020) + +ENHANCEMENTS: + +* Resource `tencentcloud_kubernetes_cluster_attachment` add `extra_args` to support node extra arguments setting. +* Resource `tencentcloud_cos_bucket` add `log_enbale`, `log_target_bucket` and `log_prefix` to support log status setting. + +## 1.51.0 (December 15, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_tcr_vpc_attachment` +* **New Data Source**: `tencentcloud_tcr_vpc_attachments` + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` support `name`, `project_id` and `description` modification. +* Doc: optimize document. + +## 1.50.0 (December 08, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_address_template` +* **New Resource**: `tencentcloud_address_template_group` +* **New Resource**: `tencentcloud_protocol_template` +* **New Resource**: `tencentcloud_protocol_template_group` +* **New Data Source**: `tencentcloud_address_templates` +* **New Data Source**: `tencentcloud_address_template_groups` +* **New Data Source**: `tencentcloud_protocol_templates` +* **New Data Source**: `tencentcloud_protocol_template_groups` + +ENHANCEMENTS: + +* Resource `tencentcloud_sercurity_group_rule` add `address_template` and `protocol_template` to support building new security group rule with resource `tencentcloud_address_template` and `tencentcloud_protocol_template`. +* Doc: optimize document directory. + +BUG FIXES: + +* Resource: `tencentcloud_cos_bucket` fix bucket name validator. + +## 1.49.1 (December 01, 2020) + +ENHANCEMENTS: + +* Doc: Update directory of document. 
+ +## 1.49.0 (November 27, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_tcr_instance` +* **New Resource**: `tencentcloud_tcr_token` +* **New Resource**: `tencentcloud_tcr_namespace` +* **New Resource**: `tencentcloud_tcr_repository` +* **New Data Source**: `tencentcloud_tcr_instances` +* **New Data Source**: `tencentcloud_tcr_tokens` +* **New Data Source**: `tencentcloud_tcr_namespaces` +* **New Data Source**: `tencentcloud_tcr_repositories` +* **New Resource**: `tencentcloud_cos_bucket_policy` + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_as_scaling_group` support `max_size` and `min_size` modification. + +## 1.48.0 (November 20, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_sqlserver_basic_instance` +* **New Data Source**: `tencentcloud_sqlserver_basic_instances` + +ENHANCEMENTS: + +* Resource: `tencentcloud_clb_listener` support configure HTTP health check for TCP listener([#539](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/539)). +* Resource: `tencentcloud_clb_listener` add computed argument `target_type`. +* Data Source: `tencentcloud_clb_listeners` support getting HTTP health check config for TCP listener. + +DEPRECATED: +* Resource: `tencentcloud_clb_target_group_attachment`: optional argument `targrt_group_id` is no longer supported, replace by `target_group_id`. + +## 1.47.0 (November 13, 2020) + +ENHANCEMENTS: +* Resource: `tencentcloud_clb_listener` support import. +* Resource: `tencentcloud_clb_listener` add computed argument `listener_id`. +* Resource: `tencentcloud_clb_listener_rule` support import. +* Resource: `tencentcloud_cdn_domain` add example that use COS bucket url as origin. +* Resource: `tencentcloud_sqlserver_instance` add new argument `tags`. +* Resource: `tencentcloud_sqlserver_readonly_instance` add new argument `tags`. +* Resource: `tencentcloud_elasticsearch_instance` support `node_type` and `disk_size` modification. 
+* Data Source: `tencentcloud_instance_types` add argument `exclude_sold_out` to support filtering sold out instance types. +* Data Source: `tencentcloud_sqlserver_instances` add new argument `tags`. +* Data Source: `tencentcloud_instance_types` add argument `exclude_sold_out` to support filtering sold out instance types. + +BUG FIXES: + +* Resource: `tencentcloud_elasticsearch_instance` fix inconsistent bug. +* Resource: `tencentcloud_redis_instance` fix incorrect number when updating `mem_size`. +* Data Source: `tencentcloud_redis_instances` fix incorrect number for `mem_size`. + +## 1.46.4 (November 6, 2020) + +BUG FIXES: +* Resource: `tencentcloud_kubernetes_cluster` fix force replacement when updating `docker_graph_path`. + +## 1.46.3 (November 6, 2020) + +ENHANCEMENTS: +* Resource: `tencentcloud_kubernetes_cluster` add more values with argument `cluster_os` to support linux OS system. + +## 1.46.2 (November 5, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add new argument `kube_config`. +* Resource: `tencentcloud_kubernetes_cluster` add value `tlinux2.4x86_64` with argument `cluster_os` to support linux OS system. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `mount_target` to support set disk mount path. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `docker_graph_path` to support set docker graph path. +* Resource: `tencentcloud_clb_redirection` add new argument `delete_all_auto_rewirte` to delete all auto-associated redirection when destroying the resource. +* Resource: `tencentcloud_kubernetes_scale_worker` add new argument `labels` to support scale worker labels. +* Data Source: `tencentcloud_kubernetes_clusters` add new argument `kube_config`. +* Data Source: `tencentcloud_availability_regions` support getting local region info by setting argument `name` with value `default`. +* Docs: update argument description. 
+ +BUG FIXES: + +* Resource: `tencentcloud_clb_redirection` fix inconsistent bug when creating more than one auto redirection. +* Resource: `tencentcloud_redis_instance` fix updating issue when redis `type_id` is set `5`. + +## 1.46.1 (October 29, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_cos_bucket` add new argument `cos_bucket_url`. +* Resource: `tencentcloud_nat_gateway` add new argument `tags`. +* Resource: `tencentcloud_postgresql_instance` add new argument `tags`. +* Data Source: `tencentcloud_cos_buckets` add new argument `cos_bucket_url`. +* Data Source: `tencentcloud_nat_gateways` add new argument `tags`. +* Data Source: `tencentcloud_postgresql_instances` add new argument `tags`. + +## 1.46.0 (October 26, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_api_gateway_api` +* **New Resource**: `tencentcloud_api_gateway_service` +* **New Resource**: `tencentcloud_api_gateway_custom_domain` +* **New Resource**: `tencentcloud_api_gateway_usage_plan` +* **New Resource**: `tencentcloud_api_gateway_usage_plan_attachment` +* **New Resource**: `tencentcloud_api_gateway_ip_strategy` +* **New Resource**: `tencentcloud_api_gateway_strategy_attachment` +* **New Resource**: `tencentcloud_api_gateway_api_key` +* **New Resource**: `tencentcloud_api_gateway_api_key_attachment` +* **New Resource**: `tencentcloud_api_gateway_service_release` +* **New Data Source**: `tencentcloud_api_gateway_apis` +* **New Data Source**: `tencentcloud_api_gateway_services` +* **New Data Source**: `tencentcloud_api_gateway_throttling_apis` +* **New Data Source**: `tencentcloud_api_gateway_throttling_services` +* **New Data Source**: `tencentcloud_api_gateway_usage_plans` +* **New Data Source**: `tencentcloud_api_gateway_ip_strategies` +* **New Data Source**: `tencentcloud_api_gateway_customer_domains` +* **New Data Source**: `tencentcloud_api_gateway_usage_plan_environments` +* **New Data Source**: `tencentcloud_api_gateway_api_keys` + +## 1.45.3 (October 21, 2020) + +BUG 
FIXES: + +* Resource: `tencentcloud_sqlserver_instance` Fix the error of releasing associated resources when destroying sqlserver postpaid instance. +* Resource: `tencentcloud_sqlserver_readonly_instance` Fix the bug that the instance cannot be recycled when destroying sqlserver postpaid instance. +* Resource: `tencentcloud_clb_instance` fix force new when updating tags. +* Resource: `tencentcloud_redis_backup_config` fix doc issues. +* Resource: `tencentcloud_instance` fix `keep_image_login` force new issue when updating terraform version. +* Resource: `tencentcloud_clb_instance` fix tag creation bug. + +## 1.45.2 (October 19, 2020) + +BUG FIXES: +* Resource: `tencentcloud_mysql_instance` fix creating prepaid instance error. + +## 1.45.1 (October 16, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_clb_target_group_instance_attachment` update doc. +* Resource: `tencentcloud_clb_target_group_attachment` update doc. + +## 1.45.0 (October 15, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_clb_target_group_attachment` +* **New Resource**: `tencentcloud_clb_target_group` +* **New Resource**: `tencentcloud_clb_target_group_instance_attachment` +* **New Resource**: `tencentcloud_sqlserver_publish_subscribe` +* **New Resource**: `tencentcloud_vod_adaptive_dynamic_streaming_template` +* **New Resource**: `tencentcloud_vod_procedure_template` +* **New Resource**: `tencentcloud_vod_snapshot_by_time_offset_template` +* **New Resource**: `tencentcloud_vod_image_sprite_template` +* **New Resource**: `tencentcloud_vod_super_player_config` +* **New Data Source**: `tencentcloud_clb_target_groups` +* **New Data Source**: `tencentcloud_sqlserver_publish_subscribes` +* **New Data Source**: `tencentcloud_vod_adaptive_dynamic_streaming_templates` +* **New Data Source**: `tencentcloud_vod_image_sprite_templates` +* **New Data Source**: `tencentcloud_vod_procedure_templates` +* **New Data Source**: `tencentcloud_vod_snapshot_by_time_offset_templates` +* **New Data Source**: 
`tencentcloud_vod_super_player_configs` + +ENHANCEMENTS: + +* Resource: `tencentcloud_clb_listener_rule` add new argument `target_type` to support backend target type with rule. +* Resource: `tencentcloud_mysql_instance` modify argument `engine_version` to support mysql 8.0. +* Resource: `tencentcloud_clb_listener_rule` add new argument `forward_type` to support backend protocol([#522](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/522)). +* Resource: `tencentcloud_instance` add new argument `keep_image_login` to support keeping image login. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `extra_args` to support Kubelet. +* Resource: `tencentcloud_kubernetes_scale_worker` add new argument `extra_args` to support Kubelet. +* Resource: `tencentcloud_kubernetes_as_scaling_group` add new argument `extra_args` to support Kubelet. + +## 1.44.0 (September 25, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_cynosdb_cluster` +* **New Resource**: `tencentcloud_cynosdb_readonly_instance`. +* **New Data Source**: `tencentcloud_cynosdb_clusters` +* **New Data Source**: `tencentcloud_cynosdb_readonly_instances`. + +ENHANCEMENTS: + +* Resource: `tencentcloud_mongodb_standby_instance` change example type to `POSTPAID`. +* Resource: `tencentcloud_instance` add new argument `encrypt` to support data disk with encrypt. +* Resource: `tencentcloud_elasticsearch` add new argument `encrypt` to support disk with encrypt. +* Resource: `tencentcloud_kubernetes_cluster` add new argument `cam_role_name` to support authorization with instances. 
+ +## 1.43.0 (September 18, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_image` +* **New Resource**: `tencentcloud_audit` +* **New Data Source**: `tencentcloud_audits` +* **New Data Source**: `tencentcloud_audit_cos_regions` +* **New Data Source**: `tencentcloud_audit_key_alias` + +ENHANCEMENTS: + +* Resource: `tencentcloud_instance` add new argument `data_disk_snapshot_id` to support data disk with `SnapshotId`([#469](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/469)) +* Data Source: `tencentcloud_instances` support filter by tags. + +## 1.42.2 (September 14, 2020) + +BUG FIXES: +* Resource: `tencentcloud_instance` fix `key_name` update error([#515](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/515)). + +## 1.42.1 (September 10, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_mongodb_instance` Fix the error of releasing associated resources when destroying mongodb postpaid instance. +* Resource: `tencentcloud_mongodb_sharding_instance` Fix the error of releasing associated resources when destroying mongodb postpaid sharding instance. +* Resource: `tencentcloud_mongodb_standby_instance` Fix the error of releasing associated resources when destroying mongodb postpaid standby instance. + +## 1.42.0 (September 8, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_ckafka_topic` +* **New Data Source**: `tencentcloud_ckafka_topics` + +ENHANCEMENTS: + +* Doc: optimize document directory. +* Resource: `tencentcloud_mongodb_instance`, `tencentcloud_mongodb_sharding_instance` and `tencentcloud_mongodb_standby_instance` remove system reserved tag `project`. + +## 1.41.3 (September 3, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_vpc_acl_attachment` perfect example field `subnet_ids` to `subnet_id`([#505](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/505)). +* Resource: `tencentcloud_cbs_storage_attachment` support import. 
+* Resource: `tencentcloud_eip_association` support import. +* Resource: `tencentcloud_route_table_entry` support import. +* Resource: `tencentcloud_acl_attachment` support import. + +## 1.41.2 (August 28, 2020) + +BUG FIXES: +* Resource: `tencentcloud_vpn_connection` fix `security_group_policy` update issue when apply repeatedly. +* Resource: `tencentcloud_vpn_connection` fix inconsistent state when deleted on console. + +## 1.41.1 (August 27, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_vpn_gateway` fix force new issue when apply repeatedly. +* Resource: `tencentcloud_vpn_connection` fix force new issue when apply repeatedly. +* Resource: `tencentcloud_instance` support for adjusting `internet_max_bandwidth_out` without forceNew when attribute `internet_charge_type` within `TRAFFIC_POSTPAID_BY_HOUR`,`BANDWIDTH_POSTPAID_BY_HOUR`,`BANDWIDTH_PACKAGE` ([#498](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/498)). + +## 1.41.0 (August 17, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_sqlserver_instance` +* **New Resource**: `tencentcloud_sqlserver_readonly_instance` +* **New Resource**: `tencentcloud_sqlserver_db` +* **New Resource**: `tencentcloud_sqlserver_account` +* **New Resource**: `tencentcloud_sqlserver_db_account_attachment` +* **New Resource**: `tencentcloud_vpc_acl` +* **New Resource**: `tencentcloud_vpc_acl_attachment` +* **New Resource**: `tencentcloud_ckafka_acl` +* **New Resource**: `tencentcloud_ckafka_user` +* **New Data Source**: `tencentcloud_sqlserver_instance` +* **New Data Source**: `tencentcloud_sqlserver_readonly_groups` +* **New Data Source**: `tencentcloud_vpc_acls` +* **New Data Source**: `tencentcloud_ckafka_acls` +* **New Data Source**: `tencentcloud_ckafka_users` + +DEPRECATED: + +* Data Source: `tencentcloud_cdn_domains` optional argument `offset` is no longer supported. 
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_mongodb_instance`, `tencentcloud_mongodb_sharding_instance` and `tencentcloud_mongodb_standby_instance` remove spec update validation.
+
+## 1.40.3 (August 11, 2020)
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_kubernetes_clusters` add new attributes `cluster_as_enabled`,`node_name_type`,`cluster_extra_args`,`network_type`,`is_non_static_ip_mode`,`kube_proxy_mode`,`service_cidr`,`eni_subnet_ids`,`claim_expired_seconds` and `deletion_protection`.
+
+BUG FIXES:
+
+* Resource: `tencentcloud_vpn_gateway` fix creation of instance when `vpc_id` is specified.
+* Resource: `tencentcloud_vpn_connection` fix creation of instance when `vpc_id` is specified.
+* Resource: `tencentcloud_instance` fix `internet_charge_type` inconsistency when public ip is not allocated.
+
+## 1.40.2 (August 08, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_instance` fix accidentally fail to delete prepaid instance ([#485](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/485)).
+
+## 1.40.1 (August 05, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_vpn_connection` fix multi `security_group_policy` is not supported ([#487](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/487)).
+
+## 1.40.0 (July 31, 2020)
+
+FEATURES:
+
+* **New Resource**: `tencentcloud_mongodb_standby_instance`
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_gaap_http_rule` argument `realservers` is now optional.
+* Resource: `tencentcloud_kubernetes_cluster` supports multiple `availability_zone`.
+* Data Source: `tencentcloud_mongodb_instances` add new argument `charge_type` and `auto_renew_flag` to support prepaid type.
+* Resource: `tencentcloud_mongodb_instance` supports prepaid type, new mongodb SDK version `2019-07-25` and standby instance.
+* Resource: `tencentcloud_mongodb_sharding_instance` supports prepaid type, new mongodb SDK version `2019-07-25` and standby instance.
+* Resource: `tencentcloud_security_group_lite_rule` refine update process and doc. + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix set `key_name` error. + +## 1.39.0 (July 18, 2020) + +ENHANCEMENTS: + +* upgrade terraform 0.13 +* update readme to new repository + +## 1.38.3 (July 13, 2020) + +ENHANCEMENTS: + +* Data Source: `tencentcloud_images` supports list of snapshots. +* Resource: `tencentcloud_kubernetes_cluster_attachment` add new argument `worker_config` to support config with existing instances. +* Resource: `tencentcloud_ccn` add new argument `tags` to support tags settings. +* Resource: `tencentcloud_cfs_file_system` add new argument `tags` to support tags settings. + +BUG FIXES: + +* Resource: `tencentcloud_gaap_layer4_listener` fix error InvalidParameter when destroy resource. +* Resource: `tencentcloud_gaap_layer7_listener` fix error InvalidParameter when destroy resource. +* Resource: `tencentcloud_cdn_domain` fix incorrect setting `server_certificate_config`, `client_certificate_config` caused the program to crash. + +## 1.38.2 (July 03, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix `allocate_public_ip` inconsistency when eip is attached to the cvm. +* Resource: `tencentcloud_mysql_instance` fix auto-forcenew on `charge_type` and `pay_type` when upgrading terraform version. ([#459](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/pull/459)). + +## 1.38.1 (June 30, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_cos_bucket` fix creation failure. + +## 1.38.0 (June 29, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_cdn_domains` + +BUG FIXES: + +* Resource: `tencentcloud_gaap_http_domain` fix a condition for setting client certificate ids([#454](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/pull/454)). 
+ +## 1.37.0 (June 23, 2020) + +FEATURES: +* **New Resource**: `tencentcloud_postgresql_instance` +* **New Data Source**: `tencentcloud_postgresql_instances` +* **New Data Source**: `tencentcloud_postgresql_speccodes` +* **New Data Source**: `tencentcloud_sqlserver_zone_config` + +ENHANCEMENTS: + +* Resource: `tencentcloud_mongodb_instance` support more machine type. + +## 1.36.1 (June 12, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add new argument `labels`. +* Resource: `tencentcloud_kubernetes_as_scaling_group` add new argument `labels`. +* Resource: `tencentcloud_cos_bucket` add new arguments `encryption_algorithm` and `versioning_enable`. + +## 1.36.0 (June 08, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_availability_regions` + +ENHANCEMENTS: + +* Data Source: `tencentcloud_redis_instances` add new argument `charge_type` to support prepaid type. +* Resource: `tencentcloud_redis_instance` add new argument `charge_type`, `prepaid_period` and `force_delete` to support prepaid type. +* Resource: `tencentcloud_mysql_instance` add new argument `force_delete` to support soft deletion. +* Resource: `tencentcloud_mysql_readonly_instance` add new argument `force_delete` to support soft deletion. + +BUG FIXES: + +* Resource: `tencentcloud_instance` fix `allocate_public_ip` inconsistency when eip is attached to the cvm. + +DEPRECATED: +* Data Source: `tencentcloud_mysql_instances`: optional argument `pay_type` is no longer supported, replace by `charge_type`. +* Resource: `tencentcloud_mysql_instance`: optional arguments `pay_type` and `period` are no longer supported, replace by `charge_type` and `prepaid_period`. +* Resource: `tencentcloud_mysql_readonly_instance`: optional arguments `pay_type` and `period` are no longer supported, replace by `charge_type` and `prepaid_period`. 
+* Resource: `tencentcloud_tcaplus_group` replace by `tencentcloud_tcaplus_tablegroup` +* Data Source: `tencentcloud_tcaplus_groups` replace by `tencentcloud_tcaplus_tablegroups` +* Resource: `tencentcloud_tcaplus_tablegroup`,`tencentcloud_tcaplus_idl` and `tencentcloud_tcaplus_table` arguments `group_id`/`group_name` replace by `tablegroup_id`/`tablegroup_name` +* Data Source: `tencentcloud_tcaplus_groups`,`tencentcloud_tcaplus_idls` and `tencentcloud_tcaplus_tables` arguments `group_id`/`group_name` replace by `tablegroup_id`/`tablegroup_name` + +## 1.35.1 (June 02, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_as_scaling_config`, `tencentcloud_eip` and `tencentcloud_kubernetes_cluster` remove the validate function of `internet_max_bandwidth_out`. +* Resource: `tencentcloud_vpn_gateway` update available value of `bandwidth`. + +## 1.35.0 (June 01, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_elasticsearch_instances` +* **New Resource**: `tencentcloud_elasticsearch_instance` + +## 1.34.0 (May 28, 2020) + +ENHANCEMENTS: + +* upgrade terraform-plugin-sdk + +## 1.33.2 (May 25, 2020) + +DEPRECATED: +* Data Source: `tencentcloud_tcaplus_applications` replace by `tencentcloud_tcaplus_clusters`,optional arguments `app_id` and `app_name` are no longer supported, replace by `cluster_id` and `cluster_name` +* Data Source: `tencentcloud_tcaplus_zones` replace by `tencentcloud_tcaplus_groups`,optional arguments `app_id`,`zone_id` and `zone_name` are no longer supported, replace by `cluster_id`,`group_id` and `cluster_name` +* Data Source: `tencentcloud_tcaplus_tables` optional arguments `app_id` and `zone_id` are no longer supported, replace by `cluster_id` and `group_id` +* Data Source: `tencentcloud_tcaplus_idls`: optional argument `app_id` is no longer supported, replace by `cluster_id`. 
+* Resource: `tencentcloud_tcaplus_application` replace by `tencentcloud_tcaplus_cluster`,input argument `app_name` is no longer supported, replace by `cluster_name` +* Resource: `tencentcloud_tcaplus_zone` replace by `tencentcloud_tcaplus_group`, input arguments `app_id` and `zone_name` are no longer supported, replace by `cluster_id` and `group_name` +* Resource: `tencentcloud_tcaplus_idl` input arguments `app_id` and `zone_id` are no longer supported, replace by `cluster_id` and `group_id` +* Resource: `tencentcloud_tcaplus_table` input arguments `app_id`and `zone_id` are no longer supported, replace by `cluster_id` and `group_id` +* Resource: `tencentcloud_redis_instance`: optional argument `type` is no longer supported, replace by `type_id`. +* Data Source: `tencentcloud_redis_instances`: output argument `type` is no longer supported, replace by `type_id`. +* Data Source: `tencentcloud_redis_zone_config`: output argument `type` is no longer supported, replace by `type_id`. + +## 1.33.1 (May 22, 2020) + +ENHANCEMENTS: + +* Data Source: `tencentcloud_redis_instances` add new argument `type_id`, `redis_shard_num`, `redis_replicas_num` +* Data Source: `tencentcloud_redis_zone_config` add output argument `type_id` and new output argument `type_id`, `redis_shard_nums`, `redis_replicas_nums` +* Data Source: `tencentcloud_ccn_instances` add new type `VPNGW` for field `instance_type` +* Data Source: `tencentcloud_vpn_gateways` add new type `CCN` for field `type` +* Resource: `tencentcloud_redis_instance` add new argument `type_id`, `redis_shard_num`, `redis_replicas_num` +* Resource: `tencentcloud_ccn_attachment` add new type `CNN_INSTANCE_TYPE_VPNGW` for field `instance_type` +* Resource: `tencentcloud_vpn_gateway` add new type `CCN` for field `type` + +BUG FIXES: + +* Resource: `tencentcloud_cdn_domain` fix `https_config` inconsistency after apply([#413](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/413)). 
+ +DEPRECATED: + +* Resource: `tencentcloud_redis_instance`: optional argument `type` is no longer supported, replace by `type_id`. +* Data Source: `tencentcloud_redis_instances`: output argument `type` is no longer supported, replace by `type_id`. +* Data Source: `tencentcloud_redis_zone_config`: output argument `type` is no longer supported, replace by `type_id`. + +## 1.33.0 (May 18, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_monitor_policy_conditions` +* **New Data Source**: `tencentcloud_monitor_data` +* **New Data Source**: `tencentcloud_monitor_product_event` +* **New Data Source**: `tencentcloud_monitor_binding_objects` +* **New Data Source**: `tencentcloud_monitor_policy_groups` +* **New Data Source**: `tencentcloud_monitor_product_namespace` +* **New Resource**: `tencentcloud_monitor_policy_group` +* **New Resource**: `tencentcloud_monitor_binding_object` +* **New Resource**: `tencentcloud_monitor_binding_receiver` + +ENHANCEMENTS: + +* Data Source: `tencentcloud_instances` add new output argument `instance_charge_type_prepaid_renew_flag`. +* Data Source: `tencentcloud_cbs_storages` add new output argument `prepaid_renew_flag`. +* Data Source: `tencentcloud_cbs_storages` add new output argument `charge_type`. +* Resource: `tencentcloud_instance` support update with argument `instance_charge_type_prepaid_renew_flag`. +* Resource: `tencentcloud_cbs_storage` add new argument `force_delete`. +* Resource: `tencentcloud_cbs_storage` add new argument `charge_type`. +* Resource: `tencentcloud_cbs_storage` add new argument `prepaid_renew_flag`. +* Resource: `tencentcloud_cdn_domain` add new argument `full_url_cache`([#405](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/405)). + +DEPRECATED: + +* Resource: `tencentcloud_cbs_storage`: optional argument `period` is no longer supported. + +## 1.32.1 (April 30, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_ccn_attachment` add new argument `ccn_uin`. 
+* Resource: `tencentcloud_instance` add new argument `force_delete`. + +BUG FIXES: + +* Resource: `tencentcloud_scf_function` fix update `zip_file`. + +## 1.32.0 (April 20, 2020) + +FEATURES: + +* **New Resource**: `tencentcloud_kubernetes_cluster_attachment`([#285](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/285)). + +ENHANCEMENTS: + +* Resource: `tencentcloud_cdn_domain` add new attribute `cname`([#395](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/395)). + +BUG FIXES: + +* Resource: `tencentcloud_cos_bucket_object` mark the object as destroyed when the object not exist. + +## 1.31.2 (April 17, 2020) + +ENHANCEMENTS: + +* Resource: `tencentcloud_cbs_storage` support modify `tags`. + +## 1.31.1 (April 14, 2020) + +BUG FIXES: + +* Resource: `tencentcloud_keypair` fix bug when trying to destroy resources containing CVM and key pair([#375](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/375)). +* Resource: `tencentcloud_clb_attachment` fix bug when trying to destroy multiple attachments in the array. +* Resource: `tencentcloud_cam_group_membership` fix bug when trying to destroy multiple users in the array. + +ENHANCEMENTS: + +* Resource: `tencentcloud_mysql_account` add new argument `host`([#372](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/372)). +* Resource: `tencentcloud_mysql_account_privilege` add new argument `account_host`([#372](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/372)). +* Resource: `tencentcloud_mysql_privilege` add new argument `account_host`([#372](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/372)). +* Resource: `tencentcloud_mysql_readonly_instance` check master monitor data before create([#379](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/379)). +* Resource: `tencentcloud_tcaplus_application` remove the pull password from server. 
+* Resource: `tencentcloud_instance` support import `allocate_public_ip`([#382](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/382)).
+* Resource: `tencentcloud_redis_instance` add two redis types.
+* Data Source: `tencentcloud_vpc_instances` add new argument `cidr_block`,`tag_key` ([#378](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/378)).
+* Data Source: `tencentcloud_vpc_route_tables` add new argument `tag_key`,`vpc_id`,`association_main` ([#378](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/378)).
+* Data Source: `tencentcloud_vpc_subnets` add new argument `cidr_block`,`tag_key`,`is_remote_vpc_snat` ([#378](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/378)).
+* Data Source: `tencentcloud_mysql_zone_config` and `tencentcloud_redis_zone_config` remove region check.
+
+## 1.31.0 (April 07, 2020)
+
+FEATURES:
+
+* **New Resource**: `tencentcloud_cdn_domain`
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_cam_users` add new argument `user_id`.
+* Resource: `tencentcloud_vpc` add retry logic.
+
+BUG FIXES:
+
+* Resource: `tencentcloud_instance` fix timeout error when modifying the password.
+
+## 1.30.7 (March 31, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_kubernetes_as_scaling_group` setting a value to argument `key_ids` causes an error.
+
+## 1.30.6 (March 30, 2020)
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_tcaplus_idl` add new argument `zone_id`.
+* Resource: `tencentcloud_cam_user` add new argument `force_delete`.([#354](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/354))
+* Data Source: `tencentcloud_vpc_subnets` add new argument `vpc_id`.
+
+## 1.30.5 (March 19, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_key_pair` will be replaced when `public_key` contains a comment.
+* Resource: `tencentcloud_scf_function` fix upload local file error.
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_scf_function` runtime support nodejs8.9 and nodejs10.15.
+
+## 1.30.4 (March 10, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_cam_policy` fix read nil issue when the resource does not exist ([#344](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/344)).
+* Resource: `tencentcloud_key_pair` will be replaced when the end of `public_key` contains spaces([#343](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/343)).
+* Resource: `tencentcloud_scf_function` fix trigger does not support cos_region.
+
+ENHANCEMENTS:
+
+* Resource: `tencentcloud_kubernetes_cluster` add new attributes `cluster_os_type`,`cluster_internet`,`cluster_intranet`,`managed_cluster_internet_security_policies` and `cluster_intranet_subnet_id`.
+
+
+## 1.30.3 (February 24, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_instance` fix that classic network is not supported([#339](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/339)).
+
+## 1.30.2 (February 17, 2020)
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_cam_policies` add new attribute `policy_id`.
+* Data Source: `tencentcloud_cam_groups` add new attribute `group_id`.
+
+## 1.30.1 (January 21, 2020)
+
+BUG FIXES:
+
+* Resource: `tencentcloud_dnat` fix `elastic_port` and `internal_port` type error.
+* Resource: `tencentcloud_vpn_gateway` fix `state` type error.
+* Resource: `tencentcloud_dayu_ddos_policy` fix that `white_ips` and `black_ips` can not be updated.
+* Resource: `tencentcloud_dayu_l4_rule` fix that rule parameters can not be updated.
+
+ENHANCEMENTS:
+
+* Data Source: `tencentcloud_key_pairs` support regular expression search by name.
+ +## 1.30.0 (January 14, 2020) + +FEATURES: + +* **New Data Source**: `tencentcloud_dayu_cc_http_policies` +* **New Data Source**: `tencentcloud_dayu_cc_https_policies` +* **New Data Source**: `tencentcloud_dayu_ddos_policies` +* **New Data Source**: `tencentcloud_dayu_ddos_policy_attachments` +* **New Data Source**: `tencentcloud_dayu_ddos_policy_cases` +* **New Data Source**: `tencentcloud_dayu_l4_rules` +* **New Data Source**: `tencentcloud_dayu_l7_rules` +* **New Resource**: `tencentcloud_dayu_cc_http_policy` +* **New Resource**: `tencentcloud_dayu_cc_https_policy` +* **New Resource**: `tencentcloud_dayu_ddos_policy` +* **New Resource**: `tencentcloud_dayu_ddos_policy_attachment` +* **New Resource**: `tencentcloud_dayu_ddos_policy_case` +* **New Resource**: `tencentcloud_dayu_l4_rule` +* **New Resource**: `tencentcloud_dayu_l7_rule` + +BUG FIXES: + +* gaap: optimize gaap describe: when describe resource by id but get more than 1 resources, return error directly instead of using the first result +* Resource: `tencentcloud_eni_attachment` fix detach may failed. +* Resource: `tencentcloud_instance` remove the tag that be added by as attachment automatically([#300](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/300)). +* Resource: `tencentcloud_clb_listener` fix `sni_switch` type error([#297](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/297)). +* Resource: `tencentcloud_vpn_gateway` shows argument `prepaid_renew_flag` has changed when applied again([#298](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/298)). +* Resource: `tencentcloud_clb_instance` fix the bug that instance id is not set in state file([#303](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/303)). +* Resource: `tencentcloud_vpn_gateway` that is postpaid charge type cannot be deleted normally([#312](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/312)). 
+* Resource: `tencentcloud_vpn_gateway` add `InternalError` SDK error to trigger the retry process.
+* Resource: `tencentcloud_vpn_gateway` fix read nil issue when the resource does not exist.
+* Resource: `tencentcloud_clb_listener_rule` fix unclear error message of SSL type error.
+* Resource: `tencentcloud_ha_vip_attachment` fix read nil issue when the resource does not exist.
+* Data Source: `tencentcloud_security_group` fix `project_id` type error.
+* Data Source: `tencentcloud_security_groups` fix `project_id` filter does not work([#314](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/314)).
+
+## 1.29.0 (January 06, 2020)
+
+FEATURES:
+
+* **New Data Source**: `tencentcloud_gaap_domain_error_pages`
+* **New Resource**: `tencentcloud_gaap_domain_error_page`
+
+ENHANCEMENTS:
+* Data Source: `tencentcloud_vpc_instances` add new optional argument `is_default`.
+* Data Source: `tencentcloud_vpc_subnets` add new optional argument `availability_zone`,`is_default`.
+
+BUG FIXES:
+* Resource: `tencentcloud_redis_instance` field security_groups are id list, not name list([#291](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/291)).
+
+## 1.28.0 (December 25, 2019)
+
+FEATURES:
+
+* **New Data Source**: `tencentcloud_cbs_snapshot_policies`
+* **New Resource**: `tencentcloud_cbs_snapshot_policy_attachment`
+
+ENHANCEMENTS:
+
+* doc: rewrite website index
+* Resource: `tencentcloud_instance` support modifying instance type([#251](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/251)).
+* Resource: `tencentcloud_gaap_http_domain` add new optional argument `realserver_certificate_ids`.
+* Data Source: `tencentcloud_gaap_http_domains` add new output argument `realserver_certificate_ids`.
+
+DEPRECATED:
+
+* Resource: `tencentcloud_gaap_http_domain`: optional argument `realserver_certificate_id` is no longer supported.
+* Data Source: `tencentcloud_gaap_http_domains`: output argument `realserver_certificate_id` is no longer supported. + +## 1.27.0 (December 17, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_tcaplus_applications` +* **New Data Source**: `tencentcloud_tcaplus_zones` +* **New Data Source**: `tencentcloud_tcaplus_tables` +* **New Data Source**: `tencentcloud_tcaplus_idls` +* **New Resource**: `tencentcloud_tcaplus_application` +* **New Resource**: `tencentcloud_tcaplus_zone` +* **New Resource**: `tencentcloud_tcaplus_idl` +* **New Resource**: `tencentcloud_tcaplus_table` + +ENHANCEMENTS: + +* Resource: `tencentcloud_mongodb_instance` support more instance type([#241](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/241)). +* Resource: `tencentcloud_kubernetes_cluster` support more instance type([#237](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/237)). + +BUG FIXES: + +* Fix bug that resource `tencentcloud_instance` delete error when instance launch failed. +* Fix bug that resource `tencentcloud_security_group` read error when response is InternalError. +* Fix bug that the type of `cluster_type` is wrong in data source `tencentcloud_mongodb_instances`([#242](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/242)). +* Fix bug that resource `tencentcloud_eip` unattach error([#233](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/233)). +* Fix bug that terraform read nil attachment resource when the attached resource of attachment resource is removed of resource CLB and CAM. +* Fix doc example error of resource `tencentcloud_nat_gateway`. + +DEPRECATED: + +* Resource: `tencentcloud_eip`: optional argument `applicable_for_clb` is no longer supported. + +## 1.26.0 (December 09, 2019) + +FEATURES: + +* **New Resource**: `tencentcloud_mysql_privilege`([#223](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/223)). 
+* **New Resource**: `tencentcloud_kubernetes_as_scaling_group`([#202](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/202)). + +ENHANCEMENTS: + +* Resource: `tencentcloud_gaap_layer4_listener` support import. +* Resource: `tencentcloud_gaap_http_rule` support import. +* Resource: `tencentcloud_gaap_security_rule` support import. +* Resource: `tencentcloud_gaap_http_domain` add new optional argument `client_certificate_ids`. +* Resource: `tencentcloud_gaap_layer7_listener` add new optional argument `client_certificate_ids`. +* Data Source: `tencentcloud_gaap_http_domains` add new output argument `client_certificate_ids`. +* Data Source: `tencentcloud_gaap_layer7_listeners` add new output argument `client_certificate_ids`. + +DEPRECATED: + +* Resource: `tencentcloud_gaap_http_domain`: optional argument `client_certificate_id` is no longer supported. +* Resource: `tencentcloud_gaap_layer7_listener`: optional argument `client_certificate_id` is no longer supported. +* Resource: `tencentcloud_mysql_account_privilege` replaced by `tencentcloud_mysql_privilege`. +* Data Source: `tencentcloud_gaap_http_domains`: output argument `client_certificate_id` is no longer supported. +* Data Source: `tencentcloud_gaap_layer7_listeners`: output argument `client_certificate_id` is no longer supported. + +BUG FIXES: + +* Fix bug that resource `tencentcloud_clb_listener` 's unchangeable `health_check_switch`([#235](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/235)). +* Fix bug that resource `tencentcloud_clb_instance` read nil and report error. +* Fix example errors of resource `tencentcloud_cbs_snapshot_policy` and data source `tencentcloud_dnats`. + +## 1.25.2 (December 04, 2019) + +BUG FIXES: +* Fixed bug that the validator of cvm instance type is incorrect. + +## 1.25.1 (December 03, 2019) + +ENHANCEMENTS: +* Optimized error message of validators. 
+ +BUG FIXES: +* Fixed bug that the type of `state` is incorrect in data source `tencentcloud_nat_gateways`([#226](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/226)). +* Fixed bug that the value of `cluster_max_pod_num` is incorrect in resource `tencentcloud_kubernetes_cluster`([#228](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/228)). + + +## 1.25.0 (December 02, 2019) + +ENHANCEMENTS: + +* Resource: `tencentcloud_instance` support `SPOTPAID` instance. Thanks to @LipingMao ([#209](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/209)). +* Resource: `tencentcloud_vpn_gateway` add argument `prepaid_renew_flag` and `prepaid_period` to support prepaid VPN gateway instance creation. + +BUG FIXES: +* Fixed bugs that update operations on `tencentcloud_cam_policy` do not work. +* Fixed bugs that filters on `tencentcloud_cam_users` do not work. + +DEPRECATED: + * Data Source: `tencentcloud_cam_user_policy_attachments`:`policy_type` is no longer supported. + * Data Source: `tencentcloud_cam_group_policy_attachments`:`policy_type` is no longer supported. + +## 1.24.1 (November 26, 2019) + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` add support for `PREPAID` instance type. Thanks to @woodylic ([#204](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/204)). 
+* Resource: `tencentcloud_cos_bucket` add optional argument tags +* Data Source: `tencentcloud_cos_buckets` add optional argument tags + +BUG FIXES: +* Fixed docs issues of `tencentcloud_nat_gateway` + +## 1.24.0 (November 20, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_ha_vips` +* **New Data Source**: `tencentcloud_ha_vip_eip_attachments` +* **New Resource**: `tencentcloud_ha_vip` +* **New Resource**: `tencentcloud_ha_vip_eip_attachment` + +ENHANCEMENTS: + +* Resource: `tencentcloud_kubernetes_cluster` cluster_os add new support: `centos7.6x86_64` and `ubuntu18.04.1 LTSx86_64` +* Resource: `tencentcloud_nat_gateway` add computed argument `created_time`. + +BUG FIXES: + +* Fixed docs issues of CAM, DNAT and NAT_GATEWAY +* Fixed query issue that paged-query was not supported in data source `tencentcloud_dnats` +* Fixed query issue that filter `address_ip` was set incorrectly in data source `tencentcloud_eips` + +## 1.23.0 (November 14, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_images` +* **New Data Source**: `tencentcloud_vpn_gateways` +* **New Data Source**: `tencentcloud_customer_gateways` +* **New Data Source**: `tencentcloud_vpn_connections` +* **New Resource**: `tencentcloud_vpn_gateway` +* **New Resource**: `tencentcloud_customer_gateway` +* **New Resource**: `tencentcloud_vpn_connection` +* **Provider TencentCloud**: add `security_token` argument + +ENHANCEMENTS: + +* All api calls now using api3.0 +* Resource: `tencentcloud_eip` add optional argument `tags`. +* Data Source: `tencentcloud_eips` add optional argument `tags`. 
+ +BUG FIXES: + +* Fixed docs of CAM + +## 1.22.0 (November 05, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_cfs_file_systems` +* **New Data Source**: `tencentcloud_cfs_access_groups` +* **New Data Source**: `tencentcloud_cfs_access_rules` +* **New Data Source**: `tencentcloud_scf_functions` +* **New Data Source**: `tencentcloud_scf_namespaces` +* **New Data Source**: `tencentcloud_scf_logs` +* **New Resource**: `tencentcloud_cfs_file_system` +* **New Resource**: `tencentcloud_cfs_access_group` +* **New Resource**: `tencentcloud_cfs_access_rule` +* **New Resource**: `tencentcloud_scf_function` +* **New Resource**: `tencentcloud_scf_namespace` + +## 1.21.2 (October 29, 2019) + +BUG FIXES: + +* Resource: `tencentcloud_gaap_realserver` add ip/domain exists check +* Resource: `tencentcloud_kubernetes_cluster` add error handling logic and optional argument `tags`. +* Resource: `tencentcloud_kubernetes_scale_worker` add error handling logic. +* Data Source: `tencentcloud_kubernetes_clusters` add optional argument `tags`. 
+ +## 1.21.1 (October 23, 2019) + +ENHANCEMENTS: + +* Updated golang to version 1.13.x + +BUG FIXES: + +* Fixed docs of CAM + +## 1.21.0 (October 15, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_cam_users` +* **New Data Source**: `tencentcloud_cam_groups` +* **New Data Source**: `tencentcloud_cam_policies` +* **New Data Source**: `tencentcloud_cam_roles` +* **New Data Source**: `tencentcloud_cam_user_policy_attachments` +* **New Data Source**: `tencentcloud_cam_group_policy_attachments` +* **New Data Source**: `tencentcloud_cam_role_policy_attachments` +* **New Data Source**: `tencentcloud_cam_group_memberships` +* **New Data Source**: `tencentcloud_cam_saml_providers` +* **New Data Source**: `tencentcloud_reserved_instance_configs` +* **New Data Source**: `tencentcloud_reserved_instances` +* **New Resource**: `tencentcloud_cam_user` +* **New Resource**: `tencentcloud_cam_group` +* **New Resource**: `tencentcloud_cam_role` +* **New Resource**: `tencentcloud_cam_policy` +* **New Resource**: `tencentcloud_cam_user_policy_attachment` +* **New Resource**: `tencentcloud_cam_group_policy_attachment` +* **New Resource**: `tencentcloud_cam_role_policy_attachment` +* **New Resource**: `tencentcloud_cam_group_membership` +* **New Resource**: `tencentcloud_cam_saml_provider` +* **New Resource**: `tencentcloud_reserved_instance` + +ENHANCEMENTS: + +* Resource: `tencentcloud_gaap_http_domain` support import +* Resource: `tencentcloud_gaap_layer7_listener` support import + +BUG FIXES: + +* Resource: `tencentcloud_gaap_http_domain` fix sometimes can't enable realserver auth + +## 1.20.1 (October 08, 2019) + +ENHANCEMENTS: + +* Data Source: `tencentcloud_availability_zones` refactor logic with api3.0 . +* Data Source: `tencentcloud_as_scaling_groups` add optional argument `tags` and attribute `tags` for `scaling_group_list`. +* Resource: `tencentcloud_eip` add optional argument `type`, `anycast_zone`, `internet_service_provider`, etc. 
+* Resource: `tencentcloud_as_scaling_group` add optional argument `tags`. + +BUG FIXES: + +* Data Source: `tencentcloud_gaap_http_domains` set response `certificate_id`, `client_certificate_id`, `realserver_auth`, `basic_auth` and `gaap_auth` default value when they are nil. +* Resource: `tencentcloud_gaap_http_domain` set response `certificate_id`, `client_certificate_id`, `realserver_auth`, `basic_auth` and `gaap_auth` default value when they are nil. + +## 1.20.0 (September 24, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_eips` +* **New Data Source**: `tencentcloud_instances` +* **New Data Source**: `tencentcloud_key_pairs` +* **New Data Source**: `tencentcloud_placement_groups` +* **New Resource**: `tencentcloud_placement_group` + +ENHANCEMENTS: + +* Data Source: `tencentcloud_redis_instances` add optional argument `tags`. +* Data Source: `tencentcloud_mongodb_instances` add optional argument `tags`. +* Data Source: `tencentcloud_instance_types` add optional argument `availability_zone` and `gpu_core_count`. +* Data Source: `tencentcloud_gaap_http_rules` add optional argument `forward_host` and attributes `forward_host` in `rules`. +* Resource: `tencentcloud_redis_instance` add optional argument `tags`. +* Resource: `tencentcloud_mongodb_instance` add optional argument `tags`. +* Resource: `tencentcloud_mongodb_sharding_instance` add optional argument `tags`. +* Resource: `tencentcloud_instance` add optional argument `placement_group_id`. +* Resource: `tencentcloud_eip` refactor logic with api3.0 . +* Resource: `tencentcloud_eip_association` refactor logic with api3.0 . +* Resource: `tencentcloud_key_pair` refactor logic with api3.0 . +* Resource: `tencentcloud_gaap_http_rule` add optional argument `forward_host`. + +BUG FIXES: +* Resource: `tencentcloud_mysql_instance`: miss argument `availability_zone` causes the instance to be recreated. + +DEPRECATED: + +* Data Source: `tencentcloud_eip` replaced by `tencentcloud_eips`. 
+ +## 1.19.0 (September 19, 2019) + +FEATURES: + +* **New Resource**: `tencentcloud_security_group_lite_rule`. + +ENHANCEMENTS: + +* Data Source: `tencentcloud_security_groups`: add optional argument `tags`. +* Data Source: `tencentcloud_security_groups`: add optional argument `result_output_file` and new attributes `ingress`, `egress` for list `security_groups`. +* Resource: `tencentcloud_security_group`: add optional argument `tags`. +* Resource: `tencentcloud_as_scaling_config`: internet charge type support `BANDWIDTH_PREPAID`, `TRAFFIC_POSTPAID_BY_HOUR` and `BANDWIDTH_PACKAGE`. + +BUG FIXES: +* Resource: `tencentcloud_clb_listener_rule`: fix unclear description and errors in example. +* Resource: `tencentcloud_instance`: fix hostname is not work. + +## 1.18.1 (September 17, 2019) + +FEATURES: + +* **Update Data Source**: `tencentcloud_vpc_instances` add optional argument `tags` +* **Update Data Source**: `tencentcloud_vpc_subnets` add optional argument `tags` +* **Update Data Source**: `tencentcloud_route_tables` add optional argument `tags` +* **Update Resource**: `tencentcloud_vpc` add optional argument `tags` +* **Update Resource**: `tencentcloud_subnet` add optional argument `tags` +* **Update Resource**: `tencentcloud_route_table` add optional argument `tags` + +ENHANCEMENTS: + +* Data Source:`tencentcloud_kubernetes_clusters` support pull out authentication information for cluster access too. +* Resource:`tencentcloud_kubernetes_cluster` support pull out authentication information for cluster access. + +BUG FIXES: + +* Resource: `tencentcloud_mysql_instance`: when the mysql is abnormal state, read the basic information report error + +DEPRECATED: + +* Data Source: `tencentcloud_kubernetes_clusters`:`container_runtime` is no longer supported. 
+ +## 1.18.0 (September 10, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_ssl_certificates` +* **New Data Source**: `tencentcloud_dnats` +* **New Data Source**: `tencentcloud_nat_gateways` +* **New Resource**: `tencentcloud_ssl_certificate` +* **Update Resource**: `tencentcloud_clb_redirection` add optional argument `is_auto_rewrite` +* **Update Resource**: `tencentcloud_nat_gateway` , add more configurable items. +* **Update Resource**: `tencentcloud_nat` , add more configurable items. + +DEPRECATED: +* Data Source: `tencentcloud_nats` replaced by `tencentcloud_nat_gateways`. + +## 1.17.0 (September 04, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_gaap_proxies` +* **New Data Source**: `tencentcloud_gaap_realservers` +* **New Data Source**: `tencentcloud_gaap_layer4_listeners` +* **New Data Source**: `tencentcloud_gaap_layer7_listeners` +* **New Data Source**: `tencentcloud_gaap_http_domains` +* **New Data Source**: `tencentcloud_gaap_http_rules` +* **New Data Source**: `tencentcloud_gaap_security_policies` +* **New Data Source**: `tencentcloud_gaap_security_rules` +* **New Data Source**: `tencentcloud_gaap_certificates` +* **New Resource**: `tencentcloud_gaap_proxy` +* **New Resource**: `tencentcloud_gaap_realserver` +* **New Resource**: `tencentcloud_gaap_layer4_listener` +* **New Resource**: `tencentcloud_gaap_layer7_listener` +* **New Resource**: `tencentcloud_gaap_http_domain` +* **New Resource**: `tencentcloud_gaap_http_rule` +* **New Resource**: `tencentcloud_gaap_certificate` +* **New Resource**: `tencentcloud_gaap_security_policy` +* **New Resource**: `tencentcloud_gaap_security_rule` + +## 1.16.3 (August 30, 2019) + +BUG FIXES: + +* Resource: `tencentcloud_kubernetes_cluster`: cgi error retry. +* Resource: `tencentcloud_kubernetes_scale_worker`: cgi error retry. + +## 1.16.2 (August 28, 2019) + +BUG FIXES: + +* Resource: `tencentcloud_instance`: fixed cvm data disks missing computed.
+* Resource: `tencentcloud_mysql_backup_policy`: `backup_model` remove logical backup support. +* Resource: `tencentcloud_mysql_instance`: `tags` adapt to the new official api. + +## 1.16.1 (August 27, 2019) + +ENHANCEMENTS: +* `tencentcloud_instance`: refactor logic with api3.0 . + +## 1.16.0 (August 20, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_kubernetes_clusters` +* **New Resource**: `tencentcloud_kubernetes_scale_worker` +* **New Resource**: `tencentcloud_kubernetes_cluster` + +DEPRECATED: +* Data Source: `tencentcloud_container_clusters` replaced by `tencentcloud_kubernetes_clusters`. +* Data Source: `tencentcloud_container_cluster_instances` replaced by `tencentcloud_kubernetes_clusters`. +* Resource: `tencentcloud_container_cluster` replaced by `tencentcloud_kubernetes_cluster`. +* Resource: `tencentcloud_container_cluster_instance` replaced by `tencentcloud_kubernetes_scale_worker`. + +## 1.15.2 (August 14, 2019) + +ENHANCEMENTS: + +* `tencentcloud_as_scaling_group`: fixed issue that binding scaling group to load balancer does not work. +* `tencentcloud_clb_attachements`: rename `rewrite_source_rule_id` with `source_rule_id` and rename `rewrite_target_rule_id` with `target_rule_id`. 
+ +## 1.15.1 (August 13, 2019) + +ENHANCEMENTS: + +* `tencentcloud_instance`: changed `image_id` property to ForceNew ([#78](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/78)) +* `tencentcloud_instance`: improved with retry ([#82](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/82)) +* `tencentcloud_cbs_storages`: improved with retry ([#82](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/82)) +* `tencentcloud_clb_instance`: bug fixed and improved with retry ([#37](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/37)) + +## 1.15.0 (August 07, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_clb_instances` +* **New Data Source**: `tencentcloud_clb_listeners` +* **New Data Source**: `tencentcloud_clb_listener_rules` +* **New Data Source**: `tencentcloud_clb_attachments` +* **New Data Source**: `tencentcloud_clb_redirections` +* **New Resource**: `tencentcloud_clb_instance` +* **New Resource**: `tencentcloud_clb_listener` +* **New Resource**: `tencentcloud_clb_listener_rule` +* **New Resource**: `tencentcloud_clb_attachment` +* **New Resource**: `tencentcloud_clb_redirection` + +DEPRECATED: +* Resource: `tencentcloud_lb` replaced by `tencentcloud_clb_instance`. +* Resource: `tencentcloud_alb_server_attachment` replaced by `tencentcloud_clb_attachment`. + +## 1.14.1 (August 05, 2019) + +BUG FIXES: + +* resource/tencentcloud_security_group_rule: fixed security group rule id is not compatible with previous version.
+ +## 1.14.0 (July 30, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_security_groups` +* **New Data Source**: `tencentcloud_mongodb_instances` +* **New Data Source**: `tencentcloud_mongodb_zone_config` +* **New Resource**: `tencentcloud_mongodb_instance` +* **New Resource**: `tencentcloud_mongodb_sharding_instance` +* **Update Resource**: `tencentcloud_security_group_rule` add optional argument `description` + +DEPRECATED: +* Data Source: `tencentcloud_security_group` replaced by `tencentcloud_security_groups` + +ENHANCEMENTS: +* Refactoring security_group logic with api3.0 + +## 1.13.0 (July 23, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_dc_gateway_instances` +* **New Data Source**: `tencentcloud_dc_gateway_ccn_routes` +* **New Resource**: `tencentcloud_dc_gateway` +* **New Resource**: `tencentcloud_dc_gateway_ccn_route` + +## 1.12.0 (July 16, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_dc_instances` +* **New Data Source**: `tencentcloud_dcx_instances` +* **New Resource**: `tencentcloud_dcx` +* **UPDATE Resource**: `tencentcloud_mysql_instance` and `tencentcloud_mysql_readonly_instance` completely delete instance. + +BUG FIXES: + +* resource/tencentcloud_instance: fixed issue when data disks set as delete_with_instance does not work. +* resource/tencentcloud_instance: if managed public_ip manually, please don't define `allocate_public_ip` ([#62](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/62)). +* resource/tencentcloud_eip_association: fixed issue when instances were manually deleted ([#60](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/60)).
+* resource/tencentcloud_mysql_readonly_instance: remove an unsupported property `gtid` + +## 1.11.0 (July 02, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_ccn_instances` +* **New Data Source**: `tencentcloud_ccn_bandwidth_limits` +* **New Resource**: `tencentcloud_ccn` +* **New Resource**: `tencentcloud_ccn_attachment` +* **New Resource**: `tencentcloud_ccn_bandwidth_limit` + +## 1.10.0 (June 27, 2019) + +ENHANCEMENTS: + +* Refactoring vpc logic with api3.0 +* Refactoring cbs logic with api3.0 + +FEATURES: +* **New Data Source**: `tencentcloud_vpc_instances` +* **New Data Source**: `tencentcloud_vpc_subnets` +* **New Data Source**: `tencentcloud_vpc_route_tables` +* **New Data Source**: `tencentcloud_cbs_storages` +* **New Data Source**: `tencentcloud_cbs_snapshots` +* **New Resource**: `tencentcloud_route_table_entry` +* **New Resource**: `tencentcloud_cbs_snapshot_policy` +* **Update Resource**: `tencentcloud_vpc` , add more configurable items. +* **Update Resource**: `tencentcloud_subnet` , add more configurable items. +* **Update Resource**: `tencentcloud_route_table`, add more configurable items. +* **Update Resource**: `tencentcloud_cbs_storage`, add more configurable items. +* **Update Resource**: `tencentcloud_instance`: add optional argument `tags`. +* **Update Resource**: `tencentcloud_security_group_rule`: add optional argument `source_sgid`. + +DEPRECATED: +* Data Source: `tencentcloud_vpc` replaced by `tencentcloud_vpc_instances`. +* Data Source: `tencentcloud_subnet` replaced by `tencentcloud_vpc_subnets`. +* Data Source: `tencentcloud_route_table` replaced by `tencentcloud_vpc_route_tables`. +* Resource: `tencentcloud_route_entry` replaced by `tencentcloud_route_table_entry`. + +## 1.9.1 (June 24, 2019) + +BUG FIXES: + +* data/tencentcloud_instance: fixed vpc ip is in use error when re-creating with private ip ([#46](https://github.com/tencentcloudstack/terraform-provider-tencentcloud/issues/46)).
+ +## 1.9.0 (June 18, 2019) + +ENHANCEMENTS: + +* update to `v0.12.1` Terraform SDK version + +BUG FIXES: + +* data/tencentcloud_security_group: `project_id` returned by the remote API is sometimes of string type. +* resource/tencentcloud_security_group: just like `data/tencentcloud_security_group` + +## 1.8.0 (June 11, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_as_scaling_configs` +* **New Data Source**: `tencentcloud_as_scaling_groups` +* **New Data Source**: `tencentcloud_as_scaling_policies` +* **New Resource**: `tencentcloud_as_scaling_config` +* **New Resource**: `tencentcloud_as_scaling_group` +* **New Resource**: `tencentcloud_as_attachment` +* **New Resource**: `tencentcloud_as_scaling_policy` +* **New Resource**: `tencentcloud_as_schedule` +* **New Resource**: `tencentcloud_as_lifecycle_hook` +* **New Resource**: `tencentcloud_as_notification` + +## 1.7.0 (May 23, 2019) + +FEATURES: +* **New Data Source**: `tencentcloud_redis_zone_config` +* **New Data Source**: `tencentcloud_redis_instances` +* **New Resource**: `tencentcloud_redis_instance` +* **New Resource**: `tencentcloud_redis_backup_config` + +ENHANCEMENTS: + +* resource/tencentcloud_instance: Add `hostname`, `project_id`, `delete_with_instance` argument. +* Update tencentcloud-sdk-go to better support redis api.
+ +## 1.6.0 (May 15, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_cos_buckets` +* **New Data Source**: `tencentcloud_cos_bucket_object` +* **New Resource**: `tencentcloud_cos_bucket` +* **New Resource**: `tencentcloud_cos_bucket_object` + +ENHANCEMENTS: + +* Add the framework of auto generating terraform docs + +## 1.5.0 (April 26, 2019) + +FEATURES: + +* **New Data Source**: `tencentcloud_mysql_backup_list` +* **New Data Source**: `tencentcloud_mysql_zone_config` +* **New Data Source**: `tencentcloud_mysql_parameter_list` +* **New Data Source**: `tencentcloud_mysql_instance` +* **New Resource**: `tencentcloud_mysql_backup_policy` +* **New Resource**: `tencentcloud_mysql_account` +* **New Resource**: `tencentcloud_mysql_account_privilege` +* **New Resource**: `tencentcloud_mysql_instance` +* **New Resource**: `tencentcloud_mysql_readonly_instance` + +ENHANCEMENTS: + +* resource/tencentcloud_subnet: `route_table_id` now is an optional argument + +## 1.4.0 (April 12, 2019) + +ENHANCEMENTS: + +* data/tencentcloud_image: add `image_name` attribute to this data source. +* resource/tencentcloud_instance: data disk count limit now is upgrade from 1 to 10, as API has supported more disks. +* resource/tencentcloud_instance: PREPAID instance now can be deleted, but still have some limit in API. + +BUG FIXES: + +* resource/tencentcloud_instance: `allocate_public_ip` doesn't work properly when it is set to false.
+ +## 1.3.0 (March 12, 2019) + +FEATURES: + +* **New Resource**: `tencentcloud_lb` ([#3](https://github.com/terraform-providers/terraform-provider-scaffolding/issues/3)) + +ENHANCEMENTS: + +* resource/tencentcloud_instance: Add `user_data_raw` argument ([#4](https://github.com/terraform-providers/terraform-provider-scaffolding/issues/4)) + +## 1.2.2 (September 28, 2018) + +BUG FIXES: + +* resource/tencentcloud_cbs_storage: make name to be required ([#25](https://github.com/tencentyun/terraform-provider-tencentcloud/issues/25)) +* resource/tencentcloud_instance: support user data and private ip + +## 1.2.0 (April 3, 2018) + +FEATURES: + +* **New Resource**: `tencentcloud_container_cluster` +* **New Resource**: `tencentcloud_container_cluster_instance` +* **New Data Source**: `tencentcloud_container_clusters` +* **New Data Source**: `tencentcloud_container_cluster_instances` + +## 1.1.0 (March 9, 2018) + +FEATURES: + +* **New Resource**: `tencentcloud_eip` +* **New Resource**: `tencentcloud_eip_association` +* **New Data Source**: `tencentcloud_eip` +* **New Resource**: `tencentcloud_nat_gateway` +* **New Resource**: `tencentcloud_dnat` +* **New Data Source**: `tencentcloud_nats` +* **New Resource**: `tencentcloud_cbs_snapshot` +* **New Resource**: `tencentcloud_alb_server_attachment` + +## 1.0.0 (January 19, 2018) + +FEATURES: + +### CVM + +RESOURCES: + +* instance create +* instance read +* instance update + * reset instance + * reset password + * update instance name + * update security groups +* instance delete +* key pair create +* key pair read +* key pair delete + +DATA SOURCES: + +* image read +* instance\_type read +* zone read + +### VPC + +RESOURCES: + +* vpc create +* vpc read +* vpc update (update name) +* vpc delete +* subnet create +* subnet read +* subnet update (update name) +* subnet delete +* security group create +* security group read +* security group update (update name, description) +* security group delete +* security group rule create +* 
security group rule read +* security group rule delete +* route table create +* route table read +* route table update (update name) +* route table delete +* route entry create +* route entry read +* route entry delete + +DATA SOURCES: + +* vpc read +* subnet read +* security group read +* route table read + +### CBS + +RESOURCES: + +* storage create +* storage read +* storage update (update name) +* storage attach +* storage detach diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/LICENSE b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/LICENSE new file mode 100644 index 00000000..a612ad98 --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. "Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. 
"Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. "Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. 
Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. +Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. 
Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/README.md b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/README.md new file mode 100644 index 00000000..2e7239d8 --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/README.md @@ -0,0 +1,98 @@ +# terraform-provider-tencentcloud + +## Requirements + +* [Terraform](https://www.terraform.io/downloads.html) 0.13.x +* [Go](https://golang.org/doc/install) 1.13.x (to build the provider plugin) + +## Usage + +### Build from source code + +Clone repository to: `$GOPATH/src/github.com/tencentcloudstack/terraform-provider-tencentcloud` + +```sh +$ mkdir -p $GOPATH/src/github.com/tencentcloudstack +$ cd $GOPATH/src/github.com/tencentcloudstack +$ git clone https://github.com/tencentcloudstack/terraform-provider-tencentcloud.git +$ cd terraform-provider-tencentcloud +$ go build . +``` + +If you're building the provider, follow the instructions to [install it as a plugin.](https://www.terraform.io/docs/plugins/basics.html#installing-a-plugin) After placing it into your plugins directory, run `terraform init` to initialize it. + +## Configuration + +### Configure credentials + +You will need to have a pair of secret id and secret key to access Tencent Cloud resources, configure it in the provider arguments or export it in environment variables. If you don't have it yet, please access [Tencent Cloud Management Console](https://console.cloud.tencent.com/cam/capi) to create one. 
+
+```
+export TENCENTCLOUD_SECRET_ID=AKID9HH4OpqLJ5f6LPr4iIm5GF2s-EXAMPLE
+export TENCENTCLOUD_SECRET_KEY=72pQp14tWKUglrnX5RbaNEtN-EXAMPLE
+```
+
+### Configure proxy info (optional)
+
+If you are behind a proxy, for example, in a corporate network, you must set the proxy environment variables correctly. For example:
+
+```
+export http_proxy=http://your-proxy-host:your-proxy-port # This is just an example, use your real proxy settings!
+export https_proxy=$http_proxy
+export HTTP_PROXY=$http_proxy
+export HTTPS_PROXY=$http_proxy
+```
+
+## Run demo
+
+You can edit your own terraform configuration files. Learn from the examples in the examples directory.
+
+### Terraform it
+
+Now you can try your terraform demo:
+
+```
+terraform init
+terraform plan
+terraform apply
+```
+
+If you want to destroy the resource, make sure the instance is already in ``running`` status, otherwise the destroy might fail.
+
+```
+terraform destroy
+```
+
+## Developer Guide
+
+### DEBUG
+
+You will need to set an environment variable named ``TF_LOG``, for more info please refer to [Terraform official doc](https://www.terraform.io/docs/internals/debugging.html):
+
+```
+export TF_LOG=DEBUG
+```
+
+In your source file, import the standard package ``log`` and print the message such as:
+
+```
+log.Printf("[DEBUG] the message and some important values: %v", importantValues)
+```
+
+### Test
+
+The quickest way to develop and debug is to write test cases.
+How to trigger running the test cases, please refer to the `test.sh` script.
+How to write test cases, check the `xxx_test.go` files.
+
+### Avoid ``terraform init``
+
+```
+export TF_SKIP_PROVIDER_VERIFY=1
+```
+
+This will disable the verify steps, so after you update this provider, you won't need to create new resources, but use previously saved state.
+
+### Document
+
+Keep in mind that documentation changes are also needed when resources, data sources, or attributes are changed in code.
diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/terraform-provider-tencentcloud_v1.56.15 b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/terraform-provider-tencentcloud_v1.56.15 new file mode 100644 index 00000000..4be19d22 Binary files /dev/null and b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64/terraform-provider-tencentcloud_v1.56.15 differ diff --git a/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64_hcl/.terraform.lock.hcl b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64_hcl/.terraform.lock.hcl new file mode 100644 index 00000000..ba581243 --- /dev/null +++ b/terraform-server/data/terraform/providers/tencentcloud/1.56.15/linux_amd64_hcl/.terraform.lock.hcl @@ -0,0 +1,22 @@ +# This file is maintained automatically by "terraform init". +# Manual edits may be lost in future updates. + +provider "registry.terraform.io/tencentcloudstack/tencentcloud" { + version = "1.56.15" + hashes = [ + "h1:6ELU1CEGX5afIB75obKdU7DAPT6Cp2z8Y2YBRaPqxMA=", + "zh:27e11e1af3447b803ab7b79de32bf8a9970b39b8da5909c367607ad647d350d5", + "zh:2b1827cb0707130c3370052da658e979c156ca5c7b836fe9297382d5a93b9a25", + "zh:32094f8d878aab92055b828e86acac1cf84468f0e9407077a68b38df2e268f9d", + "zh:4d2e07904f3a11579fb818ed186e735f2a8ee3073587cc3a7803a2c14a2beaa4", + "zh:5298589208aa6a6af110951fcee6073c98f3126eea5a9860062df8d975ee1d0b", + "zh:9116269ab905f5de483c7bc149f1696f251c7c2bb79ad8773499e5e01c8c5e1f", + "zh:92f44bfd80d1a9cc5af6c82371c99cf2dfaff56dee30cc10a9cd0a753881cd1a", + "zh:9d7a1d0add38c8b1295e6e1edba4bf4591bdfe531ee4956db1a41ccba3877c9f", + "zh:9fbe25a6575de44e8a2f716d9c41a0a2e5ccec2914a3f824b1fbf5118ea4fead", + "zh:a652486f5ef22c3c8f9eca217ec85978aa1c9dd321e6a7317834ae75292e64e0", + "zh:b1edab515278f67cffbda6a53750d5ce97e2f278d6273dd9ddf9e60601da0255", + "zh:c983bd114899dfe6689d29b3d9e93fe5e0e49d54315460b2a2968a65c5ee39d1", + 
"zh:ca94a5624069dbf18d011f3a648a38d4b4518259bafe2013d3a065acf71f8778", + ] +} diff --git a/plugins/tencentcloud/versions.tf b/terraform-server/data/terraform/versiontf/tencentcloud/version.tf similarity index 100% rename from plugins/tencentcloud/versions.tf rename to terraform-server/data/terraform/versiontf/tencentcloud/version.tf diff --git a/terraform-server/go.mod b/terraform-server/go.mod new file mode 100644 index 00000000..3396a8dc --- /dev/null +++ b/terraform-server/go.mod @@ -0,0 +1,17 @@ +module github.com/WeBankPartners/wecube-plugins-terraform/terraform-server + +go 1.15 + +require ( + github.com/dgrijalva/jwt-go v3.2.0+incompatible + github.com/gin-gonic/gin v1.7.1 + github.com/glenn-brown/golang-pkg-pcre v0.0.0-20120522223659-48bb82a8b8ce + github.com/go-sql-driver/mysql v1.6.0 + github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect + github.com/modern-go/reflect2 v1.0.1 // indirect + github.com/ugorji/go v1.1.13 // indirect + go.uber.org/zap v1.16.0 + gopkg.in/natefinch/lumberjack.v2 v2.0.0 + xorm.io/core v0.7.3 + xorm.io/xorm v1.0.7 +) diff --git a/terraform-server/go.sum b/terraform-server/go.sum new file mode 100644 index 00000000..34bcf206 --- /dev/null +++ b/terraform-server/go.sum @@ -0,0 +1,154 @@ +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a h1:lSA0F4e9A2NcQSqGqTOXqu2aRi/XEQxDCBwM8yJtE6s= +gitea.com/xorm/sqlfiddle v0.0.0-20180821085327-62ce714f951a/go.mod h1:EXuID2Zs0pAQhH8yz+DNjUbjppKQzKFAn28TMYPB6IU= +github.com/BurntSushi/toml v0.3.1 h1:WXkYYl6Yr3qBf1K79EBnL4mak0OimBfB0XUf9Vl28OQ= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/PuerkitoBio/goquery v1.5.1/go.mod h1:GsLWisAFVj4WgDibEWF4pvYnkVQBpKBKeU+7zCJoLcc= +github.com/andybalholm/cascadia v1.1.0/go.mod h1:GsXiBklL0woXo1j/WYWtSYYC4ouU9PqHO0sqidkEA4Y= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 
h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/denisenkom/go-mssqldb v0.0.0-20200428022330-06a60b6afbbc/go.mod h1:xbL0rPBG9cCiLr28tMa8zpbdarY27NDyej4t/EjAShU= +github.com/dgrijalva/jwt-go v1.0.2 h1:KPldsxuKGsS2FPWsNeg9ZO18aCrGKujPoWXn2yo+KQM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible h1:7qlOGliEKZXTDg6OTjfoBKDXWrumCAMpl/TFQ4/5kLM= +github.com/dgrijalva/jwt-go v3.2.0+incompatible/go.mod h1:E3ru+11k8xSBh+hMPgOLZmtrrCbhqsmaPHjLKYnJCaQ= +github.com/fsnotify/fsnotify v1.4.7 h1:IXs+QLmnXW2CcXuY+8Mzv/fWEsPGWxqefPtCP5CnV9I= +github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/gin-gonic/gin v1.7.1 h1:qC89GU3p8TvKWMAVhEpmpB2CIb1hnqt2UdKZaP93mS8= +github.com/gin-gonic/gin v1.7.1/go.mod h1:jD2toBW3GZUr5UMcdrwQA10I7RuaFOl/SGeDjXkfUtY= +github.com/glenn-brown/golang-pkg-pcre v0.0.0-20120522223659-48bb82a8b8ce h1:MS/JOOAHf4U2iKl8+1+vzUcG9t9ru1hnZJ9NEBDvMnY= +github.com/glenn-brown/golang-pkg-pcre v0.0.0-20120522223659-48bb82a8b8ce/go.mod h1:5385NDJ+Gt5loLrAlc8Rr5lKA1L5BE5O94jfdwEX9kg= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1 
h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/go-sql-driver/mysql v1.4.1/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w= +github.com/go-sql-driver/mysql v1.5.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/go-sql-driver/mysql v1.6.0 h1:BCTh4TKNUYmOmMUcQ3IipzF5prigylS7XXjEkfCHuOE= +github.com/go-sql-driver/mysql v1.6.0/go.mod h1:DCzpHaOWr8IXmIStZouvnhqoel9Qv2LBy8hT2VhHyBg= +github.com/golang-sql/civil v0.0.0-20190719163853-cb61b32ac6fe/go.mod h1:8vg3r2VgvsThLBIFL93Qb5yWzgyZWhEmBwUJWevAkK0= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db h1:woRePGFeVFfLKN/pOkfl+p/TAqKOfFu+7KPlMVpok/w= +github.com/golang/snappy v0.0.0-20180518054509-2e65f85255db/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI= +github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI= +github.com/hpcloud/tail v1.0.0/go.mod h1:ab1qPbhIpdTxEkNHXyeSf5vhxWSCs/tWer42PpOxQnU= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI= +github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo= +github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ= +github.com/kr/text 
v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE= +github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/lib/pq v1.7.0/go.mod h1:AlVN5x4E4T544tWzH6hKfbfQvm3HdbOxrmggDNAPY9o= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/mattn/go-sqlite3 v1.10.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc= +github.com/mattn/go-sqlite3 v1.14.0 h1:mLyGNKR8+Vv9CAU7PphKa2hkEqxxhn8i32J6FPj1/QA= +github.com/mattn/go-sqlite3 v1.14.0/go.mod h1:JIl7NbARA7phWnGvh0LKTyg7S9BA+6gx71ShQilpsus= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/modern-go/reflect2 v1.0.1 h1:9f412s+6RmYXLWZSEzVVgPGK7C2PphHj5RJrvfx9AWI= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs= +github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE= +github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU= +github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= 
+github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/syndtr/goleveldb v1.0.0 h1:fBdIW9lB4Iz0n9khmH8w27SJ3QEJ7+IgjPEwGSZiFdE= +github.com/syndtr/goleveldb v1.0.0/go.mod h1:ZVVdQEZoIme9iO1Ch2Jdy24qqXrMMOU6lpPAyBWyWuQ= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go v1.1.13 h1:nB3O5kBSQGjEQAcfe1aLUYuxmXdFKmYgBZhY32rQb6Q= +github.com/ugorji/go v1.1.13/go.mod h1:jxau1n+/wyTGLQoCkjok9r5zFa/FxT6eI5HiHKQszjc= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +github.com/ugorji/go/codec v1.1.13 h1:013LbFhocBoIqgHeIHKlV4JWYhqogATYWZhIcH0WHn4= +github.com/ugorji/go/codec v1.1.13/go.mod h1:oNVt3Dq+FO91WNQ/9JnHKQP2QJxTzoN7wCBFCq1OeuU= +github.com/ziutek/mymysql v1.5.4/go.mod h1:LMSpPZ6DbqWFxNCHW77HeMg9I646SAhApZ/wKdgO/C0= +go.uber.org/atomic v1.6.0 h1:Ezj3JGmsOnG1MoRWQkPBsKLe9DwWD9QeXzTRzzldNVk= +go.uber.org/atomic v1.6.0/go.mod h1:sABNBOSYdrvTF6hTgEIbc7YasKWGhgEQZyfxyTvoXHQ= +go.uber.org/multierr v1.5.0 h1:KCa4XfM8CWFCpxXRGok+Q0SS/0XBhMDbHHGABQLvD2A= +go.uber.org/multierr v1.5.0/go.mod h1:FeouvMocqHpRaaGuG9EjoKcStLC43Zu/fmqdUMPcKYU= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee h1:0mgffUl7nfd+FpvXMVz4IDEaUSmT1ysygQC7qYo7sG4= +go.uber.org/tools v0.0.0-20190618225709-2cfd321de3ee/go.mod 
h1:vJERXedbb3MVM5f9Ejo0C68/HhF8uaILCdgjnY+goOA= +go.uber.org/zap v1.16.0 h1:uFRZXykJGK9lLY4HtgSw44DnIcAM+kRBP7x5m+NpAOM= +go.uber.org/zap v1.16.0/go.mod h1:MA8QOfq0BHJwdXa996Y4dYkAqRKB8/1K1QMMZVaNZjQ= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190325154230-a5d413f7728c/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de h1:5hukYrvBGR8/eNkX5mdUezrA6JiaEZDtJb9Ei+1LlBs= +golang.org/x/lint v0.0.0-20190930215403-16217165b5de/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc= +golang.org/x/mod v0.0.0-20190513183733-4bf6d317e70e/go.mod h1:mXi4GBBbnImb6dmsKGUJ2LatrhH/nqhxcFungHvyanc= +golang.org/x/net v0.0.0-20180218175443-cbe0f9307d01/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200202094626-16171245cfb2/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e h1:3G+cUijn7XD+S4eJFddp53Pv7+slrESplyjG25HgL+k= +golang.org/x/net v0.0.0-20200324143707-d3edc9973b7e/go.mod 
h1:qpuaurCH72eLCgpAm/N6yyVIVM9cpaDIP3A8BGJEC5A= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd h1:xhmwyvizuTgC2qz7ZlMluP20uW+C3Rm0FD/WLDX8884= +golang.org/x/sys v0.0.0-20200323222414-85ca7c5b95cd/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs= +golang.org/x/tools v0.0.0-20190621195816-6e04913cbbac/go.mod h1:/rFqwRUd4F7ZHNgwSSTFct+R/Kf4OFW1sUzUTQQTgfc= +golang.org/x/tools v0.0.0-20191029041327-9cc4af7d6b2c/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5 h1:hKsoRgsbwY1NafxrwTs+k64bikrLBkAgPir1TNCj3Zs= +golang.org/x/tools v0.0.0-20191029190741-b9c20aec41a5/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= +golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= +google.golang.org/appengine v1.6.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4= 
+gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY= +gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI= +gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4= +gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys= +gopkg.in/natefinch/lumberjack.v2 v2.0.0 h1:1Lc07Kr7qY4U2YPouBjpCLxpiyxIVoxqXgkXLknAOE8= +gopkg.in/natefinch/lumberjack.v2 v2.0.0/go.mod h1:l0ndWWf7gzL7RNwBG7wST/UCcT4T24xpD6X8LsfU/+k= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkepLTh2hOroT7a+7czfdQ= +gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.1-2019.2.3 h1:3JgtbtFHMiCmsznwGVTUWbgGov+pVqnlf1dEJTNAXeM= +honnef.co/go/tools v0.0.1-2019.2.3/go.mod h1:a3bituU0lyd329TUQxRnasdCoJDkEUEAqEt0JzvZhAg= +xorm.io/builder v0.3.7 h1:2pETdKRK+2QG4mLX4oODHEhn5Z8j1m8sXa7jfu+/SZI= +xorm.io/builder v0.3.7/go.mod h1:aUW0S9eb9VCaPohFCH3j7czOx1PMW3i1HrSzbLYGBSE= +xorm.io/core v0.7.3 h1:W8ws1PlrnkS1CZU1YWaYLMQcQilwAmQXU0BJDJon+H0= +xorm.io/core v0.7.3/go.mod h1:jJfd0UAEzZ4t87nbQYtVjmqpIODugN6PD2D9E+dJvdM= +xorm.io/xorm v1.0.7 h1:26yBTDVI+CfQpVz2Y88fISh+aiJXIPP4eNoTJlwzsC4= +xorm.io/xorm v1.0.7/go.mod h1:uF9EtbhODq5kNWxMbnBEj8hRRZnlcNSz2t2N7HW/+A4= diff --git a/terraform-server/main.go b/terraform-server/main.go new file mode 100644 index 00000000..27bdac56 --- 
/dev/null +++ b/terraform-server/main.go @@ -0,0 +1,32 @@ +package main + +import ( + "flag" + "fmt" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/api/v1/log_operation" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/services/db" +) + +// @title Terraform Server +// @version 1.0 +// @description Terraform 插件后台服务 +func main() { + configFile := flag.String("c", "conf/default.json", "config file path") + flag.Parse() + if initConfigMessage := models.InitConfig(*configFile); initConfigMessage != "" { + fmt.Printf("Init config file error,%s \n", initConfigMessage) + return + } + log.InitLogger() + if initDbError := db.InitDatabase(); initDbError != nil { + return + } + + go log_operation.StartConsumeOperationLog() + //start http + api.InitHttpServer() + +} diff --git a/terraform-server/models/config.go b/terraform-server/models/config.go new file mode 100644 index 00000000..78a8998c --- /dev/null +++ b/terraform-server/models/config.go @@ -0,0 +1,127 @@ +package models + +import ( + "encoding/json" + "io/ioutil" + "os" + "strings" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/cipher" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/token" +) + +type HttpServerConfig struct { + Port string `json:"port"` + Cross bool `json:"cross"` +} + +type LogConfig struct { + Level string `json:"level"` + LogDir string `json:"log_dir"` + AccessLogEnable bool `json:"access_log_enable"` + DbLogEnable bool `json:"db_log_enable"` + ArchiveMaxSize int `json:"archive_max_size"` + ArchiveMaxBackup int `json:"archive_max_backup"` + ArchiveMaxDay int `json:"archive_max_day"` + Compress bool `json:"compress"` +} + +type DatabaseConfig struct { 
+	Server   string `json:"server"`
+	Port     string `json:"port"`
+	User     string `json:"user"`
+	Password string `json:"password"`
+	DataBase string `json:"database"`
+	MaxOpen  int    `json:"maxOpen"`
+	MaxIdle  int    `json:"maxIdle"`
+	Timeout  int    `json:"timeout"`
+}
+
+type WecubeConfig struct {
+	BaseUrl       string `json:"base_url"`
+	JwtSigningKey string `json:"jwt_signing_key"`
+	SubSystemCode string `json:"sub_system_code"`
+	SubSystemKey  string `json:"sub_system_key"`
+}
+
+type AuthConfig struct {
+	Enable           bool   `json:"enable"`
+	PasswordSeed     string `json:"password_seed"`
+	ExpireSec        int64  `json:"expire_sec"`
+	FreshTokenExpire int64  `json:"fresh_token_expire"`
+}
+
+type MenuApiMapConfig struct {
+	Enable bool   `json:"enable"`
+	File   string `json:"file"`
+}
+
+type GlobalConfig struct {
+	IsPluginMode            string                        `json:"is_plugin_mode"`
+	DefaultLanguage         string                        `json:"default_language"`
+	HttpServer              HttpServerConfig              `json:"http_server"`
+	Log                     LogConfig                     `json:"log"`
+	Database                DatabaseConfig                `json:"database"`
+	RsaKeyPath              string                        `json:"rsa_key_path"`
+	Wecube                  WecubeConfig                  `json:"wecube"`
+	Auth                    AuthConfig                    `json:"auth"`
+	MenuApiMap              MenuApiMapConfig              `json:"menu_api_map"`
+	DefaultReportObjAttr    []*DefaultReportObjAttrConfig `json:"default_report_obj_attr"`
+	TerraformFilePath       string                        `json:"terraform_file_path"`
+	TerraformCmdPath        string                        `json:"terraform_cmd_path"`
+	TerraformProviderOsArch string                        `json:"terraform_provider_os_arch"`
+	Version                 string                        `json:"version"`
+	// default json
+}
+
+type DefaultReportObjAttrConfig struct {
+	Id        string `json:"id"`
+	Title     string `json:"title"`
+	Querialbe string `json:"querialbe"`
+}
+
+var (
+	Config            *GlobalConfig
+	PluginRunningMode bool
+	CoreToken         *token.CoreToken
+	// MenuApiGlobalList []*MenuApiMapObj
+)
+
+func InitConfig(configFile string) (errMessage string) {
+	if configFile == "" {
+		errMessage = "config file empty,use -c to specify configuration file"
+		return
+	}
+	_, err := os.Stat(configFile)
+	if os.IsNotExist(err) { // was os.IsExist: a missing file yields a NotExist error, so the branch never fired
+		errMessage = "config file not found," + err.Error()
+		return
+	}
+	b, err := ioutil.ReadFile(configFile)
+	if err != nil {
+		errMessage = "read config file fail," + err.Error()
+		return
+	}
+	var c GlobalConfig
+	err = json.Unmarshal(b, &c)
+	if err != nil {
+		errMessage = "parse file to json fail," + err.Error()
+		return
+	}
+	c.Database.Password = cipher.DecryptRsa(c.Database.Password, c.RsaKeyPath)
+	Config = &c
+	c.IsPluginMode = strings.ToLower(c.IsPluginMode)
+	if c.IsPluginMode == "yes" || c.IsPluginMode == "y" || c.IsPluginMode == "true" {
+		tmpCoreToken := token.CoreToken{}
+		PluginRunningMode = true
+		tmpCoreToken.BaseUrl = Config.Wecube.BaseUrl
+		tmpCoreToken.JwtSigningKey = Config.Wecube.JwtSigningKey
+		tmpCoreToken.SubSystemCode = Config.Wecube.SubSystemCode
+		tmpCoreToken.SubSystemKey = Config.Wecube.SubSystemKey
+		tmpCoreToken.InitCoreToken()
+		CoreToken = &tmpCoreToken
+	} else {
+		PluginRunningMode = false
+	}
+	return
+}
diff --git a/terraform-server/models/const.go b/terraform-server/models/const.go
new file mode 100644
index 00000000..7227e7a5
--- /dev/null
+++ b/terraform-server/models/const.go
@@ -0,0 +1,46 @@
+package models
+
+const (
+	DateTimeFormat               = "2006-01-02 15:04:05"
+	SysTableIdConnector          = "__"
+	UrlPrefix                    = "/terraform"
+	MultiRefType                 = "multiRef"
+	AutofillRuleType             = "autofillRule"
+	ObjectInputType              = "object"
+	AutofillSuggest              = "suggest#"
+	SystemUser                   = "system"
+	AdminUser                    = "admin"
+	AdminRole                    = "SUPER_ADMIN"
+	SystemRole                   = "SUB_SYSTEM"
+	PlatformUser                 = "SYS_PLATFORM"
+	PasswordDisplay              = "****"
+	BashCmd                      = "/bin/sh"
+	RandomFlag                   = "{random}"
+	TerraformOutPutPrefix        = "$_result_list$"
+	ParameterSourceDefault       = "custom"
+	TfArgumentKeyArgumentDefault = "N"
+	CommandTimeOut               = 300
+	ResourceDataDebug            = "$_resource_data_debug$"
+	ResourceIdDataConvert        = "#resourceId#"
+	PGuid                        = "6101d5ff9c058ecd8d2dddd974d38f98"
+	ImportResourceDataTableId    = "$_resource_data_table_id$"
+	SimulateResourceData         = "$_simulate_resource_data$"
+	
SimulateResourceDataReturn = "$_simulate_resource_data_return$" + SimulateResourceDataResult = "$_simulate_resource_data_result$" + SourceDataIdx = "$_source_data_idx$" +) + +var ( + SEPERATOR = string([]byte{0x01}) + ConvertWay = map[string]string{"Data": "data", "Template": "template", "ContextData": "context_data", "Attr": "attribute", "Direct": "direct", "Function": "function", "ContextDirect": "context_direct", "ContextAttr": "context_attribute", "ContextTemplate": "context_template"} + // TerraformProviderPathDiffMap = map[string]string{"tencentcloud": ".terraform/providers/registry.terraform.io/tencentcloudstack/tencentcloud/", + // "alicloud": ".terraform/providers/registry.terraform.io/hashicorp/alicloud/"} + TerraformProviderPathDiffMap = map[string]string{"tencentcloud": ".terraform/providers/registry.terraform.io/", + "alicloud": ".terraform/providers/registry.terraform.io/"} + FunctionConvertFunctionDefineName = map[string]string{"Split": "split", "Replace": "replace", "Regx": "regx", "Remove": "remove"} + + ExcludeFilterKeys = map[string]bool{"confirmToken":true, "callbackParameter":true, "id":true, "asset_id":true, + "provider_info":true, "region_id":true, "operator_user":true, "requestId":true, "requestSn":true, + SimulateResourceData:true, ResourceDataDebug:true, ResourceIdDataConvert:true, ImportResourceDataTableId: true, + SimulateResourceDataReturn:true, SimulateResourceDataResult:true, SourceDataIdx:true} +) diff --git a/terraform-server/models/interface.go b/terraform-server/models/interface.go new file mode 100644 index 00000000..b524533c --- /dev/null +++ b/terraform-server/models/interface.go @@ -0,0 +1,12 @@ +package models + +type InterfaceTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Plugin string `json:"plugin" xorm:"plugin"` + Description string `json:"description" xorm:"description"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" 
xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} diff --git a/terraform-server/models/parameter.go b/terraform-server/models/parameter.go new file mode 100644 index 00000000..88cc8c80 --- /dev/null +++ b/terraform-server/models/parameter.go @@ -0,0 +1,38 @@ +package models + +type ParameterTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Type string `json:"type" xorm:"type"` + Multiple string `json:"multiple" xorm:"multiple"` + Interface string `json:"interface" xorm:"interface"` + Template string `json:"template" xorm:"template"` + DataType string `json:"dataType" xorm:"datatype"` + ObjectName string `json:"objectName" xorm:"object_name"` + Source string `json:"source" xorm:"source"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` + Nullable string `json:"nullable" xorm:"nullable"` + Sensitive string `json:"sensitive" xorm:"sensitive"` +} + +type ParameterQuery struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Type string `json:"type" xorm:"type"` + Multiple string `json:"multiple" xorm:"multiple"` + Interface string `json:"interface" xorm:"interface"` + Template string `json:"template" xorm:"template"` + DataType string `json:"dataType" xorm:"datatype"` + ObjectName string `json:"objectName" xorm:"object_name"` + ObjectNameTitle string `json:"objectNameTitle" xorm:"object_name_title"` + Source string `json:"source" xorm:"source"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` + Nullable string `json:"nullable" xorm:"nullable"` 
+ Sensitive string `json:"sensitive" xorm:"sensitive"` +} diff --git a/terraform-server/models/plugin.go b/terraform-server/models/plugin.go new file mode 100644 index 00000000..8beb0dba --- /dev/null +++ b/terraform-server/models/plugin.go @@ -0,0 +1,10 @@ +package models + +type PluginTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} diff --git a/terraform-server/models/provider.go b/terraform-server/models/provider.go new file mode 100644 index 00000000..47b553d2 --- /dev/null +++ b/terraform-server/models/provider.go @@ -0,0 +1,29 @@ +package models + +type ProviderTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Version string `json:"version" xorm:"version"` + SecretIdAttrName string `json:"secretIdAttrName" xorm:"secret_id_attr_name"` + SecretKeyAttrName string `json:"secretKeyAttrName" xorm:"secret_key_attr_name"` + RegionAttrName string `json:"regionAttrName" xorm:"region_attr_name"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` + Initialized string `json:"initialized" xorm:"Initialized"` + NameSpace string `json:"nameSpace" xorm:"name_space"` +} + +type ProviderPluginImportObj struct { + Provider []*ProviderTable `json:"provider"` + ProviderTemplateValue []*ProviderTemplateValueTable `json:"provider_template_value"` + Template []*TemplateTable `json:"template"` + TemplateValue []*TemplateValueTable `json:"template_value"` + Plugin []*PluginTable `json:"plugin"` + Interface []*InterfaceTable `json:"interface"` + Parameter []*ParameterTable 
`json:"parameter"` + Source []*SourceTable `json:"source"` + TfArgument []*TfArgumentTable `json:"tf_argument"` + TfstateAttribute []*TfstateAttributeTable `json:"tfstate_attribute"` +} diff --git a/terraform-server/models/provider_info.go b/terraform-server/models/provider_info.go new file mode 100644 index 00000000..f14e0cf1 --- /dev/null +++ b/terraform-server/models/provider_info.go @@ -0,0 +1,26 @@ +package models + +type ProviderInfoTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Provider string `json:"provider" xorm:"provider"` + SecretId string `json:"secretId" xorm:"secret_id"` + SecretKey string `json:"secretKey" xorm:"secret_key"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} + +type ProviderInfoQuery struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Provider string `json:"provider" xorm:"provider"` + ProviderTitle string `json:"providerTitle" xorm:"provider_title"` + SecretId string `json:"secretId" xorm:"secret_id"` + SecretKey string `json:"secretKey" xorm:"secret_key"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} diff --git a/terraform-server/models/provider_template_value.go b/terraform-server/models/provider_template_value.go new file mode 100644 index 00000000..7596506c --- /dev/null +++ b/terraform-server/models/provider_template_value.go @@ -0,0 +1,12 @@ +package models + +type ProviderTemplateValueTable struct { + Id string `json:"id" xorm:"id"` + Value string `json:"value" xorm:"value"` + Provider string `json:"provider" xorm:"provider"` + TemplateValue string `json:"templateValue" 
xorm:"template_value"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} diff --git a/terraform-server/models/request.go b/terraform-server/models/request.go new file mode 100644 index 00000000..e5f726e1 --- /dev/null +++ b/terraform-server/models/request.go @@ -0,0 +1,34 @@ +package models + +type QueryRequestFilterObj struct { + Name string `json:"name"` + Operator string `json:"operator"` + Value interface{} `json:"value"` +} + +type QueryRequestSorting struct { + Asc bool `json:"asc"` + Field string `json:"field"` +} + +type QueryRequestDialect struct { + AssociatedData map[string]string `json:"associatedData"` + QueryMode string `json:"queryMode"` +} + +type QueryRequestParam struct { + Filters []*QueryRequestFilterObj `json:"filters"` + Dialect *QueryRequestDialect `json:"dialect"` + Paging bool `json:"paging"` + Pageable *PageInfo `json:"pageable"` + Sorting *QueryRequestSorting `json:"sorting"` + ResultColumns []string `json:"resultColumns"` +} + +type TransFiltersParam struct { + IsStruct bool + StructObj interface{} + Prefix string + KeyMap map[string]string + PrimaryKey string +} diff --git a/terraform-server/models/resource_data.go b/terraform-server/models/resource_data.go new file mode 100644 index 00000000..e449c831 --- /dev/null +++ b/terraform-server/models/resource_data.go @@ -0,0 +1,79 @@ +package models + +type ResourceDataTable struct { + Id string `json:"id" xorm:"id"` + Resource string `json:"resource" xorm:"resource"` + ResourceId string `json:"resourceId" xorm:"resource_id"` + ResourceAssetId string `json:"resourceAssetId" xorm:"resource_asset_id"` + TfFile string `json:"tfFile" xorm:"tf_file"` + TfStateFile string `json:"tfStateFile" xorm:"tf_state_file"` + RegionId string `json:"regionId" xorm:"region_id"` + CreateTime string `json:"createTime" 
xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} + +type ResourceDataQuery struct { + Id string `json:"id" xorm:"id"` + Resource string `json:"resource" xorm:"resource"` + ResourceTitle string `json:"resourceTitle" xorm:"resource_title"` + ResourceId string `json:"resourceId" xorm:"resource_id"` + ResourceAssetId string `json:"resourceAssetId" xorm:"resource_asset_id"` + TfFile string `json:"tfFile" xorm:"tf_file"` + TfStateFile string `json:"tfStateFile" xorm:"tf_state_file"` + RegionId string `json:"regionId" xorm:"region_id"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} + +type RegionProviderData struct { + ProviderName string `json:"providerName"` + ProviderVersion string `json:"providerVersion"` + SecretId string `json:"secretId"` + SecretKey string `json:"secretKey"` + SecretIdAttrName string `json:"secretIdAttrName"` + SecretKeyAttrName string `json:"secretKeyAttrName"` + RegionAttrName string `json:"regionAttrName"` + ProviderInfoId string `json:"providerInfoId"` +} + +type TfstateFileData struct { + Resources []TfstateFileResources `json:"resources"` +} + +type TfstateFileResources struct { + Instances []TfstateFileAttributes `json:"instances"` +} + +type TfstateFileAttributes struct { + Attributes map[string]interface{} `json:"attributes"` +} + +type SortTfstateAttributes struct { + TfstateAttr *TfstateAttributeTable `json:"tfstateAttr"` + Point int `json:"point"` + IsExist bool `json:"isExist"` +} + +type FunctionDefine struct { + Function string `json:"function"` + Args *FunctionDefineArgs `json:"args"` + Return string `json:"return"` +} + +type FunctionDefineArgs struct { + SplitChar []string `json:"splitChar"` + 
ReplaceVal []map[string]string `json:"replaceVal"` + RegExp []string `json:"regExp"` + RemoveKey []string `json:"keys"` +} + +type TfFileAttrFetchResult struct { + AttrBytes []byte + FileContent string + StartIndex int + EndIndex int +} \ No newline at end of file diff --git a/terraform-server/models/response.go b/terraform-server/models/response.go new file mode 100644 index 00000000..7cf3e66f --- /dev/null +++ b/terraform-server/models/response.go @@ -0,0 +1,37 @@ +package models + +type PageInfo struct { + StartIndex int `json:"startIndex"` + PageSize int `json:"pageSize"` + TotalRows int `json:"totalRows"` +} + +type ResponsePageData struct { + PageInfo PageInfo `json:"pageInfo"` + Contents interface{} `json:"contents"` +} + +type ResponseJson struct { + StatusCode string `json:"statusCode"` + Data interface{} `json:"data"` +} + +type ResponseErrorObj struct { + ErrorMessage string `json:"errorMessage"` +} + +type ResponseErrorJson struct { + StatusCode string `json:"statusCode"` + StatusMessage string `json:"statusMessage"` + Data interface{} `json:"data"` +} + +type SysLogTable struct { + LogCat string `json:"logCat" xorm:"log_cat"` + Operator string `json:"operator" xorm:"operator"` + Operation string `json:"operation" xorm:"operation"` + Content string `json:"content" xorm:"content"` + RequestUrl string `json:"requestUrl" xorm:"request_url"` + ClientHost string `json:"clientHost" xorm:"client_host"` + CreatedDate string `json:"createdDate" xorm:"created_date"` +} diff --git a/terraform-server/models/source.go b/terraform-server/models/source.go new file mode 100644 index 00000000..09c0ffda --- /dev/null +++ b/terraform-server/models/source.go @@ -0,0 +1,19 @@ +package models + +type SourceTable struct { + Id string `json:"id" xorm:"id"` + Interface string `json:"interface" xorm:"interface"` + Provider string `json:"provider" xorm:"provider"` + Name string `json:"name" xorm:"name"` + AssetIdAttribute string `json:"assetIdAttribute" xorm:"asset_id_attribute"` 
+ TerraformUsed string `json:"terraformUsed" xorm:"terraform_used"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` + ImportPrefix string `json:"importPrefix" xorm:"import_prefix"` + ImportSupport string `json:"importSupport" xorm:"import_support"` + ExecutionSeqNo int `json:"executionSeqNo" xorm:"execution_seq_no"` + SourceType string `json:"sourceType" xorm:"source_type"` + Remark string `json:"remark" xorm:"remark"` +} diff --git a/terraform-server/models/template.go b/terraform-server/models/template.go new file mode 100644 index 00000000..880a05af --- /dev/null +++ b/terraform-server/models/template.go @@ -0,0 +1,11 @@ +package models + +type TemplateTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Description string `json:"description" xorm:"description"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} diff --git a/terraform-server/models/template_value.go b/terraform-server/models/template_value.go new file mode 100644 index 00000000..f67454d4 --- /dev/null +++ b/terraform-server/models/template_value.go @@ -0,0 +1,22 @@ +package models + +type TemplateValueTable struct { + Id string `json:"id" xorm:"id"` + Value string `json:"value" xorm:"value"` + Template string `json:"template" xorm:"template"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} + +type TemplateValueQuery struct { + Id string `json:"id" xorm:"id"` + Value string `json:"value" xorm:"value"` + 
Template string `json:"template" xorm:"template"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` + ProviderTemplateValueInfo map[string]map[string]string `json:"providerTemplateValueInfo"` +} diff --git a/terraform-server/models/tf_argument.go b/terraform-server/models/tf_argument.go new file mode 100644 index 00000000..3a6d2106 --- /dev/null +++ b/terraform-server/models/tf_argument.go @@ -0,0 +1,53 @@ +package models + +type TfArgumentTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Source string `json:"source" xorm:"source"` + Parameter string `json:"parameter" xorm:"parameter"` + DefaultValue string `json:"defaultValue" xorm:"default_value"` + IsNull string `json:"isNull" xorm:"is_null"` + Type string `json:"type" xorm:"type"` + ObjectName string `json:"objectName" xorm:"object_name"` + IsMulti string `json:"isMulti" xorm:"is_multi"` + ConvertWay string `json:"convertWay" xorm:"convert_way"` + RelativeSource string `json:"relativeSource" xorm:"relative_source"` + RelativeTfstateAttribute string `json:"relativeTfstateAttribute" xorm:"relative_tfstate_attribute"` + RelativeParameter string `json:"relativeParameter" xorm:"relative_parameter"` + RelativeParameterValue string `json:"relativeParameterValue" xorm:"relative_parameter_value"` + FunctionDefine string `json:"functionDefine" xorm:"function_define"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` + KeyArgument string `json:"keyArgument" xorm:"key_argument"` +} + +type TfArgumentQuery struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Source string `json:"source" 
xorm:"source"` + SourceTitle string `json:"sourceTitle" xorm:"source_title"` + Parameter string `json:"parameter" xorm:"parameter"` + ParameterTitle string `json:"parameterTitle" xorm:"parameter_title"` + DefaultValue string `json:"defaultValue" xorm:"default_value"` + IsNull string `json:"isNull" xorm:"is_null"` + Type string `json:"type" xorm:"type"` + ObjectName string `json:"objectName" xorm:"object_name"` + ObjectNameTitle string `json:"objectNameTitle" xorm:"object_name_title"` + IsMulti string `json:"isMulti" xorm:"is_multi"` + ConvertWay string `json:"convertWay" xorm:"convert_way"` + RelativeSource string `json:"relativeSource" xorm:"relative_source"` + RelativeSourceTitle string `json:"relativeSourceTitle" xorm:"relative_source_title"` + RelativeTfstateAttribute string `json:"relativeTfstateAttribute" xorm:"relative_tfstate_attribute"` + RelativeTfstateAttributeTitle string `json:"relativeTfstateAttributeTitle" xorm:"relative_tfstate_attribute_title"` + RelativeParameter string `json:"relativeParameter" xorm:"relative_parameter"` + RelativeParameterTitle string `json:"relativeParameterTitle" xorm:"relative_parameter_title"` + RelativeParameterValue string `json:"relativeParameterValue" xorm:"relative_parameter_value"` + FunctionDefine string `json:"functionDefine" xorm:"function_define"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` + KeyArgument string `json:"keyArgument" xorm:"key_argument"` +} diff --git a/terraform-server/models/tfstate_attribute.go b/terraform-server/models/tfstate_attribute.go new file mode 100644 index 00000000..62d0a5ac --- /dev/null +++ b/terraform-server/models/tfstate_attribute.go @@ -0,0 +1,51 @@ +package models + +type TfstateAttributeTable struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Source string 
`json:"source" xorm:"source"` + Parameter string `json:"parameter" xorm:"parameter"` + DefaultValue string `json:"defaultValue" xorm:"default_value"` + IsNull string `json:"isNull" xorm:"is_null"` + Type string `json:"type" xorm:"type"` + ObjectName string `json:"objectName" xorm:"object_name"` + IsMulti string `json:"isMulti" xorm:"is_multi"` + ConvertWay string `json:"convertWay" xorm:"convert_way"` + RelativeSource string `json:"relativeSource" xorm:"relative_source"` + RelativeTfstateAttribute string `json:"relativeTfstateAttribute" xorm:"relative_tfstate_attribute"` + RelativeParameter string `json:"relativeParameter" xorm:"relative_parameter"` + RelativeParameterValue string `json:"relativeParameterValue" xorm:"relative_parameter_value"` + FunctionDefine string `json:"functionDefine" xorm:"function_define"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} + +type TfstateAttributeQuery struct { + Id string `json:"id" xorm:"id"` + Name string `json:"name" xorm:"name"` + Source string `json:"source" xorm:"source"` + SourceTitle string `json:"sourceTitle" xorm:"source_title"` + Parameter string `json:"parameter" xorm:"parameter"` + ParameterTitle string `json:"parameterTitle" xorm:"parameter_title"` + DefaultValue string `json:"defaultValue" xorm:"default_value"` + IsNull string `json:"isNull" xorm:"is_null"` + Type string `json:"type" xorm:"type"` + ObjectName string `json:"objectName" xorm:"object_name"` + ObjectNameTitle string `json:"objectNameTitle" xorm:"object_name_title"` + IsMulti string `json:"isMulti" xorm:"is_multi"` + ConvertWay string `json:"convertWay" xorm:"convert_way"` + RelativeSource string `json:"relativeSource" xorm:"relative_source"` + RelativeSourceTitle string `json:"relativeSourceTitle" xorm:"relative_source_title"` + RelativeTfstateAttribute string 
`json:"relativeTfstateAttribute" xorm:"relative_tfstate_attribute"` + RelativeTfstateAttributeTitle string `json:"relativeTfstateAttributeTitle" xorm:"relative_tfstate_attribute_title"` + RelativeParameter string `json:"relativeParameter" xorm:"relative_parameter"` + RelativeParameterTitle string `json:"relativeParameterTitle" xorm:"relative_parameter_title"` + RelativeParameterValue string `json:"relativeParameterValue" xorm:"relative_parameter_value"` + FunctionDefine string `json:"functionDefine" xorm:"function_define"` + CreateTime string `json:"createTime" xorm:"create_time"` + CreateUser string `json:"createUser" xorm:"create_user"` + UpdateTime string `json:"updateTime" xorm:"update_time"` + UpdateUser string `json:"updateUser" xorm:"update_user"` +} diff --git a/terraform-server/models/wecube.go b/terraform-server/models/wecube.go new file mode 100644 index 00000000..a7d17035 --- /dev/null +++ b/terraform-server/models/wecube.go @@ -0,0 +1,120 @@ +package models + +type EntityQueryParam struct { + Criteria EntityQueryObj `json:"criteria"` + AdditionalFilters []*EntityQueryObj `json:"additionalFilters"` +} + +type EntityQueryObj struct { + AttrName string `json:"attrName"` + Op string `json:"op"` + Condition interface{} `json:"condition"` +} + +type EntityResponse struct { + Status string `json:"status"` + Message string `json:"message"` + Data []map[string]interface{} `json:"data"` +} + +type SyncDataModelResponse struct { + Status string `json:"status"` + Message string `json:"message"` + Data []*SyncDataModelCiType `json:"data"` +} + +type SyncDataModelCiType struct { + Name string `json:"name" xorm:"id"` + DisplayName string `json:"displayName" xorm:"display_name"` + Description string `json:"description" xorm:"description"` + Attributes []*SyncDataModelCiAttr `json:"attributes" xorm:"-"` +} + +type SyncDataModelCiAttr struct { + Name string `json:"name" xorm:"name"` + EntityName string `json:"entityName" xorm:"ci_type"` + Description string 
`json:"description" xorm:"description"` + DataType string `json:"dataType" xorm:"input_type"` + RefPackageName string `json:"refPackageName" xorm:"-"` + RefEntityName string `json:"refEntityName" xorm:"ref_ci_type"` + RefAttributeName string `json:"refAttributeName" xorm:"-"` +} + +type PluginCiDataOperationRequest struct { + RequestId string `json:"requestId"` + Inputs []*PluginCiDataOperationRequestObj `json:"inputs"` +} + +type PluginCiDataOperationRequestObj struct { + CallbackParameter string `json:"callbackParameter"` + CiType string `json:"ciType"` + Operation string `json:"operation"` + JsonData string `json:"jsonData"` +} + +type PluginCiDataAttrValueRequest struct { + RequestId string `json:"requestId"` + Inputs []*PluginCiDataAttrValueRequestObj `json:"inputs"` +} + +type PluginCiDataAttrValueRequestObj struct { + CallbackParameter string `json:"callbackParameter"` + CiType string `json:"ciType"` + Guid string `json:"guid"` + CiTypeAttr string `json:"ciTypeAttr"` + Value string `json:"value"` +} + +type PluginCiDataOperationResp struct { + ResultCode string `json:"resultCode"` + ResultMessage string `json:"resultMessage"` + Results PluginCiDataOperationOutput `json:"results"` +} + +type PluginCiDataOperationOutput struct { + Outputs []*PluginCiDataOperationOutputObj `json:"outputs"` +} + +type PluginCiDataOperationOutputObj struct { + CallbackParameter string `json:"callbackParameter"` + Guid string `json:"guid"` + ErrorCode string `json:"errorCode"` + ErrorMessage string `json:"errorMessage"` + ErrorDetail string `json:"errorDetail,omitempty"` +} + +type CoreRoleDto struct { + Status string `json:"status"` + Message string `json:"message"` + Data []CoreRoleDataObj `json:"data"` +} + +type CoreRoleDataObj struct { + Id string `json:"id"` + Name string `json:"name"` + Email string `json:"email"` + DisplayName string `json:"displayName"` +} + +type PluginInterfaceResultObj struct { + ResultCode string `json:"resultCode"` + ResultMessage string 
`json:"resultMessage"` + Results PluginInterfaceResultOutput `json:"results"` +} + +type PluginInterfaceResultOutput struct { + Outputs []map[string]interface{} `json:"outputs"` +} + +type PluginInterfaceResultOutputObj struct { + ErrorCode string `json:"errorCode"` + ErrorMessage string `json:"errorMessage"` + CallbackParameter string `json:"callbackParameter"` +} + +type PluginInterfaceResultObjDebug struct { + StatusCode string `json:"statusCode"` + ResultCode string `json:"resultCode"` + ResultMessage string `json:"resultMessage"` + Results PluginInterfaceResultOutput `json:"results"` +} \ No newline at end of file diff --git a/terraform-server/models/xml.go b/terraform-server/models/xml.go new file mode 100644 index 00000000..428aa8e0 --- /dev/null +++ b/terraform-server/models/xml.go @@ -0,0 +1,81 @@ +package models + +import "encoding/xml" + +type XmlPackage struct { + XMLName xml.Name `xml:"package"` + Name string `xml:"name,attr"` + Version string `xml:"version,attr"` + ParamObjects XmlParamObjects + Plugins XmlPlugins +} + +type XmlPlugins struct { + XMLName xml.Name `xml:"plugins"` + Plugins []*XmlPlugin +} + +type XmlPlugin struct { + XMLName xml.Name `xml:"plugin"` + Name string `xml:"name,attr"` + TargetPackage string `xml:"targetPackage,attr"` + TargetEntity string `xml:"targetEntity,attr"` + RegisterName string `xml:"registerName,attr"` + TargetEntityFilterRule string `xml:"targetEntityFilterRule,attr"` + Interfaces []*XmlInterface +} + +type XmlInterface struct { + XMLName xml.Name `xml:"interface"` + Action string `xml:"action,attr"` + Path string `xml:"path,attr"` + FilterRule string `xml:"filterRule,attr"` + InputParameters XmlInputParameters + OutputParameters XmlOutputParameters +} + +type XmlInputParameters struct { + XMLName xml.Name `xml:"inputParameters"` + Parameters []*XmlParameter +} + +type XmlOutputParameters struct { + XMLName xml.Name `xml:"outputParameters"` + Parameters []*XmlParameter +} + +type XmlParameter struct { + XMLName 
xml.Name `xml:"parameter"` + Datatype string `xml:"datatype,attr"` + Required string `xml:"required,attr"` + SensitiveData string `xml:"sensitiveData,attr"` + MappingType string `xml:"mappingType,attr"` + MappingEntityExpression string `xml:"mappingEntityExpression,attr,omitempty"` + Multiple string `xml:"multiple,attr"` + RefObjectName string `xml:"refObjectName,attr,omitempty"` + Value string `xml:",chardata"` +} + +type XmlParamObjects struct { + XMLName xml.Name `xml:"paramObjects"` + ParamObjects []*XmlParamObject +} + +type XmlParamObject struct { + XMLName xml.Name `xml:"paramObject"` + Name string `xml:"name,attr"` + MapExpr string `xml:"mapExpr,attr,omitempty"` + Properties []*XmlParamProperty +} + +type XmlParamProperty struct { + XMLName xml.Name `xml:"property"` + Name string `xml:"name,attr"` + DataType string `xml:"dataType,attr"` + RefObjectName string `xml:"refObjectName,attr,omitempty"` + Multiple string `xml:"multiple,attr"` + MapType string `xml:"mapType,attr,omitempty"` + MapExpr string `xml:"mapExpr,attr,omitempty"` + Required string `xml:"required,attr"` + SensitiveData string `xml:"sensitiveData,attr"` +} diff --git a/terraform-server/public/index.html b/terraform-server/public/index.html new file mode 100644 index 00000000..9de3e577 --- /dev/null +++ b/terraform-server/public/index.html @@ -0,0 +1,10 @@ + + + + + WeTerraform + + +

Main Page

+ + \ No newline at end of file diff --git a/terraform-server/services/db/db.go b/terraform-server/services/db/db.go new file mode 100644 index 00000000..56dafce9 --- /dev/null +++ b/terraform-server/services/db/db.go @@ -0,0 +1,432 @@ +package db + +import ( + "database/sql" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + _ "github.com/go-sql-driver/mysql" + "go.uber.org/zap" + "xorm.io/core" + "xorm.io/xorm" + xorm_log "xorm.io/xorm/log" +) + +const HistoryTablePrefix = "history_" + +var ( + x *xorm.Engine + _ xorm_log.Logger = &dbLogger{} +) + +func InitDatabase() error { + connStr := fmt.Sprintf("%s:%s@%s(%s)/%s?collation=utf8mb4_unicode_ci&allowNativePasswords=true", + models.Config.Database.User, models.Config.Database.Password, "tcp", fmt.Sprintf("%s:%s", models.Config.Database.Server, models.Config.Database.Port), models.Config.Database.DataBase) + engine, err := xorm.NewEngine("mysql", connStr) + if err != nil { + log.Logger.Error("Init database connect fail", log.Error(err)) + return err + } + engine.SetMaxIdleConns(models.Config.Database.MaxIdle) + engine.SetMaxOpenConns(models.Config.Database.MaxOpen) + engine.SetConnMaxLifetime(time.Duration(models.Config.Database.Timeout) * time.Second) + if models.Config.Log.DbLogEnable { + engine.SetLogger(&dbLogger{LogLevel: 1, ShowSql: true, Logger: log.DatabaseLogger}) + } + // 使用驼峰式映射 + engine.SetMapper(core.SnakeMapper{}) + x = engine + log.Logger.Info("Success init database connect !!") + return nil +} + +type dbLogger struct { + LogLevel xorm_log.LogLevel + ShowSql bool + Logger *zap.Logger +} + +func (d *dbLogger) Debug(v ...interface{}) { + d.Logger.Debug(fmt.Sprint(v...)) +} + +func (d *dbLogger) Debugf(format string, v ...interface{}) { + d.Logger.Debug(fmt.Sprintf(format, v...)) +} + +func (d *dbLogger) Error(v ...interface{}) { + 
d.Logger.Error(fmt.Sprint(v...)) +} + +func (d *dbLogger) Errorf(format string, v ...interface{}) { + d.Logger.Error(fmt.Sprintf(format, v...)) +} + +func (d *dbLogger) Info(v ...interface{}) { + d.Logger.Info(fmt.Sprint(v...)) +} + +func (d *dbLogger) Infof(format string, v ...interface{}) { + if len(v) < 4 { + d.Logger.Info(fmt.Sprintf(format, v...)) + return + } + var costMs float64 = 0 + costTime := fmt.Sprintf("%s", v[3]) + if strings.Contains(costTime, "µs") { + costMs, _ = strconv.ParseFloat(strings.ReplaceAll(costTime, "µs", ""), 64) + costMs = costMs / 1000 + } else if strings.Contains(costTime, "ms") { + costMs, _ = strconv.ParseFloat(costTime[:len(costTime)-2], 64) + } else if strings.Contains(costTime, "m") { + costTime = costTime[:len(costTime)-1] + mIndex := strings.Index(costTime, "m") + minTime, _ := strconv.ParseFloat(costTime[:mIndex], 64) + secTime, _ := strconv.ParseFloat(costTime[mIndex+1:], 64) + costMs = (minTime*60 + secTime) * 1000 + } else { + costMs, _ = strconv.ParseFloat(costTime, 64) + } + d.Logger.Info("db_log", log.String("sql", fmt.Sprintf("%s", v[1])), log.String("param", fmt.Sprintf("%v", v[2])), log.Float64("cost_ms", costMs)) +} + +func (d *dbLogger) Warn(v ...interface{}) { + d.Logger.Warn(fmt.Sprint(v...)) +} + +func (d *dbLogger) Warnf(format string, v ...interface{}) { + d.Logger.Warn(fmt.Sprintf(format, v...)) +} + +func (d *dbLogger) Level() xorm_log.LogLevel { + return d.LogLevel +} + +func (d *dbLogger) SetLevel(l xorm_log.LogLevel) { + d.LogLevel = l +} + +func (d *dbLogger) ShowSQL(b ...bool) { + d.ShowSql = b[0] +} + +func (d *dbLogger) IsShowSQL() bool { + return d.ShowSql +} + +func queryCount(sql string, params ...interface{}) int { + sql = "SELECT COUNT(1) FROM ( " + sql + " ) sub_query" + resultMap := make(map[string]int) + _, err := x.SQL(sql, params...).Get(&resultMap) + if err != nil { + log.Logger.Error("Query sql count message fail", log.Error(err)) + return 0 + } + if _, b := resultMap["COUNT(1)"]; b { + 
// getJsonToXormMap builds a json-tag -> xorm-tag lookup table for the struct
// type of input. It also returns the xorm tag of the first field which, by
// convention in this code base, names the primary-key column.
func getJsonToXormMap(input interface{}) (resultMap map[string]string, idKeyName string) {
	resultMap = make(map[string]string)
	structType := reflect.TypeOf(input)
	for idx := 0; idx < structType.NumField(); idx++ {
		field := structType.Field(idx)
		resultMap[field.Tag.Get("json")] = field.Tag.Get("xorm")
	}
	if structType.NumField() > 0 {
		idKeyName = structType.Field(0).Tag.Get("xorm")
	}
	return resultMap, idKeyName
}
+ } else if filter.Operator == "lt" { + filterSql += fmt.Sprintf(" AND %s%s<=? ", transParam.Prefix, transParam.KeyMap[filter.Name]) + param = append(param, filter.Value) + } else if filter.Operator == "gt" { + filterSql += fmt.Sprintf(" AND %s%s>=? ", transParam.Prefix, transParam.KeyMap[filter.Name]) + param = append(param, filter.Value) + } else if filter.Operator == "ne" || filter.Operator == "neq" { + filterSql += fmt.Sprintf(" AND %s%s!=? ", transParam.Prefix, transParam.KeyMap[filter.Name]) + param = append(param, filter.Value) + } else if filter.Operator == "notNull" || filter.Operator == "isnot" { + filterSql += fmt.Sprintf(" AND %s%s is not null ", transParam.Prefix, transParam.KeyMap[filter.Name]) + } else if filter.Operator == "null" || filter.Operator == "is" { + filterSql += fmt.Sprintf(" AND %s%s is null ", transParam.Prefix, transParam.KeyMap[filter.Name]) + } + } + if queryParam.Sorting != nil { + if transParam.KeyMap[queryParam.Sorting.Field] == "" || transParam.KeyMap[queryParam.Sorting.Field] == "-" { + queryParam.Sorting.Field = transParam.PrimaryKey + } else { + queryParam.Sorting.Field = transParam.KeyMap[queryParam.Sorting.Field] + } + if queryParam.Sorting.Asc { + filterSql += fmt.Sprintf(" ORDER BY %s%s ASC ", transParam.Prefix, queryParam.Sorting.Field) + } else { + filterSql += fmt.Sprintf(" ORDER BY %s%s DESC ", transParam.Prefix, queryParam.Sorting.Field) + } + } + if len(queryParam.ResultColumns) > 0 { + for _, resultColumn := range queryParam.ResultColumns { + if transParam.KeyMap[resultColumn] == "" || transParam.KeyMap[resultColumn] == "-" { + continue + } + queryColumn += fmt.Sprintf("%s%s,", transParam.Prefix, transParam.KeyMap[resultColumn]) + } + } + if queryColumn == "" { + queryColumn = " * " + } else { + queryColumn = queryColumn[:len(queryColumn)-1] + } + return +} + +func transPageInfoToSQL(pageInfo models.PageInfo) (pageSql string, param []interface{}) { + pageSql = " LIMIT ?,? 
// getDefaultInsertSqlByStruct renders a parameterised INSERT statement for the
// struct obj, using each field's xorm tag as the column name. Fields whose tag
// is empty or "-" and fields listed in ignoreColumns are skipped.
func getDefaultInsertSqlByStruct(obj interface{}, tableName string, ignoreColumns []string) string {
	ignored := make(map[string]bool, len(ignoreColumns))
	for _, column := range ignoreColumns {
		ignored[column] = true
	}
	structType := reflect.TypeOf(obj)
	var columns, placeholders []string
	for idx := 0; idx < structType.NumField(); idx++ {
		tag := structType.Field(idx).Tag.Get("xorm")
		if tag == "" || tag == "-" || ignored[tag] {
			continue
		}
		columns = append(columns, fmt.Sprintf("`%s`", tag))
		placeholders = append(placeholders, "?")
	}
	return fmt.Sprintf("INSERT INTO %s(%s) VALUE (%s)", tableName, strings.Join(columns, ","), strings.Join(placeholders, ","))
}
// createListParams renders len(inputList) SQL "?" placeholders (comma joined)
// and the matching bound values, each value prefixed with prefix. Empty input
// yields an empty placeholder string and a nil parameter list.
func createListParams(inputList []string, prefix string) (specSql string, paramList []interface{}) {
	if len(inputList) == 0 {
		return
	}
	placeholders := make([]string, 0, len(inputList))
	for _, value := range inputList {
		placeholders = append(placeholders, "?")
		paramList = append(paramList, prefix+value)
	}
	specSql = strings.Join(placeholders, ",")
	return
}
+ + if len(transNullStr) > 0 { + if _, ok := transNullStr[t.Field(i).Tag.Get("xorm")]; ok { + execParams = append(execParams, NewNullString(v.FieldByName(t.Field(i).Name).String())) + } else { + execParams = append(execParams, v.FieldByName(t.Field(i).Name).Interface()) + } + } else { + execParams = append(execParams, v.FieldByName(t.Field(i).Name).Interface()) + } + } + execSqlCmd := "INSERT INTO " + tableName + "(" + execSqlCmd += columnStr + ") VALUE (" + valueStr + ")" + action = &execAction{Sql: execSqlCmd, Param: execParams} + return +} + +func GetUpdateTableExecAction(tableName string, primeKey string, primeKeyVal string, data interface{}, transNullStr map[string]string) (action *execAction, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("%v", r) + } + }() + + execParams := []interface{}{} + columnStr := "" + t := reflect.TypeOf(data) + v := reflect.ValueOf(data) + for i := 0; i < t.NumField(); i++ { + if i > 0 { + columnStr += "," + } + // columnStr += t.Field(i).Tag.Get("xorm") + columnStr += "`" + t.Field(i).Tag.Get("xorm") + "`" + columnStr += "=?" + + if len(transNullStr) > 0 { + if _, ok := transNullStr[t.Field(i).Tag.Get("xorm")]; ok { + execParams = append(execParams, NewNullString(v.FieldByName(t.Field(i).Name).String())) + } else { + execParams = append(execParams, v.FieldByName(t.Field(i).Name).Interface()) + } + } else { + execParams = append(execParams, v.FieldByName(t.Field(i).Name).Interface()) + } + } + execSqlCmd := "UPDATE " + tableName + " SET " + execSqlCmd += columnStr + execSqlCmd += " WHERE " + primeKey + "=?" 
// NewNullString wraps s in a sql.NullString, mapping the empty string to
// SQL NULL (Valid=false) and anything else to a valid value.
func NewNullString(s string) sql.NullString {
	if s == "" {
		return sql.NullString{}
	}
	return sql.NullString{String: s, Valid: true}
}
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY id DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get interface list error", log.Error(err)) + } + return +} + +func InterfaceBatchCreate(user string, param []*models.InterfaceTable) (rowData []*models.InterfaceTable, err error) { + actions := []*execAction{} + tableName := "interface" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.InterfaceTable{Id: id, Name: param[i].Name, Plugin: param[i].Plugin, Description: param[i].Description, + CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create interface fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + + // Auto insert system parameters + systemParams := make(map[string]map[string][]string) + systemParams["apply"] = make(map[string][]string) + systemParams["query"] = make(map[string][]string) + systemParams["destroy"] = make(map[string][]string) + + systemParams["apply"]["input"] = []string{"id", "asset_id", "region_id", "provider_info"} + systemParams["query"]["input"] = []string{"id", "asset_id", "region_id", "provider_info"} + systemParams["destroy"]["input"] = []string{"id", "region_id", "provider_info"} + + systemParams["apply"]["output"] = []string{"id", "asset_id"} + systemParams["query"]["output"] = []string{"id", "asset_id"} + systemParams["destroy"]["output"] = []string{"id"} + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["template"] = "true" + transNullStr["object_name"] = "true" + paramTableName := "parameter" + for paramType, _ := range systemParams[rowData[i].Name] { + for _, paramName := range 
systemParams[rowData[i].Name][paramType] { + paramId := guid.CreateGuid() + paramData := &models.ParameterTable{Id: paramId, Name: paramName, Type: paramType, Multiple: "N", Interface: rowData[i].Id, DataType: "string", Source: "system", CreateUser: user, CreateTime: createTime, UpdateTime: createTime, UpdateUser: user, Sensitive: "N", Nullable: "N"} + action, tmpErr := GetInsertTableExecAction(paramTableName, *paramData, transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to create parameter fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + } + } + + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create interface fail,%s ", err.Error()) + } + return +} + +func InterfaceBatchDelete(ids []string) (err error) { + actions := []*execAction{} + + // get the parameter by interface id + interfaceidsStr := strings.Join(ids, "','") + sqlCmd := "SELECT * FROM parameter WHERE interface IN ('" + interfaceidsStr + "')" + "ORDER BY object_name DESC" + var parameterList []*models.ParameterTable + err = x.SQL(sqlCmd).Find(¶meterList) + if err != nil { + log.Logger.Error("Get parameter list error", log.Error(err)) + } + tableName := "parameter" + for i := range parameterList { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", parameterList[i].Id) + if tmpErr != nil { + err = fmt.Errorf("Try to delete parameter fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + tableName = "interface" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete interface fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete interface fail,%s ", err.Error()) + } + return +} + +func InterfaceBatchUpdate(user string, param []*models.InterfaceTable) (err error) { + actions := []*execAction{} + tableName 
:= "interface" + updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update interface fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update interface fail,%s ", err.Error()) + } + return +} diff --git a/terraform-server/services/db/operation_log.go b/terraform-server/services/db/operation_log.go new file mode 100644 index 00000000..9ae75727 --- /dev/null +++ b/terraform-server/services/db/operation_log.go @@ -0,0 +1,46 @@ +package db + +import ( + "fmt" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func SaveOperationLog(param *models.SysLogTable) { + _, err := x.Exec("INSERT INTO sys_log(log_cat,operator,operation,content,request_url,client_host,created_date) value (?,?,?,?,?,?,?)", + param.LogCat, param.Operator, param.Operation, param.Content, param.RequestUrl, param.ClientHost, time.Now().Format(models.DateTimeFormat)) + if err != nil { + log.Logger.Error("Save operation log fail", log.Error(err)) + } +} + +func QueryOperationLog(param *models.QueryRequestParam) (pageInfo models.PageInfo, rowData []*models.SysLogTable, err error) { + rowData = []*models.SysLogTable{} + filterSql, queryColumn, queryParam := transFiltersToSQL(param, &models.TransFiltersParam{IsStruct: true, StructObj: models.SysLogTable{}, PrimaryKey: "id"}) + baseSql := fmt.Sprintf("SELECT %s FROM sys_log WHERE 1=1 %s ", queryColumn, filterSql) + if param.Paging { + pageInfo.StartIndex = param.Pageable.StartIndex + pageInfo.PageSize = param.Pageable.PageSize + pageInfo.TotalRows = queryCount(baseSql, queryParam...) 
+ pageSql, pageParam := transPageInfoToSQL(*param.Pageable) + baseSql += pageSql + queryParam = append(queryParam, pageParam...) + } + err = x.SQL(baseSql, queryParam...).Find(&rowData) + return +} + +func GetAllLogOperation() []string { + result := []string{"POST", "PUT", "DELETE"} + queryRows, err := x.QueryString("select distinct operation_en from sys_state_transition") + if err != nil { + log.Logger.Error("Try to get all log operation fail", log.Error(err)) + } else { + for _, row := range queryRows { + result = append(result, row["operation_en"]) + } + } + return result +} diff --git a/terraform-server/services/db/parameter.go b/terraform-server/services/db/parameter.go new file mode 100644 index 00000000..444bbefb --- /dev/null +++ b/terraform-server/services/db/parameter.go @@ -0,0 +1,182 @@ +package db + +import ( + "fmt" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func ParameterList(paramsMap map[string]interface{}) (rowData []*models.ParameterQuery, err error) { + // sqlCmd := "SELECT * FROM parameter WHERE 1=1" + // paramArgs := []interface{}{} + //for k, v := range paramsMap { + // sqlCmd += " AND " + k + "=?" + // paramArgs = append(paramArgs, v) + //} + //sqlCmd += " ORDER BY create_time DESC" + + sqlCmd := "SELECT t1.*, t2.name AS object_name_title FROM parameter t1 LEFT JOIN parameter t2 ON t1.object_name=t2.id WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + "t1." + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY t1.id DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get parameter list error", log.Error(err)) + } + return +} + +func ParameterBatchCreate(user string, param []*models.ParameterTable) (rowData []*models.ParameterTable, err error) { + actions := []*execAction{} + tableName := "parameter" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.ParameterTable{Id: id, Name: param[i].Name, Type: param[i].Type, Multiple: param[i].Multiple, + Interface: param[i].Interface, Template: param[i].Template, DataType: param[i].DataType, ObjectName: param[i].ObjectName, + Source: models.ParameterSourceDefault, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime, + Nullable: param[i].Nullable, Sensitive: param[i].Sensitive} + rowData = append(rowData, data) + } + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["template"] = "true" + transNullStr["object_name"] = "true" + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to create parameter fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create parameter fail,%s ", err.Error()) + } + return +} + +func ParameterBatchDelete(ids []string) (err error) { + actions := []*execAction{} + + idsStr := strings.Join(ids, "','") + sqlCmd := "SELECT * FROM parameter WHERE id IN ('" + idsStr + "')" + "ORDER BY object_name DESC" + var parameterList []*models.ParameterTable + err = x.SQL(sqlCmd).Find(¶meterList) + if err != nil { + log.Logger.Error("Get parameter list error", log.Error(err)) + } + + tmpIds := []string{} + for i := range parameterList { + tmpIds = 
append(tmpIds, parameterList[i].Id) + } + ids = tmpIds + + tableName := "parameter" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete parameter fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete parameter fail,%s ", err.Error()) + } + return +} + +func ParameterBatchUpdate(user string, param []*models.ParameterTable) (err error) { + actions := []*execAction{} + tableName := "parameter" + updateTime := time.Now().Format(models.DateTimeFormat) + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["template"] = "true" + transNullStr["object_name"] = "true" + transNullStr["source"] = "true" + + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to update parameter fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update parameter fail,%s ", err.Error()) + } + return +} + +func ParameterBatchCreateUpdate(user string, param []*models.ParameterTable) (rowData []*models.ParameterTable, err error) { + actions := []*execAction{} + tableName := "parameter" + createTime := time.Now().Format(models.DateTimeFormat) + updateDataIds := make(map[string]bool) + var parameterId string + + for i := range param { + var data *models.ParameterTable + if param[i].Id == "" { + parameterId = guid.CreateGuid() + data = &models.ParameterTable{Id: parameterId, Name: param[i].Name, Type: param[i].Type, Multiple: param[i].Multiple, Interface: param[i].Interface, Template: param[i].Template, DataType: param[i].DataType, ObjectName: param[i].ObjectName, 
Source: models.ParameterSourceDefault, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime, Nullable: param[i].Nullable, Sensitive: param[i].Sensitive} + } else { + updateDataIds[param[i].Id] = true + parameterId = param[i].Id + data = &models.ParameterTable{Id: parameterId, Name: param[i].Name, Type: param[i].Type, Multiple: param[i].Multiple, Interface: param[i].Interface, Template: param[i].Template, DataType: param[i].DataType, ObjectName: param[i].ObjectName, CreateUser: param[i].CreateUser, CreateTime: param[i].CreateTime, UpdateUser: user, UpdateTime: createTime, Nullable: param[i].Nullable, Sensitive: param[i].Sensitive} + } + rowData = append(rowData, data) + } + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["template"] = "true" + transNullStr["object_name"] = "true" + + var tmpErr error + for i := range rowData { + var action *execAction + if _, ok := updateDataIds[rowData[i].Id]; ok { + action, tmpErr = GetUpdateTableExecAction(tableName, "id", rowData[i].Id, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to get update_parameter execAction fail,%s ", tmpErr.Error()) + return + } + } else { + action, tmpErr = GetInsertTableExecAction(tableName, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to get create_parameter execAction fail,%s ", tmpErr.Error()) + return + } + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create or update parameter fail,%s ", err.Error()) + } + return +} \ No newline at end of file diff --git a/terraform-server/services/db/plugin.go b/terraform-server/services/db/plugin.go new file mode 100644 index 00000000..b42e69db --- /dev/null +++ b/terraform-server/services/db/plugin.go @@ -0,0 +1,275 @@ +package db + +import ( + "bytes" + "encoding/xml" + "fmt" + "time" + + 
"github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func PluginCreate(param *models.PluginTable) (rowData *models.PluginTable, err error) { + id := guid.CreateGuid() + createTime := time.Now().Format(models.DateTimeFormat) + _, err = x.Exec("INSERT INTO plugin(id,name,create_user,create_time) VALUE (?,?,?,?)", + id, param.Name, param.CreateUser, createTime) + + rowData = &models.PluginTable{Id: id, Name: param.Name, CreateUser: param.CreateUser, CreateTime: createTime} + + if err != nil { + err = fmt.Errorf("Try to create plugin fail,%s ", err.Error()) + } + return +} + +func PluginList(paramsMap map[string]interface{}) (rowData []*models.PluginTable, err error) { + sqlCmd := "SELECT * FROM plugin WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY name ASC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get plugin list error", log.Error(err)) + } + return +} + +func PluginDelete(pluginId string) (err error) { + var pluginList []*models.PluginTable + err = x.SQL("SELECT id FROM plugin WHERE id=?", pluginId).Find(&pluginList) + if err != nil { + log.Logger.Error("Try to query plugin fail", log.String("pluginId", pluginId), log.Error(err)) + return + } + if len(pluginList) == 0 { + return + } + _, err = x.Exec("DELETE FROM plugin WHERE id=?", pluginId) + return +} + +func PluginUpdate(pluginId string, param *models.PluginTable) (err error) { + var pluginList []*models.PluginTable + err = x.SQL("SELECT id FROM plugin WHERE id=?", pluginId).Find(&pluginList) + if err != nil { + log.Logger.Error("Try to query plugin fail", log.String("pluginId", pluginId), log.Error(err)) + return + } + if len(pluginList) == 0 { + return + } + updateTime := time.Now().Format(models.DateTimeFormat) + _, err = x.Exec("UPDATE plugin SET name=?,update_time=?,update_user=? 
WHERE id=?", + param.Name, updateTime, param.UpdateUser, pluginId) + return +} + +func PluginBatchCreate(user string, param []*models.PluginTable) (rowData []*models.PluginTable, err error) { + actions := []*execAction{} + tableName := "plugin" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.PluginTable{Id: id, Name: param[i].Name, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create plugin fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create plugin fail,%s ", err.Error()) + } + return +} + +func PluginBatchDelete(ids []string) (err error) { + actions := []*execAction{} + tableName := "plugin" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete plugin fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete plugin fail,%s ", err.Error()) + } + return +} + +func PluginBatchUpdate(user string, param []*models.PluginTable) (err error) { + actions := []*execAction{} + tableName := "plugin" + updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update plugin fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update plugin fail,%s ", 
err.Error()) + } + return +} + +func PluginXmlExport() (result []byte, err error) { + var pluginTable []*models.PluginTable + err = x.SQL("select id,name from plugin").Find(&pluginTable) + if err != nil { + return result, fmt.Errorf("Try to query plugin table fail,%s ", err.Error()) + } + if len(pluginTable) == 0 { + return result, fmt.Errorf("Plugin table is empty ") + } + var interfaceTable []*models.InterfaceTable + err = x.SQL("select id,name,plugin,description from interface order by plugin,name").Find(&interfaceTable) + if err != nil { + return result, fmt.Errorf("Try to query interface table fail,%s ", err.Error()) + } + resultBuffer := bytes.NewBuffer(result) + resultBuffer.WriteString("\n") + var packageXmlObj = models.XmlPackage{Name: "terraform", Version: models.Config.Version} + interfaceNameMap := make(map[string]string) + pluginNameMap := make(map[string]string) + for _, v := range pluginTable { + pluginNameMap[v.Id] = v.Name + } + pluginInterfaceMap := make(map[string][]*models.InterfaceTable) + for _, v := range interfaceTable { + interfaceNameMap[v.Id] = fmt.Sprintf("%s_%s", pluginNameMap[v.Plugin], v.Name) + if _, b := pluginInterfaceMap[v.Plugin]; !b { + pluginInterfaceMap[v.Plugin] = []*models.InterfaceTable{v} + } else { + pluginInterfaceMap[v.Plugin] = append(pluginInterfaceMap[v.Plugin], v) + } + } + paramObjects, xmlParamObjectMap, err := buildXmlParamObject(interfaceNameMap) + if err != nil { + return result, err + } + packageXmlObj.ParamObjects = paramObjects + var parameterTable []*models.ParameterTable + err = x.SQL("select name,`type`,multiple,interface,datatype,nullable,`sensitive` from `parameter` where object_name is null order by interface,`type`,name").Find(¶meterTable) + if err != nil { + return result, fmt.Errorf("Try to query paramether table fail,%s ", err.Error()) + } + parameterMap := make(map[string][]*models.ParameterTable) + for _, v := range parameterTable { + if _, b := parameterMap[v.Interface]; !b { + 
parameterMap[v.Interface] = []*models.ParameterTable{v} + } else { + parameterMap[v.Interface] = append(parameterMap[v.Interface], v) + } + } + var xmlPlugins = models.XmlPlugins{Plugins: []*models.XmlPlugin{}} + for _, plugin := range pluginTable { + tmpPlugin := models.XmlPlugin{Name: plugin.Name, Interfaces: []*models.XmlInterface{}} + for _, interfaceObj := range pluginInterfaceMap[plugin.Id] { + tmpInterface := models.XmlInterface{Action: interfaceObj.Name, InputParameters: models.XmlInputParameters{Parameters: []*models.XmlParameter{}}, OutputParameters: models.XmlOutputParameters{Parameters: []*models.XmlParameter{}}} + tmpInterface.Path = fmt.Sprintf("%s/api/v1/terraform/%s/%s", models.UrlPrefix, plugin.Name, interfaceObj.Name) + for _, parameter := range parameterMap[interfaceObj.Id] { + tmpParameter := models.XmlParameter{Datatype: parameter.DataType, Multiple: parameter.Multiple, SensitiveData: parameter.Sensitive, Required: "N", Value: parameter.Name, MappingType: "entity"} + if parameter.Nullable == "N" { + tmpParameter.Required = "Y" + } + if parameter.DataType == "object" { + tmpParameter.RefObjectName = fmt.Sprintf("%s_%s_%s", interfaceNameMap[parameter.Interface], parameter.Type, parameter.Name) + if _, b := xmlParamObjectMap[tmpParameter.RefObjectName]; !b { + tmpParameter.RefObjectName = "" + } + } + if parameter.Type == "input" { + tmpInterface.InputParameters.Parameters = append(tmpInterface.InputParameters.Parameters, &tmpParameter) + } else { + tmpInterface.OutputParameters.Parameters = append(tmpInterface.OutputParameters.Parameters, &tmpParameter) + } + } + tmpPlugin.Interfaces = append(tmpPlugin.Interfaces, &tmpInterface) + } + xmlPlugins.Plugins = append(xmlPlugins.Plugins, &tmpPlugin) + } + packageXmlObj.Plugins = xmlPlugins + pluginXmlBytes, marshalErr := xml.MarshalIndent(packageXmlObj, "", "\t") + if marshalErr != nil { + return result, fmt.Errorf("Xml marshal plugins fail,%s ", marshalErr.Error()) + } + 
resultBuffer.Write(pluginXmlBytes) + return resultBuffer.Bytes(), nil +} + +func buildXmlParamObject(interfaceNameMap map[string]string) (xmlParamObjects models.XmlParamObjects, paramObjectMap map[string]bool, err error) { + var objectParams, objectPropertyParams []*models.ParameterTable + xmlParamObjects = models.XmlParamObjects{ParamObjects: []*models.XmlParamObject{}} + paramObjectMap = make(map[string]bool) + err = x.SQL("select * from `parameter` where id in (select distinct object_name from `parameter` where object_name is not null)").Find(&objectParams) + if err != nil { + err = fmt.Errorf("Try to query object paramether fail,%s ", err.Error()) + return + } + if len(objectParams) == 0 { + return + } + var objectParamsNameMap = make(map[string]string) + for _, v := range objectParams { + objectParamsNameMap[v.Id] = fmt.Sprintf("%s_%s_%s", interfaceNameMap[v.Interface], v.Type, v.Name) + } + err = x.SQL("select * from `parameter` where object_name is not null order by object_name").Find(&objectPropertyParams) + if err != nil { + err = fmt.Errorf("Try to query object property paramether fail,%s ", err.Error()) + return + } + var objectPropertyMap = make(map[string][]*models.ParameterTable) + for _, objectProperty := range objectPropertyParams { + if _, b := objectPropertyMap[objectProperty.ObjectName]; !b { + objectPropertyMap[objectProperty.ObjectName] = []*models.ParameterTable{objectProperty} + } else { + objectPropertyMap[objectProperty.ObjectName] = append(objectPropertyMap[objectProperty.ObjectName], objectProperty) + } + } + for _, object := range objectParams { + if _, b := objectPropertyMap[object.Id]; !b { + continue + } + tmpParamObject := models.XmlParamObject{Name: fmt.Sprintf("%s_%s_%s", interfaceNameMap[object.Interface], object.Type, object.Name), Properties: []*models.XmlParamProperty{}} + paramObjectMap[tmpParamObject.Name] = true + for _, property := range objectPropertyMap[object.Id] { + tmpProperty := models.XmlParamProperty{Name: 
property.Name, Multiple: property.Multiple, DataType: property.DataType, SensitiveData: property.Sensitive, Required: "N", MapType: "entity"} + if property.DataType == "object" { + if _, b := objectParamsNameMap[property.Id]; b { + tmpProperty.RefObjectName = objectParamsNameMap[property.Id] + } + } + if property.Nullable == "N" { + tmpProperty.Required = "Y" + } + tmpParamObject.Properties = append(tmpParamObject.Properties, &tmpProperty) + } + xmlParamObjects.ParamObjects = append(xmlParamObjects.ParamObjects, &tmpParamObject) + } + return xmlParamObjects, paramObjectMap, nil +} diff --git a/terraform-server/services/db/provider.go b/terraform-server/services/db/provider.go new file mode 100644 index 00000000..b9242a7d --- /dev/null +++ b/terraform-server/services/db/provider.go @@ -0,0 +1,247 @@ +package db + +import ( + "fmt" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func ProviderList(paramsMap map[string]interface{}) (rowData []*models.ProviderTable, err error) { + sqlCmd := "SELECT * FROM provider WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get provider list error", log.Error(err)) + } + return +} + +func ProviderBatchCreate(user string, param []*models.ProviderTable) (rowData []*models.ProviderTable, err error) { + actions := []*execAction{} + tableName := "provider" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.ProviderTable{Id: id, Name: param[i].Name, Version: param[i].Version, SecretIdAttrName: param[i].SecretIdAttrName, + SecretKeyAttrName: param[i].SecretKeyAttrName, RegionAttrName: param[i].RegionAttrName, CreateUser: user, CreateTime: createTime, + UpdateUser: user, UpdateTime: createTime, NameSpace: param[i].NameSpace} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create provider fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create provider fail,%s ", err.Error()) + } + return +} + +func ProviderBatchDelete(ids []string) (err error) { + actions := []*execAction{} + tableName := "provider" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete provider fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete provider fail,%s ", err.Error()) + } + return +} + +func ProviderBatchUpdate(user string, param []*models.ProviderTable) (err error) { + actions := []*execAction{} + tableName := "provider" + updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = 
updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update provider fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update provider fail,%s ", err.Error()) + } + return +} + +func ProviderPluginExport(providerNameList, pluginNameList []string) (result models.ProviderPluginImportObj, err error) { + result = models.ProviderPluginImportObj{Provider: []*models.ProviderTable{}, Plugin: []*models.PluginTable{}} + result.ProviderTemplateValue = []*models.ProviderTemplateValueTable{} + result.Template = []*models.TemplateTable{} + result.TemplateValue = []*models.TemplateValueTable{} + result.Interface = []*models.InterfaceTable{} + result.Parameter = []*models.ParameterTable{} + result.Source = []*models.SourceTable{} + result.TfArgument = []*models.TfArgumentTable{} + result.TfstateAttribute = []*models.TfstateAttributeTable{} + specSql, paramList := createListParams(providerNameList, "") + err = x.SQL("select * from provider where name in ("+specSql+")", paramList...).Find(&result.Provider) + if err != nil { + err = fmt.Errorf("Query database provider table fail,%s ", err.Error()) + return + } + if len(result.Provider) == 0 { + err = fmt.Errorf("Can not find any provider with name:%s ", providerNameList) + return + } + var providerIdList, pluginIdList []string + var providerIdFilterSql, pluginIdFilterSql string + for _, v := range result.Provider { + providerIdList = append(providerIdList, v.Id) + } + providerIdFilterSql = fmt.Sprintf("('%s')", strings.Join(providerIdList, "','")) + specSql, paramList = createListParams(pluginNameList, "") + err = x.SQL("select * from plugin where name in ("+specSql+")", paramList...).Find(&result.Plugin) + if err == nil && len(result.Plugin) == 0 { + err = fmt.Errorf("Can not find any plugin with name:%s 
", pluginNameList) + return + } + for _, v := range result.Plugin { + pluginIdList = append(pluginIdList, v.Id) + } + pluginIdFilterSql = fmt.Sprintf("('%s')", strings.Join(pluginIdList, "','")) + x.SQL("select * from interface where plugin in " + pluginIdFilterSql).Find(&result.Interface) + x.SQL("select * from `parameter` where interface in (select id from interface where plugin in " + pluginIdFilterSql + ")").Find(&result.Parameter) + templateSql := "select * from template where id in (select template_value from provider_template_value where provider in " + providerIdFilterSql + ")" + templateSql += " union " + templateSql += "select * from template where id in (select template from `parameter` where interface in (select id from interface where plugin in " + pluginIdFilterSql + "))" + x.SQL(templateSql).Find(&result.Template) + x.SQL("select * from template_value where template in (" + strings.ReplaceAll(templateSql, "*", "id") + ")").Find(&result.TemplateValue) + sourceSql := "select * from source where interface in (select id from interface where plugin in " + pluginIdFilterSql + ") and provider in " + providerIdFilterSql + x.SQL(sourceSql).Find(&result.Source) + x.SQL("select * from tf_argument where source in (" + strings.ReplaceAll(sourceSql, "*", "id") + ")").Find(&result.TfArgument) + x.SQL("select * from tfstate_attribute where source in (" + strings.ReplaceAll(sourceSql, "*", "id") + ")").Find(&result.TfstateAttribute) + var sourceMap, tfStateAttrMap, parameterMap = make(map[string]bool), make(map[string]bool), make(map[string]bool) + for _, v := range result.Source { + sourceMap[v.Id] = true + } + for _, v := range result.TfstateAttribute { + tfStateAttrMap[v.Id] = true + } + for _, v := range result.Parameter { + parameterMap[v.Id] = true + } + for i, v := range result.TfArgument { + if _, b := sourceMap[v.RelativeSource]; !b { + result.TfArgument[i].RelativeSource = "" + } + if _, b := tfStateAttrMap[v.RelativeTfstateAttribute]; !b { + 
result.TfArgument[i].RelativeTfstateAttribute = "" + } + if _, b := parameterMap[v.RelativeParameter]; !b { + result.TfArgument[i].RelativeParameter = "" + result.TfArgument[i].RelativeParameterValue = "" + } + } + for i, v := range result.TfstateAttribute { + if _, b := sourceMap[v.RelativeSource]; !b { + result.TfstateAttribute[i].RelativeSource = "" + } + if _, b := tfStateAttrMap[v.RelativeTfstateAttribute]; !b { + result.TfstateAttribute[i].RelativeTfstateAttribute = "" + } + if _, b := parameterMap[v.RelativeParameter]; !b { + result.TfstateAttribute[i].RelativeParameter = "" + result.TfstateAttribute[i].RelativeParameterValue = "" + } + } + return +} + +func ProviderPluginImport(input models.ProviderPluginImportObj, updateUser string) error { + updateTime := time.Now().Format(models.DateTimeFormat) + var actions []*execAction + for _, v := range input.Provider { + tmpAction := execAction{Sql: "replace into provider(id,name,`version`,secret_id_attr_name,secret_key_attr_name,region_attr_name,Initialized,create_time,create_user,update_time,update_user,name_space) values (?,?,?,?,?,?,?,?,?,?,?,?)"} + tmpAction.Param = []interface{}{v.Id, v.Name, v.Version, v.SecretIdAttrName, v.SecretKeyAttrName, v.RegionAttrName, v.Initialized, v.CreateTime, v.CreateUser, updateTime, updateUser, v.NameSpace} + actions = append(actions, &tmpAction) + } + for _, v := range input.Plugin { + actions = append(actions, &execAction{Sql: "replace into plugin(id,name,create_time,create_user,update_time,update_user) values (?,?,?,?,?,?)", Param: []interface{}{v.Id, v.Name, v.CreateTime, v.CreateUser, updateTime, updateUser}}) + } + for _, v := range input.Interface { + actions = append(actions, &execAction{Sql: "replace into interface(id,name,description,plugin,create_time,create_user,update_time,update_user) values (?,?,?,?,?,?,?,?)", Param: []interface{}{v.Id, v.Name, v.Description, v.Plugin, v.CreateTime, v.CreateUser, updateTime, updateUser}}) + } + for _, v := range input.Template { 
+ actions = append(actions, &execAction{Sql: "replace into template(id,name,description,create_time,create_user,update_time,update_user) values (?,?,?,?,?,?,?)", Param: []interface{}{v.Id, v.Name, v.Description, v.CreateTime, v.CreateUser, updateTime, updateUser}}) + } + for _, v := range input.ProviderTemplateValue { + actions = append(actions, &execAction{Sql: "replace into provider_template_value(id,value,provider,template_value,create_time,create_user,update_time,update_user) values (?,?,?,?,?,?,?,?)", Param: []interface{}{v.Id, v.Value, v.Provider, v.TemplateValue, v.CreateTime, v.CreateUser, updateTime, updateUser}}) + } + for _, v := range input.TemplateValue { + actions = append(actions, &execAction{Sql: "replace into template_value(id,value,template,create_time,create_user,update_time,update_user) values (?,?,?,?,?,?,?)", Param: []interface{}{v.Id, v.Value, v.Template, v.CreateTime, v.CreateUser, updateTime, updateUser}}) + } + for _, v := range input.Parameter { + tmpAction := execAction{Sql: "replace into `parameter`(id,name,`type`,multiple,interface,datatype,source,create_time,create_user,update_time,update_user,nullable,`sensitive`,template,object_name) values (?,?,?,?,?,?,?,?,?,?,?,?,?"} + tmpAction.Param = []interface{}{v.Id, v.Name, v.Type, v.Multiple, v.Interface, v.DataType, v.Source, v.CreateTime, v.CreateUser, updateTime, updateUser, v.Nullable, v.Sensitive} + tmpAction.Sql += "," + getRelativeNullValue(v.Template) + tmpAction.Sql += "," + getRelativeNullValue(v.ObjectName) + ")" + actions = append(actions, &tmpAction) + } + for _, v := range input.Source { + tmpAction := execAction{Sql: "replace into source(id,interface,provider,name,asset_id_attribute,terraform_used,import_prefix,execution_seq_no,import_support,source_type,create_time,create_user,update_time,update_user) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?)"} + tmpAction.Param = []interface{}{v.Id, v.Interface, v.Provider, v.Name, v.AssetIdAttribute, v.TerraformUsed, v.ImportPrefix, 
v.ExecutionSeqNo, v.ImportSupport, v.SourceType, v.CreateTime, v.CreateUser, updateTime, updateUser} + actions = append(actions, &tmpAction) + } + for _, v := range input.TfArgument { + tmpAction := execAction{Sql: "replace into tf_argument(id,name,source,default_value,is_null,`type`,is_multi,convert_way,function_define,key_argument,create_time,create_user,update_time,update_user,`parameter`,object_name,relative_source,relative_tfstate_attribute,relative_parameter,relative_parameter_value) values (?,?,?,?,?,?,?,?,?,?,?,?,?,?"} + tmpAction.Param = []interface{}{v.Id, v.Name, v.Source, v.DefaultValue, v.IsNull, v.Type, v.IsMulti, v.ConvertWay, v.FunctionDefine, v.KeyArgument, v.CreateTime, v.CreateUser, updateTime, updateUser} + tmpAction.Sql += "," + getRelativeNullValue(v.Parameter) + tmpAction.Sql += "," + getRelativeNullValue(v.ObjectName) + tmpAction.Sql += "," + getRelativeNullValue(v.RelativeSource) + tmpAction.Sql += "," + getRelativeNullValue(v.RelativeTfstateAttribute) + tmpAction.Sql += "," + getRelativeNullValue(v.RelativeParameter) + tmpAction.Sql += "," + getRelativeNullValue(v.RelativeParameterValue) + ")" + actions = append(actions, &tmpAction) + } + for _, v := range input.TfstateAttribute { + tmpAction := execAction{Sql: "replace into tfstate_attribute(id,name,source,default_value,is_null,`type`,is_multi,convert_way,function_define,create_time,create_user,update_time,update_user,`parameter`,object_name,relative_source,relative_tfstate_attribute,relative_parameter,relative_parameter_value) values (?,?,?,?,?,?,?,?,?,?,?,?,?"} + tmpAction.Param = []interface{}{v.Id, v.Name, v.Source, v.DefaultValue, v.IsNull, v.Type, v.IsMulti, v.ConvertWay, v.FunctionDefine, v.CreateTime, v.CreateUser, updateTime, updateUser} + tmpAction.Sql += "," + getRelativeNullValue(v.Parameter) + tmpAction.Sql += "," + getRelativeNullValue(v.ObjectName) + tmpAction.Sql += "," + getRelativeNullValue(v.RelativeSource) + tmpAction.Sql += "," + 
// getRelativeNullValue renders an optional string for direct embedding in a
// SQL statement: empty input becomes the keyword NULL, anything else a
// single-quoted literal. Single quotes inside the value are doubled so the
// generated statement stays well-formed and cannot be broken out of
// (previously a value containing ' corrupted the REPLACE statement).
func getRelativeNullValue(input string) string {
	if input == "" {
		return "NULL"
	}
	return "'" + strings.ReplaceAll(input, "'", "''") + "'"
}
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + */ + sqlCmd := "SELECT t1.*,t2.name AS provider_title FROM provider_info t1 LEFT JOIN provider t2 ON t1.provider=t2.id" + sqlCmd += " ORDER BY t1.id DESC" + err = x.SQL(sqlCmd).Find(&rowData) + if err != nil { + log.Logger.Error("Get providerInfo list error", log.Error(err)) + } + return +} + +func ProviderInfoBatchCreate(user string, param []*models.ProviderInfoTable) (rowData []*models.ProviderInfoTable, err error) { + actions := []*execAction{} + tableName := "provider_info" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.ProviderInfoTable{Id: id, Name: param[i].Name, Provider: param[i].Provider, SecretId: param[i].SecretId, + SecretKey: param[i].SecretKey, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create provider_info fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create provider_info fail,%s ", err.Error()) + } + return +} + +func ProviderInfoBatchDelete(ids []string) (err error) { + actions := []*execAction{} + tableName := "provider_info" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete provider_info fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete provider_info fail,%s ", err.Error()) + } + return +} + +func ProviderInfoBatchUpdate(user string, param []*models.ProviderInfoTable) (err error) { + actions := []*execAction{} + tableName := "provider_info" + 
updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update provider_info fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update provider_info fail,%s ", err.Error()) + } + return +} diff --git a/terraform-server/services/db/provider_template_value.go b/terraform-server/services/db/provider_template_value.go new file mode 100644 index 00000000..2d303e45 --- /dev/null +++ b/terraform-server/services/db/provider_template_value.go @@ -0,0 +1,181 @@ +package db + +import ( + "fmt" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func ProviderTemplateValueList(paramsMap map[string]interface{}) (rowData []*models.ProviderTemplateValueTable, err error) { + sqlCmd := "SELECT * FROM provider_template_value WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get providerTemplateValue list error", log.Error(err)) + } + return +} + +func ProviderTemplateValueBatchCreate(user string, param []*models.ProviderTemplateValueTable) (rowData []*models.ProviderTemplateValueTable, err error) { + actions := []*execAction{} + tableName := "provider_template_value" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.ProviderTemplateValueTable{Id: id, Value: param[i].Value, Provider: param[i].Provider, TemplateValue: param[i].TemplateValue, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create provider_template_value fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create provider_template_value fail,%s ", err.Error()) + } + return +} + +func ProviderTemplateValueBatchDelete(ids []string) (err error) { + actions := []*execAction{} + tableName := "provider_template_value" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete provider_template_value fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete provider_template_value fail,%s ", err.Error()) + } + return +} + +func ProviderTemplateValueBatchUpdate(user string, param []*models.ProviderTemplateValueTable) (err error) { + actions := []*execAction{} + tableName := "provider_template_value" + updateTime := 
time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update provider_template_value fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update provider_template_value fail,%s ", err.Error()) + } + return +} + +func ProviderTemplateValueListByTemplate(templateId string) (rowData []*models.TemplateValueQuery, err error) { + sqlCmd := "SELECT * FROM template_value WHERE template=? ORDER BY id DESC" + paramArgs := []interface{}{} + paramArgs = append(paramArgs, templateId) + var templateValueList []*models.TemplateValueQuery + err = x.SQL(sqlCmd, paramArgs...).Find(&templateValueList) + if err != nil { + log.Logger.Error("Get template_value list by template error", log.String("template", templateId), log.Error(err)) + return + } + if len(templateValueList) == 0 { + log.Logger.Warn("template_value list can not be found by template", log.String("template", templateId)) + return + } + + m := make(map[string]*models.TemplateValueQuery) + for i := range templateValueList { + templateValueList[i].ProviderTemplateValueInfo = make(map[string]map[string]string) + m[templateValueList[i].Id] = templateValueList[i] + } + + sqlCmd = "SELECT t1.id AS providerTemplateValueId,t1.value AS providerTemplateValue,t4.name AS provider,t1.create_time AS providerTemplateValueCreateTime,t1.create_user AS providerTemplateValueCreateUser,t2.id " + + "AS templateValueId,t2.value AS templateValue,t2.template AS templateId FROM provider_template_value t1 LEFT " + + "JOIN template_value t2 on t1.template_value=t2.id LEFT JOIN template t3 on t2.template=t3.id LEFT JOIN provider t4 on t4.id=t1.provider WHERE t3.id=? 
ORDER BY t2.id DESC" + sqlOrArgs := []interface{}{sqlCmd, templateId} + providerTemplateValueList, err := x.QueryString(sqlOrArgs...) + if err != nil { + log.Logger.Error("Get provider_template_value list by template error", log.String("template", templateId), log.Error(err)) + return + } + for _, ptv := range providerTemplateValueList { + templateValueInfo := m[ptv["templateValueId"]] + if _, ok := templateValueInfo.ProviderTemplateValueInfo[ptv["provider"]]; !ok { + templateValueInfo.ProviderTemplateValueInfo[ptv["provider"]] = make(map[string]string) + } + templateValueInfo.ProviderTemplateValueInfo[ptv["provider"]]["id"] = ptv["providerTemplateValueId"] + templateValueInfo.ProviderTemplateValueInfo[ptv["provider"]]["value"] = ptv["providerTemplateValue"] + templateValueInfo.ProviderTemplateValueInfo[ptv["provider"]]["createTime"] = ptv["providerTemplateValueCreateTime"] + templateValueInfo.ProviderTemplateValueInfo[ptv["provider"]]["createUser"] = ptv["providerTemplateValueCreateUser"] + } + rowData = templateValueList + return +} + +func ProviderTemplateValueBatchCreateUpdate(user string, param []*models.ProviderTemplateValueTable) (rowData []*models.ProviderTemplateValueTable, err error) { + actions := []*execAction{} + tableName := "provider_template_value" + createTime := time.Now().Format(models.DateTimeFormat) + updateDataIds := make(map[string]bool) + var providerTemplateValueId string + for i := range param { + var data *models.ProviderTemplateValueTable + if param[i].Id == "" { + providerTemplateValueId = guid.CreateGuid() + data = &models.ProviderTemplateValueTable{Id: providerTemplateValueId, Value: param[i].Value, Provider: param[i].Provider, TemplateValue: param[i].TemplateValue, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + } else { + updateDataIds[param[i].Id] = true + providerTemplateValueId = param[i].Id + data = &models.ProviderTemplateValueTable{Id: providerTemplateValueId, Value: param[i].Value, 
Provider: param[i].Provider, TemplateValue: param[i].TemplateValue, CreateUser: param[i].CreateUser, CreateTime: param[i].CreateTime, UpdateUser: user, UpdateTime: createTime} + } + rowData = append(rowData, data) + } + + var tmpErr error + for i := range rowData { + var action *execAction + if _, ok := updateDataIds[rowData[i].Id]; ok { + action, tmpErr = GetUpdateTableExecAction(tableName, "id", rowData[i].Id, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to get update_provider_template_value execAction fail,%s ", tmpErr.Error()) + return + } + } else { + action, tmpErr = GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to get create_provider_template_value execAction fail,%s ", tmpErr.Error()) + return + } + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create or update provider_template_value fail,%s ", err.Error()) + } + return +} \ No newline at end of file diff --git a/terraform-server/services/db/resource_data.go b/terraform-server/services/db/resource_data.go new file mode 100644 index 00000000..6bdc9a3e --- /dev/null +++ b/terraform-server/services/db/resource_data.go @@ -0,0 +1,105 @@ +package db + +import ( + "fmt" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + "time" +) + +func ResourceDataList(paramsMap map[string]interface{}) (rowData []*models.ResourceDataTable, err error) { + sqlCmd := "SELECT * FROM resource_data WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get resource_data list error", log.Error(err)) + } + return +} + +func ResourceDataBatchCreate(user string, param []*models.ResourceDataTable) (rowData []*models.ResourceDataTable, err error) { + actions := []*execAction{} + tableName := "resource_data" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.ResourceDataTable{Id: id, Resource: param[i].Resource, ResourceId: param[i].ResourceId, ResourceAssetId: param[i].ResourceAssetId, + RegionId: param[i].RegionId, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create resource_data fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create resource_data fail,%s ", err.Error()) + } + return +} + +func ResourceDataBatchDelete(ids []string) (err error) { + actions := []*execAction{} + tableName := "resource_data" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete resource_data fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete resource_data fail,%s ", err.Error()) + } + return +} + +func ResourceDataBatchUpdate(user string, param []*models.ResourceDataTable) (err error) { + actions := []*execAction{} + tableName := "resource_data" + updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + 
param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update resource_data fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update resource_data fail,%s ", err.Error()) + } + return +} + +func ResourceDataDebugList(ids string) (rowData []*models.ResourceDataQuery, err error) { + sqlCmd := "SELECT t1.*,t2.name AS resource_title FROM resource_data_debug t1 LEFT JOIN source t2 ON t1.resource=t2.id WHERE 1=1" + if ids != "" { + sqlCmd += " AND t1.id IN ('" + ids + "')" + } + sqlCmd += " ORDER BY t1.id DESC" + err = x.SQL(sqlCmd).Find(&rowData) + if err != nil { + log.Logger.Error("Get resource_data_debug list error", log.Error(err)) + } + return +} \ No newline at end of file diff --git a/terraform-server/services/db/source.go b/terraform-server/services/db/source.go new file mode 100644 index 00000000..0cda68e8 --- /dev/null +++ b/terraform-server/services/db/source.go @@ -0,0 +1,132 @@ +package db + +import ( + "fmt" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func SourceList(paramsMap map[string]interface{}) (rowData []*models.SourceTable, err error) { + sqlCmd := "SELECT * FROM source WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get source list error", log.Error(err)) + } + return +} + +func SourceBatchCreate(user string, param []*models.SourceTable) (rowData []*models.SourceTable, err error) { + actions := []*execAction{} + tableName := "source" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.SourceTable{Id: id, Interface: param[i].Interface, Provider: param[i].Provider, Name: param[i].Name, + AssetIdAttribute: param[i].AssetIdAttribute, TerraformUsed: param[i].TerraformUsed, CreateUser: user, CreateTime: createTime, + UpdateUser: user, UpdateTime: createTime, ImportPrefix: param[i].ImportPrefix, ImportSupport: param[i].ImportSupport, + SourceType: param[i].SourceType, Remark: param[i].Remark} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create source fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create source fail,%s ", err.Error()) + } + return +} + +func SourceBatchDelete(ids []string) (err error) { + actions := []*execAction{} + + // get the tfArgument by source id + sourceidsStr := strings.Join(ids, "','") + sqlCmd := "SELECT * FROM tf_argument WHERE source IN ('" + sourceidsStr + "')" + "ORDER BY object_name DESC" + var tfArgumentList []*models.TfArgumentTable + err = x.SQL(sqlCmd).Find(&tfArgumentList) + if err != nil { + log.Logger.Error("Get tfArgument list error", log.Error(err)) + } + tableName := "tf_argument" + for i := range tfArgumentList { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", tfArgumentList[i].Id) + if tmpErr != nil { + err = fmt.Errorf("Try to delete 
tfArgument fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + // get the tfstateAttribute by source id + sqlCmd = "SELECT * FROM tfstate_attribute WHERE source IN ('" + sourceidsStr + "')" + " ORDER BY object_name DESC" + var tfstateAttributeList []*models.TfstateAttributeTable + err = x.SQL(sqlCmd).Find(&tfstateAttributeList) + if err != nil { + log.Logger.Error("Get tfstateAttribute list error", log.Error(err)) + } + tableName = "tfstate_attribute" + for i := range tfstateAttributeList { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", tfstateAttributeList[i].Id) + if tmpErr != nil { + err = fmt.Errorf("Try to delete tfstateAttribute fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + tableName = "source" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete source fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete source fail,%s ", err.Error()) + } + return +} + +func SourceBatchUpdate(user string, param []*models.SourceTable) (err error) { + actions := []*execAction{} + tableName := "source" + updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update source fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update source fail,%s ", err.Error()) + } + return +} diff --git a/terraform-server/services/db/template.go b/terraform-server/services/db/template.go new file mode 100644 index 00000000..0daa6abe --- /dev/null +++ 
b/terraform-server/services/db/template.go @@ -0,0 +1,104 @@ +package db + +import ( + "fmt" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func TemplateList(paramsMap map[string]interface{}) (rowData []*models.TemplateTable, err error) { + sqlCmd := "SELECT * FROM template WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" + paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get template list error", log.Error(err)) + } + return +} + +func TemplateBatchCreate(user string, param []*models.TemplateTable) (rowData []*models.TemplateTable, err error) { + actions := []*execAction{} + tableName := "template" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.TemplateTable{Id: id, Name: param[i].Name, Description: param[i].Description, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create template fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create template fail,%s ", err.Error()) + } + return +} + +func TemplateBatchDelete(ids []string) (err error) { + actions := []*execAction{} + tableName := "template" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete template fail,%s ", 
tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete template fail,%s ", err.Error()) + } + return +} + +func TemplateBatchUpdate(user string, param []*models.TemplateTable) (err error) { + actions := []*execAction{} + tableName := "template" + updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update template fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update template fail,%s ", err.Error()) + } + return +} + +func TemplateListByPlugin(pluginId string) (rowData []*models.TemplateTable, err error) { + sqlCmd := "SELECT t1.* FROM template t1 LEFT JOIN parameter t2 on t1.id=t2.template LEFT JOIN interface t3 on " + + "t2.interface=t3.id LEFT JOIN plugin t4 on t3.plugin=t4.id WHERE t4.id=? 
GROUP BY t1.id ORDER BY t1.id DESC" + paramArgs := []interface{}{} + paramArgs = append(paramArgs, pluginId) + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get template by plugin list error", log.String("plugin", pluginId), log.Error(err)) + } + return +} \ No newline at end of file diff --git a/terraform-server/services/db/template_value.go b/terraform-server/services/db/template_value.go new file mode 100644 index 00000000..423757ac --- /dev/null +++ b/terraform-server/services/db/template_value.go @@ -0,0 +1,169 @@ +package db + +import ( + "fmt" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func TemplateValueList(paramsMap map[string]interface{}) (rowData []*models.TemplateValueTable, err error) { + sqlCmd := "SELECT * FROM template_value WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get template_value list error", log.Error(err)) + } + return +} + +func TemplateValueBatchCreate(user string, param []*models.TemplateValueTable) (rowData []*models.TemplateValueTable, err error) { + actions := []*execAction{} + tableName := "template_value" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.TemplateValueTable{Id: id, Value: param[i].Value, Template: param[i].Template, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to create template_value fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create template_value fail,%s ", err.Error()) + } + return +} + +func TemplateValueBatchDelete(ids []string) (err error) { + actions := []*execAction{} + + // find all providerTemplateValue by templateValueId + templateValueIdsStr := strings.Join(ids, "','") + sqlCmd := "SELECT id FROM provider_template_value WHERE template_value IN ('" + templateValueIdsStr + "')" + providerTemplateValueList, err := x.QueryString(sqlCmd) + if err != nil { + log.Logger.Error("Try to query provider_template_value list by template_value fail", log.String("templateValueIds", templateValueIdsStr), log.Error(err)) + return + } + + tableName := "provider_template_value" + for i := range providerTemplateValueList { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", providerTemplateValueList[i]["id"]) + if tmpErr != nil { + err = fmt.Errorf("Try to get delete provider_template_value execAction fail,%s ", 
tmpErr.Error()) + return + } + actions = append(actions, action) + } + + tableName = "template_value" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to get delete template_value execAction fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete template_value fail,%s ", err.Error()) + } + return +} + +func TemplateValueBatchUpdate(user string, param []*models.TemplateValueTable) (err error) { + actions := []*execAction{} + tableName := "template_value" + updateTime := time.Now().Format(models.DateTimeFormat) + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to update template_value fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update template_value fail,%s ", err.Error()) + } + return +} + +func TemplateValueListByParameter(parameterId string) (rowData []*models.TemplateValueTable, err error) { + sqlCmd := "SELECT t1.* FROM template_value t1 LEFT JOIN template t2 on t1.template=t2.id LEFT JOIN parameter t3 on " + + "t2.id=t3.template WHERE t3.id=? 
ORDER BY t1.id DESC" + paramArgs := []interface{}{} + paramArgs = append(paramArgs, parameterId) + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get template_value by parameter list error", log.String("parameter", parameterId), log.Error(err)) + } + return +} + +func TemplateValueBatchCreateUpdate(user string, param []*models.TemplateValueTable) (rowData []*models.TemplateValueTable, err error) { + actions := []*execAction{} + tableName := "template_value" + createTime := time.Now().Format(models.DateTimeFormat) + updateDataIds := make(map[string]bool) + var templateValueId string + for i := range param { + var data *models.TemplateValueTable + if param[i].Id == "" { + templateValueId = guid.CreateGuid() + data = &models.TemplateValueTable{Id: templateValueId, Value: param[i].Value, Template: param[i].Template, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + } else { + updateDataIds[param[i].Id] = true + templateValueId = param[i].Id + data = &models.TemplateValueTable{Id: templateValueId, Value: param[i].Value, Template: param[i].Template, CreateUser: param[i].CreateUser, CreateTime: param[i].CreateTime, UpdateUser: user, UpdateTime: createTime} + } + rowData = append(rowData, data) + } + + var tmpErr error + for i := range rowData { + var action *execAction + if _, ok := updateDataIds[rowData[i].Id]; ok { + action, tmpErr = GetUpdateTableExecAction(tableName, "id", rowData[i].Id, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to get update_template_value execAction fail,%s ", tmpErr.Error()) + return + } + } else { + action, tmpErr = GetInsertTableExecAction(tableName, *rowData[i], nil) + if tmpErr != nil { + err = fmt.Errorf("Try to get create_template_value execAction fail,%s ", tmpErr.Error()) + return + } + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create or update template_value fail,%s ", 
err.Error()) + } + return +} \ No newline at end of file diff --git a/terraform-server/services/db/terraform.go b/terraform-server/services/db/terraform.go new file mode 100644 index 00000000..38f90bf4 --- /dev/null +++ b/terraform-server/services/db/terraform.go @@ -0,0 +1,5236 @@ +package db + +import ( + "bufio" + "bytes" + "encoding/json" + "fmt" + "io/ioutil" + "os" + "os/exec" + "reflect" + "regexp" + "sort" + "strconv" + "strings" + "syscall" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/cipher" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func GenFile(content []byte, filePath string) (err error) { + // 覆盖写入需加:os.O_TRUNC + file, err := os.OpenFile(filePath, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, 0666) + if err != nil { + log.Logger.Error("Open file error", log.String("file", filePath), log.Error(err)) + return + } + defer file.Close() + + _, err = file.Write(content) + if err != nil { + log.Logger.Error("Write file error", log.String("file", filePath), log.Error(err)) + } + return +} + +func ReadFile(filePath string) (content []byte, err error) { + file, err := os.Open(filePath) + if err != nil { + log.Logger.Error("Open file error", log.String("file", filePath), log.Error(err)) + return + } + defer file.Close() + content, err = ioutil.ReadAll(file) + return +} + +func DelFile(filePath string) (err error) { + _, err = os.Stat(filePath) + if err != nil { + if os.IsNotExist(err) { + return + } else { + err = fmt.Errorf("Os stat filePath: %s error: %s", filePath, err.Error()) + log.Logger.Error("Os stat filePath error", log.String("filePath", filePath), log.Error(err)) + return + } + } + err = os.Remove(filePath) + if err != nil { + err = fmt.Errorf("Delete file: %s error: %s", filePath, err.Error()) + 
log.Logger.Error("Delete file error", log.String("filePath", filePath), log.Error(err)) + } + return +} + +func GenDir(dirPath string) (err error) { + _, err = os.Stat(dirPath) + if err != nil { + if os.IsNotExist(err) { + err = os.MkdirAll(dirPath, os.ModePerm) + if err != nil { + err = fmt.Errorf("Make dir: %s error: %s", dirPath, err.Error()) + log.Logger.Error("Make dir error", log.String("dirPath", dirPath), log.Error(err)) + return + } + } else { + err = fmt.Errorf("Os stat dir: %s error: %s", dirPath, err.Error()) + log.Logger.Error("Os stat dir error", log.String("dirPath", dirPath), log.Error(err)) + return + } + } + return +} + +/* +func DelDir(dirPath string) (err error) { + _, err = os.Stat(dirPath) + if err != nil { + if os.IsNotExist(err) == false { + err = fmt.Errorf("Os stat dir: %s error: %s", dirPath, err.Error()) + log.Logger.Error("Os stat dir error", log.String("dirPath", dirPath), log.Error(err)) + return + } + } + // clear dir + if dirPath[len(dirPath)-1] == '/' { + dirPath = dirPath[:len(dirPath)-1] + } + dir, err := ioutil.ReadDir(dirPath) + for _, d := range dir { + tmpPath := path.Join([]string{dirPath, d.Name()}...) 
+ os.RemoveAll(tmpPath) + } + return +} + */ + +func GenTfFile(dirPath string, sourceData *models.SourceTable, action string, resourceId string, tfArguments map[string]interface{}) (tfFileContentStr string, err error) { + var tfFilePath string + tfFilePath = dirPath + "/" + sourceData.Name + ".tf.json" + + tfFileData := make(map[string]map[string]map[string]map[string]interface{}) + if action == "apply" { + tfFileData["resource"] = make(map[string]map[string]map[string]interface{}) + tfFileData["resource"][sourceData.Name] = make(map[string]map[string]interface{}) + tfFileData["resource"][sourceData.Name][resourceId] = tfArguments + } else { + tfFileData["data"] = make(map[string]map[string]map[string]interface{}) + tfFileData["data"][sourceData.Name] = make(map[string]map[string]interface{}) + if resourceId == "" { + resourceId = "_" + guid.CreateGuid() + } + tfFileData["data"][sourceData.Name][resourceId] = tfArguments + } + + tfFileContent, err := json.Marshal(tfFileData) + err = GenFile((tfFileContent), tfFilePath) + if err != nil { + err = fmt.Errorf("Gen tfFile: %s error: %s", tfFilePath, err.Error()) + log.Logger.Error("Gen tfFile error", log.String("tfFilePath", tfFilePath), log.Error(err)) + return + } + tfFileContentStr = string(tfFileContent) + return +} + +func GenProviderFile(dirPath string, providerData *models.ProviderTable, providerInfo *models.ProviderInfoTable, regionData *models.ResourceDataTable) (err error) { + /* + providerFileData := make(map[string]map[string]map[string]interface{}) + providerFileData["provider"] = make(map[string]map[string]interface{}) + providerFileData["provider"][providerData.Name] = make(map[string]interface{}) + providerFileData["provider"][providerData.Name][providerData.SecretIdAttrName] = providerInfo.SecretId + providerFileData["provider"][providerData.Name][providerData.SecretKeyAttrName] = providerInfo.SecretKey + providerFileData["provider"][providerData.Name][providerData.RegionAttrName] = 
regionData.ResourceAssetId + */ + // provider + providerContentData := make(map[string]map[string]interface{}) + providerContentData[providerData.Name] = make(map[string]interface{}) + providerContentData[providerData.Name][providerData.SecretIdAttrName] = providerInfo.SecretId + providerContentData[providerData.Name][providerData.SecretKeyAttrName] = providerInfo.SecretKey + providerContentData[providerData.Name][providerData.RegionAttrName] = regionData.ResourceAssetId + + // terraform + terraformData := make(map[string]map[string]map[string]interface{}) + terraformData["required_providers"] = make(map[string]map[string]interface{}) + terraformData["required_providers"][providerData.Name] = make(map[string]interface{}) + terraformData["required_providers"][providerData.Name]["source"] = providerData.NameSpace + "/" + providerData.Name + terraformData["required_providers"][providerData.Name]["version"] = providerData.Version + + providerFileData := make(map[string]interface{}) + providerFileData["terraform"] = terraformData + providerFileData["provider"] = providerContentData + + providerFileContent, err := json.Marshal(providerFileData) + if err != nil { + err = fmt.Errorf("Marshal providerFileData error: %s", err.Error()) + log.Logger.Error("Marshal providerFileData error", log.Error(err)) + return + } + providerFilePath := dirPath + "/provider.tf.json" + err = GenFile(providerFileContent, providerFilePath) + if err != nil { + err = fmt.Errorf("Gen providerFile: %s error: %s", providerFilePath, err.Error()) + log.Logger.Error("Gen providerFile error", log.String("providerFilePath", providerFilePath), log.Error(err)) + return + } + return +} + +func GenVersionFile(dirPath string, providerData *models.ProviderTable) (err error) { + return + terraformFilePath := models.Config.TerraformFilePath + if terraformFilePath[len(terraformFilePath)-1] != '/' { + terraformFilePath += "/" + } + if providerData.Name == "tencentcloud" { + versionTfFilePath := terraformFilePath + 
"versiontf/" + providerData.Name + "/version.tf" + versionTfFileContent, tmpErr := ReadFile(versionTfFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read versionTfFile: %s error: %s", versionTfFilePath, tmpErr.Error()) + log.Logger.Error("Read versionTfFile error", log.String("versionTfFilePath", versionTfFilePath), log.Error(err)) + return + } + + genVersionTfFilePath := dirPath + "/version.tf" + err = GenFile(versionTfFileContent, genVersionTfFilePath) + if err != nil { + err = fmt.Errorf("Gen versionTfFile: %s error: %s", genVersionTfFilePath, err.Error()) + log.Logger.Error("Gen versionTfFile error", log.String("genVersionTfFilePath", genVersionTfFilePath), log.Error(err)) + return + } + } + return +} + +func GenTerraformProviderSoftLink(dirPath string, providerData *models.ProviderTable) (err error) { + // targetTerraformProviderPath := dirPath + "/" + models.TerraformProviderPathDiffMap[providerData.Name] + providerData.Version + "/" + models.Config.TerraformProviderOsArch + // targetTerraformProviderPath := dirPath + "/" + models.TerraformProviderPathDiffMap[providerData.Name] + providerData.Version + targetTerraformProviderPath := dirPath + "/" + models.TerraformProviderPathDiffMap[providerData.Name] + providerData.NameSpace + "/" + providerData.Name + "/" + providerData.Version + + terraformFilePath := models.Config.TerraformFilePath + if terraformFilePath[len(terraformFilePath)-1] != '/' { + terraformFilePath += "/" + } + + _, err = os.Stat(targetTerraformProviderPath) + if err != nil { + if os.IsNotExist(err) { + err = os.MkdirAll(targetTerraformProviderPath, os.ModePerm) + if err != nil { + err = fmt.Errorf("Make dir: %s error: %s", dirPath, err.Error()) + log.Logger.Error("Make dir error", log.String("dirPath", dirPath), log.Error(err)) + return + } + terraformProviderPath := terraformFilePath + "providers/" + providerData.Name + "/" + providerData.Version + "/" + models.Config.TerraformProviderOsArch + err = os.Symlink(terraformProviderPath, 
targetTerraformProviderPath+"/"+models.Config.TerraformProviderOsArch) + if err != nil { + err = fmt.Errorf("Make soft link : %s error: %s", targetTerraformProviderPath, err.Error()) + log.Logger.Error("Make soft link error", log.String("softLink", targetTerraformProviderPath), log.Error(err)) + return + } + } else { + err = fmt.Errorf("Os stat dir: %s error: %s", targetTerraformProviderPath, err.Error()) + log.Logger.Error("Os stat dir error", log.String("targetTerraformProviderPath", targetTerraformProviderPath), log.Error(err)) + return + } + } + return +} + +func GenTerraformLockHclSoftLink(dirPath string, providerData *models.ProviderTable) (err error) { + targetTerraformLockHclPath := dirPath + "/.terraform.lock.hcl" + terraformFilePath := models.Config.TerraformFilePath + if terraformFilePath[len(terraformFilePath)-1] != '/' { + terraformFilePath += "/" + } + _, err = os.Stat(targetTerraformLockHclPath) + if err != nil { + if os.IsNotExist(err) { + terraformLockHclPath := terraformFilePath + "providers/" + providerData.Name + "/" + providerData.Version + "/" + models.Config.TerraformProviderOsArch + "_hcl" + "/.terraform.lock.hcl" + err = os.Symlink(terraformLockHclPath, targetTerraformLockHclPath) + if err != nil { + err = fmt.Errorf("Make soft link : %s error: %s", targetTerraformLockHclPath, err.Error()) + log.Logger.Error("Make soft link error", log.String("softLink", targetTerraformLockHclPath), log.Error(err)) + return + } + } else { + err = fmt.Errorf("Os stat dir: %s error: %s", targetTerraformLockHclPath, err.Error()) + log.Logger.Error("Os stat dir error", log.String("targetTerraformLockHclPath", targetTerraformLockHclPath), log.Error(err)) + return + } + } + return +} + +func DelProviderFile(dirPath string) (err error) { + providerFilePath := dirPath + "/provider.tf.json" + err = DelFile(providerFilePath) + if err != nil { + err = fmt.Errorf("Delete provider.tf.json file:%s error:%s", providerFilePath, err.Error()) + log.Logger.Error("Delete 
provider.tf.json file error", log.String("providerFilePath", providerFilePath), log.Error(err)) + return + } + return +} + +func DelTfstateFile(dirPath string) (err error) { + tfstateFilePath := dirPath + "/terraform.tfstate" + err = DelFile(tfstateFilePath) + if err != nil { + err = fmt.Errorf("Delete terraform.tfstate file:%s error:%s", tfstateFilePath, err.Error()) + log.Logger.Error("Delete terraform.tfstate file error", log.String("providerFilePath", tfstateFilePath), log.Error(err)) + return + } + // delete the terraform.tfstate.backup + tfstateFilePath = dirPath + "/terraform.tfstate.backup" + err = DelFile(tfstateFilePath) + if err != nil { + err = fmt.Errorf("Delete terraform.tfstate.backup file:%s error:%s", tfstateFilePath, err.Error()) + log.Logger.Error("Delete terraform.tfstate.backup file error", log.String("providerFilePath", tfstateFilePath), log.Error(err)) + return + } + return +} + +func GenWorkDirPath(resourceId string, + requestSn string, + requestId string, + providerData *models.ProviderTable, + regionData *models.ResourceDataTable, + plugin string, + sourceData *models.SourceTable) (workDirPath string) { + + terraformFilePath := models.Config.TerraformFilePath + if terraformFilePath[len(terraformFilePath)-1] != '/' { + terraformFilePath += "/" + } + dirPathResourceId := resourceId + if dirPathResourceId == "" { + dirPathResourceId = requestSn + } + workDirPath = terraformFilePath + providerData.Name + "/" + regionData.ResourceAssetId + "/" + plugin + "/" + + requestId + "/" + dirPathResourceId + "/" + sourceData.Name + return +} + +func execRemoteWithTimeout(cmdStr []string, timeOut int) (out string, err error) { + if len(cmdStr) == 0 { + err = fmt.Errorf("cmdStr can not be empty") + return + } + cmdStr = append([]string{"-c"}, cmdStr...) + doneChan := make(chan string) + // defer close(doneChan) + + tmpCmd := exec.Command(models.BashCmd, cmdStr...) 
+ tmpCmd.SysProcAttr = &syscall.SysProcAttr{Setpgid: true} + go func(a chan string, ct *exec.Cmd) { + /* + b, err := ct.Output() + if err != nil { + a <- "error:" + err.Error() + } + a <- string(b) + */ + var stdout, stderr bytes.Buffer + ct.Stdout = &stdout + ct.Stderr = &stderr + cmdErr := ct.Run() + if cmdErr != nil { + a <- "error:" + string(stderr.Bytes()) + } else { + a <- string(stdout.Bytes()) + } + }(doneChan, tmpCmd) + select { + case tmpVal := <-doneChan: + out = tmpVal + case <-time.After(time.Duration(timeOut) * time.Second): + out = fmt.Sprintf("error: %s timeout %d(s)", cmdStr, timeOut) + syscall.Kill(-tmpCmd.Process.Pid, syscall.SIGKILL) + } + if strings.HasPrefix(out, "error:") { + err = fmt.Errorf(out) + } + return +} + +func TerraformImport(dirPath, address, resourceAssetId string) (err error) { + cmdStr := models.Config.TerraformCmdPath + " -chdir=" + dirPath + " import " + address + " " + resourceAssetId + /* + cmd := exec.Command(models.BashCmd, "-c", cmdStr) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmdErr := cmd.Run() + */ + _, cmdErr := execRemoteWithTimeout([]string{cmdStr}, models.CommandTimeOut) + if cmdErr != nil { + // outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + // outPutStr := string(stderr.Bytes()) + outPutStr := cmdErr.Error() + errorMsgRegx := regexp.MustCompile(`Error: ([\S\s]*)`) + errorMsg := errorMsgRegx.FindStringSubmatch(outPutStr) + errMsg := "Error:" + for i := 1; i < len(errorMsg); i++ { + errMsg += " " + errMsg += errorMsg[i] + } + colorsCharRegx := regexp.MustCompile(`\[\d+m`) + outPutErrMsg := colorsCharRegx.ReplaceAllLiteralString(errMsg, "") + err = fmt.Errorf("Cmd:%s run failed: %s, ErrorMsg: %s", cmdStr, cmdErr.Error(), outPutErrMsg) + log.Logger.Error("Cmd run failed", log.String("cmd", cmdStr), log.String("Error: ", outPutErrMsg), log.Error(cmdErr)) + return + } + return +} + +func TerraformPlan(dirPath string) (destroyCnt int, err error) { + // cmdStr 
:= models.Config.TerraformCmdPath + " -chdir=" + dirPath + " plan -input=false -out=" + dirPath + "/planfile" + cmdStr := models.Config.TerraformCmdPath + " -chdir=" + dirPath + " plan -input=false" + /* + cmd := exec.Command(models.BashCmd, "-c", cmdStr) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmdErr := cmd.Run() + */ + output, cmdErr := execRemoteWithTimeout([]string{cmdStr}, models.CommandTimeOut) + if cmdErr != nil { + // outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + // outPutStr := string(stderr.Bytes()) + outPutStr := cmdErr.Error() + errorMsgRegx := regexp.MustCompile(`Error: ([\S\s]*)`) + errorMsg := errorMsgRegx.FindStringSubmatch(outPutStr) + errMsg := "Error:" + for i := 1; i < len(errorMsg); i++ { + errMsg += " " + errMsg += errorMsg[i] + } + colorsCharRegx := regexp.MustCompile(`\[\d+m`) + outPutErrMsg := colorsCharRegx.ReplaceAllLiteralString(errMsg, "") + err = fmt.Errorf("Cmd:%s run failed: %s, ErrorMsg: %s", cmdStr, cmdErr.Error(), outPutErrMsg) + log.Logger.Error("Cmd run failed", log.String("cmd", cmdStr), log.String("Error: ", outPutErrMsg), log.Error(cmdErr)) + return + } + // outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + filePath := dirPath + "/planfile" + err = GenFile([]byte(output), filePath) + if err != nil { + err = fmt.Errorf("Write planfile error:%s", err.Error()) + log.Logger.Error("Write planfile error", log.String("planfile", filePath), log.Error(err)) + return + } + + planFile, err := os.Open(filePath) + if err != nil { + err = fmt.Errorf("Open planfile error:%s", err.Error()) + log.Logger.Error("Open planfile error", log.String("planfile", filePath), log.Error(err)) + return + } + defer planFile.Close() + + // 每行读取 + scanner := bufio.NewScanner(planFile) + for scanner.Scan() { + if strings.Contains(scanner.Text(), "Plan:") && strings.Contains(scanner.Text(), "to add") && strings.Contains(scanner.Text(), "to change") && 
strings.Contains(scanner.Text(), "to destroy") { + // 获取 to destroy 前面的数值 + planStr := scanner.Text() + idx := strings.Index(planStr, "to destroy") + sIdx, eIdx := -1, -1 + for idx >= 0 { + if planStr[idx] >= '0' && planStr[idx] <= '9' { + break + } + eIdx = idx + idx-- + } + for idx >= 0 { + if planStr[idx] >= '0' && planStr[idx] <= '9' { + sIdx = idx + idx-- + } else { + break + } + } + if sIdx != -1 && eIdx != -1 { + destroyNumStr := planStr[sIdx:eIdx] + destroyCnt, err = strconv.Atoi(destroyNumStr) + if err != nil { + err = fmt.Errorf("Plan text:%s error", planStr) + log.Logger.Error("Plan text error", log.String("planText", planStr), log.Error(err)) + } + } else { + err = fmt.Errorf("Plan text:%s error", planStr) + log.Logger.Error("Plan text error", log.String("planText", planStr), log.Error(err)) + } + return + } + } + return +} + +func TerraformApply(dirPath string) (err error) { + cmdStr := models.Config.TerraformCmdPath + " -chdir=" + dirPath + " apply -auto-approve" + /* + cmd := exec.Command(models.BashCmd, "-c", cmdStr) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmdErr := cmd.Run() + */ + _, cmdErr := execRemoteWithTimeout([]string{cmdStr}, models.CommandTimeOut) + if cmdErr != nil { + // outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + // outPutStr := string(stderr.Bytes()) + outPutStr := cmdErr.Error() + errorMsgRegx := regexp.MustCompile(`Error: ([\S\s]*)`) + errorMsg := errorMsgRegx.FindStringSubmatch(outPutStr) + errMsg := "Error:" + for i := 1; i < len(errorMsg); i++ { + errMsg += " " + errMsg += errorMsg[i] + } + colorsCharRegx := regexp.MustCompile(`\[\d+m`) + outPutErrMsg := colorsCharRegx.ReplaceAllLiteralString(errMsg, "") + err = fmt.Errorf("Cmd:%s run failed: %s, ErrorMsg: %s", cmdStr, cmdErr.Error(), outPutErrMsg) + log.Logger.Error("Cmd run failed", log.String("cmd", cmdStr), log.String("Error: ", outPutErrMsg), log.Error(cmdErr)) + return + } + return +} + +func 
TerraformDestroy(dirPath string) (err error) { + cmdStr := models.Config.TerraformCmdPath + " -chdir=" + dirPath + " destroy -auto-approve" + /* + cmd := exec.Command(models.BashCmd, "-c", cmdStr) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmdErr := cmd.Run() + */ + _, cmdErr := execRemoteWithTimeout([]string{cmdStr}, models.CommandTimeOut) + if cmdErr != nil { + // outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + // outPutStr := string(stderr.Bytes()) + outPutStr := cmdErr.Error() + errorMsgRegx := regexp.MustCompile(`Error: ([\S\s]*)`) + errorMsg := errorMsgRegx.FindStringSubmatch(outPutStr) + errMsg := "Error:" + for i := 1; i < len(errorMsg); i++ { + errMsg += " " + errMsg += errorMsg[i] + } + colorsCharRegx := regexp.MustCompile(`\[\d+m`) + outPutErrMsg := colorsCharRegx.ReplaceAllLiteralString(errMsg, "") + err = fmt.Errorf("Cmd:%s run failed: %s, ErrorMsg: %s", cmdStr, cmdErr.Error(), outPutErrMsg) + log.Logger.Error("Cmd run failed", log.String("cmd", cmdStr), log.String("Error: ", outPutErrMsg), log.Error(cmdErr)) + return + } + return +} + +func TerraformInit(dirPath string) (err error) { + cmdStr := models.Config.TerraformCmdPath + " -chdir=" + dirPath + " init" + /* + cmd := exec.Command(models.BashCmd, "-c", cmdStr) + var stdout, stderr bytes.Buffer + cmd.Stdout = &stdout + cmd.Stderr = &stderr + cmdErr := cmd.Run() + */ + _, cmdErr := execRemoteWithTimeout([]string{cmdStr}, models.CommandTimeOut) + if cmdErr != nil { + // outStr, errStr := string(stdout.Bytes()), string(stderr.Bytes()) + // outPutStr := string(stderr.Bytes()) + outPutStr := cmdErr.Error() + errorMsgRegx := regexp.MustCompile(`Error: ([\S\s]*)`) + errorMsg := errorMsgRegx.FindStringSubmatch(outPutStr) + errMsg := "Error:" + for i := 1; i < len(errorMsg); i++ { + errMsg += " " + errMsg += errorMsg[i] + } + colorsCharRegx := regexp.MustCompile(`\[\d+m`) + outPutErrMsg := colorsCharRegx.ReplaceAllLiteralString(errMsg, "") + err = 
fmt.Errorf("Cmd:%s run failed: %s, ErrorMsg: %s", cmdStr, cmdErr.Error(), outPutErrMsg) + log.Logger.Error("Cmd run failed", log.String("cmd", cmdStr), log.String("Error: ", outPutErrMsg), log.Error(cmdErr)) + return + } + return +} + +func handleTerraformApplyOrQuery(reqParam map[string]interface{}, + sourceData *models.SourceTable, + providerData *models.ProviderTable, + providerInfo *models.ProviderInfoTable, + regionData *models.ResourceDataTable, + action string, plugin string, dirPath string, + interfaceData *models.InterfaceTable, + curDebugFileContent map[string]interface{}) (retOutput map[string]interface{}, err error) { + + retOutput = make(map[string]interface{}) + retOutput["callbackParameter"] = reqParam["callbackParameter"].(string) + retOutput["errorCode"] = "1" + retOutput["errorMessage"] = "" + + if plugin == "az" { + retData, tmpErr := handleAzQuery(reqParam, dirPath, providerData, providerInfo, regionData, sourceData) + if tmpErr != nil { + err = fmt.Errorf("Handle Az query error:%s", tmpErr.Error()) + log.Logger.Warn("Handle Az query error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + for k, v := range retData { + retOutput[k] = v + } + retOutput["errorCode"] = "0" + return + } + + // Get tf_argument_list by sourceId + sourceIdStr := sourceData.Id + sqlCmd := "SELECT * FROM tf_argument WHERE source IN ('" + sourceIdStr + "')" + var tfArgumentList []*models.TfArgumentTable + err = x.SQL(sqlCmd).Find(&tfArgumentList) + if err != nil { + err = fmt.Errorf("Get tf_argument list error:%s", err.Error()) + log.Logger.Error("Get tf_argument list error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + if len(tfArgumentList) == 0 { + err = fmt.Errorf("Tf_argument list can not be found by source:%s", sourceIdStr) + log.Logger.Warn("Tf_argument list can not be found by source", log.String("source", sourceIdStr), log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Get 
tfstate_attribute by sourceId + sqlCmd = "SELECT * FROM tfstate_attribute WHERE source IN ('" + sourceIdStr + "')" + var tfstateAttributeList []*models.TfstateAttributeTable + err = x.SQL(sqlCmd).Find(&tfstateAttributeList) + if err != nil { + err = fmt.Errorf("Get tfstate_attribute list error:%s", err.Error()) + log.Logger.Error("Get tfstate_attribute list error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + if len(tfstateAttributeList) == 0 { + err = fmt.Errorf("Tfstate_attribute list can not be found by source:%s", sourceIdStr) + log.Logger.Warn("Tfstate_attribute list can not be found by source", log.String("source", sourceIdStr), log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Get parameter by interfaceId and type=out + sqlCmd = "SELECT * FROM parameter WHERE interface=? and type=?" + paramArgs := []interface{}{interfaceData.Id, "output"} + var outPutParameterList []*models.ParameterTable + err = x.SQL(sqlCmd, paramArgs...).Find(&outPutParameterList) + if err != nil { + err = fmt.Errorf("Get outPutParameter list error:%s", err.Error()) + log.Logger.Error("Get outPutParameter list error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + if len(outPutParameterList) == 0 { + err = fmt.Errorf("OutPutParameter can not be found by interface:%s and type=out", interfaceData.Id) + log.Logger.Warn("OutPutParameter can not be found by interface and type", log.String("interface", interfaceData.Id), log.String("type", "out"), log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + resourceId := "" + resourceAssetId := "" + + if _, ok := reqParam["id"].(string); ok { + resourceId = reqParam["id"].(string) + } + + if _, ok := reqParam["asset_id"].(string); ok { + resourceAssetId = reqParam["asset_id"].(string) + } + + var tfArguments map[string]interface{} + tfArguments, _, err = handleConvertParams(action, sourceData, tfArgumentList, reqParam, providerData, regionData) + if 
err != nil { + err = fmt.Errorf("HandleConvertParams error:%s", err.Error()) + log.Logger.Warn("HandleConvertParams error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Gen the terraform workdir + err = GenDir(dirPath) + if err != nil { + err = fmt.Errorf("Gen the terraform workdir: %s error: %s", dirPath, err.Error()) + log.Logger.Error("Gen the terraform workdir error", log.String("dirPath", dirPath), log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Gen tf.json file + var tfFileContentStr string + tfFileContentStr, err = GenTfFile(dirPath, sourceData, action, resourceId, tfArguments) + if err != nil { + err = fmt.Errorf("Gen tfFile error: %s", err.Error()) + log.Logger.Error("Gen tfFile error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Gen provider.tf.json + err = GenProviderFile(dirPath, providerData, providerInfo, regionData) + if err != nil { + err = fmt.Errorf("Gen providerFile error: %s", err.Error()) + log.Logger.Error("Gen providerFile error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Gen version.tf + err = GenVersionFile(dirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen versionFile error: %s", err.Error()) + log.Logger.Error("Gen versionFile error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Gen softlink of terraform provider file + err = GenTerraformProviderSoftLink(dirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen terraform provider soft link error: %s", err.Error()) + log.Logger.Error("Gen terraform provider soft link error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Gen soft link for .terraform.lock.hcl + err = GenTerraformLockHclSoftLink(dirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen terraform lock soft link error: %s", err.Error()) + log.Logger.Error("Gen terraform lock soft link error", 
log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // test + ///* + // terraform init + err = TerraformInit(dirPath) + if err != nil { + err = fmt.Errorf("Do TerraformInit error:%s", err.Error()) + log.Logger.Error("Do TerraformInit error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + canDoImport := true + if providerData.Name == "tencentcloud" && plugin == "security_rule" { + canDoImport = false + } + + if resourceAssetId != "" && action == "apply" && canDoImport == true { + // terraform import when assetId has value in apply + err = TerraformImport(dirPath, sourceData.Name+"."+resourceId, resourceAssetId) + if err != nil { + err = fmt.Errorf("Do TerraformImport error:%s", err.Error()) + retOutput["errorMessage"] = err.Error() + return + } + if _, ok := reqParam[models.ResourceDataDebug]; ok { + // resource_data debug mode, get the terraform.state file after terraform import + tfstateFilePath := dirPath + "/terraform.tfstate" + tfstateFileData, tmpErr := ReadFile(tfstateFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read import_tfstate file error:%s", tmpErr.Error()) + log.Logger.Error("Read import_tfstate file error", log.Error(err)) + // retOutput["errorMessage"] = err.Error() + return + } + tfstateFileContentStr := string(tfstateFileData) + curDebugFileContent["tf_state_import"] = tfstateFileContentStr + } + } + + // terraform plan + destroyCnt, err := TerraformPlan(dirPath) + if err != nil { + err = fmt.Errorf("Do TerraformPlan error:%s", err.Error()) + log.Logger.Error("Do TerraformPlan error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + // resource_data debug mode, get the plan file after terraform plan + planFilePath := dirPath + "/planfile" + planFileData, tmpErr := ReadFile(planFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read plan file error:%s", tmpErr.Error()) + log.Logger.Error("Read plan file error", 
log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + planFileContentStr := string(planFileData) + curDebugFileContent["plan_message"] = planFileContentStr + } + + if destroyCnt > 0 && reqParam["confirmToken"] != "Y" { + // 二次确认 + destroyCntStr := strconv.Itoa(destroyCnt) + retOutput["errorMessage"] = destroyCntStr + " resource(s) will be destroy, please confirm again!" + return + } + + // terraform apply + err = TerraformApply(dirPath) + if err != nil { + err = fmt.Errorf("Do TerraformApply error:%s", err.Error()) + log.Logger.Error("Do TerraformApply error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + //*/ + isInternalAction := false + // handle tfstate output + err = handleTfstateOutPut(sourceData, + interfaceData, + reqParam, + regionData, + providerData, + action, + dirPath, + tfFileContentStr, + resourceId, + retOutput, + curDebugFileContent, + isInternalAction) + + // delete provider.tf.json + err = DelProviderFile(dirPath) + if err != nil { + err = fmt.Errorf("Delete provider.tf.json file error:%s", err.Error()) + log.Logger.Error("Delete provider.tf.json file error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + retOutput["errorCode"] = "0" + return +} + +func handleOutPutArgs(outPutArgs map[string]interface{}, + outPutParameterNameMap map[string]*models.ParameterTable, + tfstateAttrParamMap map[string]*models.TfstateAttributeTable, + reqParam map[string]interface{}, + isInternalAction bool) (outPutResultList []map[string]interface{}, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("HandleOutPutArgs error, error:%v", r) + } + }() + outPutResultList = []map[string]interface{}{} + // flat outPutParam data + flatOutPutArgs, hasTerraformOutPutPrefix, _ := handleFlatOutPutParam(outPutArgs) + + // delete the item that id is nil or nil []interface{} + tmpOutPutResultList := []map[string]interface{}{} + for i := range flatOutPutArgs { + /* + if _, ok := 
flatOutPutArgs[i]["id"]; ok && flatOutPutArgs[i]["id"] == nil { + flatOutPutArgs[i]["id"] = "" + } + if isResultIdValid(flatOutPutArgs[i]["id"]) == false { + continue + } + */ + tmpOutPutResultList = append(tmpOutPutResultList, flatOutPutArgs[i]) + } + + var mapOutPutArgs []map[string]interface{} + if hasTerraformOutPutPrefix { + // 将数组类型的值进行一一映射 + mapOutPutArgs, _ = handleSliceMapOutPutParam(tmpOutPutResultList) + } else { + mapOutPutArgs = tmpOutPutResultList + } + + for i := range mapOutPutArgs { + if !isInternalAction { + for k, v := range outPutParameterNameMap { + if _, okParam := tfstateAttrParamMap[v.Id]; !okParam { + if _, ok := reqParam[k]; ok { + mapOutPutArgs[i][k] = reqParam[k] + } + } + } + } + outPutResultList = append(outPutResultList, mapOutPutArgs[i]) + } + + // construct the object + + return +} + +func isResultIdValid(outPutIdVal interface{}) bool { + if outPutIdVal == nil { + return false + } + if _, ok := outPutIdVal.([]interface{}); ok { + tmpVal := outPutIdVal.([]interface{}) + if len(tmpVal) == 0 { + return false + } + for i := range tmpVal { + if isResultIdValid(tmpVal[i]) == false { + return false + } + } + } + return true +} + +func handleFlatOutPutParam(outPutArgs map[string]interface{}) (retOutPutArgs []map[string]interface{}, hasTerraformOutPutPrefix bool, err error) { + flatParams := make(map[string]interface{}) + for k := range outPutArgs { + if strings.Contains(k, models.TerraformOutPutPrefix) == false { + flatParams[k] = outPutArgs[k] + } + } + hasResultList := false + for k, v := range outPutArgs { + if strings.Contains(k, models.TerraformOutPutPrefix) { + hasResultList = true + var tmpData []map[string]interface{} + tmpMarshal, _ := json.Marshal(v) + json.Unmarshal(tmpMarshal, &tmpData) + for i := range tmpData { + ret, _, _ := handleFlatOutPutParam(tmpData[i]) + for j := range ret { + for fp := range flatParams { + ret[j][fp] = flatParams[fp] + } + retOutPutArgs = append(retOutPutArgs, ret[j]) + } + } + } + } + if 
!hasResultList { + retOutPutArgs = append(retOutPutArgs, flatParams) + } + return +} + +func handleSliceMapOutPutParam(outPutArgs []map[string]interface{}) (retOutPutArgs []map[string]interface{}, err error) { + for _, retArg := range outPutArgs { + sliceKeys := make(map[string]interface{}) + nonSliceKeys := make(map[string]interface{}) + + cnt := 0 + for k, v := range retArg { + if _, ok := v.([]interface{}); ok /*&& len(v.([]interface{})) > 0*/ { + sliceKeys[k] = v + cnt = len(v.([]interface{})) + } else { + nonSliceKeys[k] = v + } + } + + if cnt == 0 { + retOutPutArgs = append(retOutPutArgs, nonSliceKeys) + continue + } + + for i := 0; i < cnt; i++ { + curResult := make(map[string]interface{}) + hasSliceVal := false + for k1, v1 := range sliceKeys { + curResult[k1] = v1.([]interface{})[i] + if _, ok := curResult[k1].([]interface{}); ok /*&& len(curResult[k1].([]interface{})) > 0*/ { + hasSliceVal = true + } + } + for k1, v1 := range nonSliceKeys { + curResult[k1] = v1 + } + if hasSliceVal { + tmpInput := []map[string]interface{}{curResult} + tmpRet, _ := handleSliceMapOutPutParam(tmpInput) + retOutPutArgs = append(retOutPutArgs, tmpRet...) 
+ } else { + retOutPutArgs = append(retOutPutArgs, curResult) + } + } + } + return +} + +func handleAzQuery(reqParam map[string]interface{}, + workDirPath string, + providerData *models.ProviderTable, + providerInfo *models.ProviderInfoTable, + regionData *models.ResourceDataTable, + sourceData *models.SourceTable) (rowData map[string]interface{}, err error) { + rowData = make(map[string]interface{}) + _, err = os.Stat(workDirPath) + if err != nil { + if os.IsNotExist(err) { + err = os.MkdirAll(workDirPath, os.ModePerm) + if err != nil { + err = fmt.Errorf("Make dir: %s error: %s", workDirPath, err.Error()) + log.Logger.Error("Make dir error", log.String("workDirPath", workDirPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } else { + err = fmt.Errorf("Os stat dir: %s error: %s", workDirPath, err.Error()) + log.Logger.Error("Os stat dir error", log.String("workDirPath", workDirPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } + + // Gen provider.tf.json + providerFileData := make(map[string]map[string]map[string]interface{}) + providerFileData["provider"] = make(map[string]map[string]interface{}) + providerFileData["provider"][providerData.Name] = make(map[string]interface{}) + providerFileData["provider"][providerData.Name][providerData.SecretIdAttrName] = providerInfo.SecretId + providerFileData["provider"][providerData.Name][providerData.SecretKeyAttrName] = providerInfo.SecretKey + providerFileData["provider"][providerData.Name][providerData.RegionAttrName] = regionData.ResourceAssetId + + providerFileContent, tmpErr := json.Marshal(providerFileData) + if tmpErr != nil { + err = fmt.Errorf("Marshal providerFileData error: %s", tmpErr.Error()) + log.Logger.Error("Marshal providerFileData error", log.Error(err)) + return + } + providerFilePath := workDirPath + "/provider.tf.json" + err = GenFile(providerFileContent, providerFilePath) + if err != nil { + err = fmt.Errorf("Gen providerFile: %s error: %s", 
providerFilePath, err.Error()) + log.Logger.Error("Gen providerFile error", log.String("providerFilePath", providerFilePath), log.Error(err)) + return + } + + // Gen version.tf + terraformFilePath := models.Config.TerraformFilePath + if terraformFilePath[len(terraformFilePath)-1] != '/' { + terraformFilePath += "/" + } + if providerData.Name == "tencentcloud" { + versionTfFilePath := terraformFilePath + "versiontf/" + providerData.Name + "/version.tf" + versionTfFileContent, tmpErr := ReadFile(versionTfFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read versionTfFile: %s error: %s", versionTfFilePath, tmpErr.Error()) + log.Logger.Error("Read versionTfFile error", log.String("versionTfFilePath", versionTfFilePath), log.Error(err)) + return + } + + genVersionTfFilePath := workDirPath + "/version.tf" + err = GenFile(versionTfFileContent, genVersionTfFilePath) + if err != nil { + err = fmt.Errorf("Gen versionTfFile: %s error: %s", genVersionTfFilePath, err.Error()) + log.Logger.Error("Gen versionTfFile error", log.String("genVersionTfFilePath", genVersionTfFilePath), log.Error(err)) + return + } + } + + // Gen softlink of terraform provider file + // targetTerraformProviderPath := workDirPath + "/" + models.TerraformProviderPathDiffMap[providerData.Name] + providerData.Version + "/" + models.Config.TerraformProviderOsArch + // targetTerraformProviderPath := workDirPath + "/" + models.TerraformProviderPathDiffMap[providerData.Name] + providerData.Version + targetTerraformProviderPath := workDirPath + "/" + models.TerraformProviderPathDiffMap[providerData.Name] + providerData.NameSpace + "/" + providerData.Name + "/" + providerData.Version + + _, err = os.Stat(targetTerraformProviderPath) + if err != nil { + if os.IsNotExist(err) { + err = os.MkdirAll(targetTerraformProviderPath, os.ModePerm) + if err != nil { + err = fmt.Errorf("Make dir: %s error: %s", workDirPath, err.Error()) + log.Logger.Error("Make dir error", log.String("dirPath", workDirPath), log.Error(err)) 
+ rowData["errorMessage"] = err.Error() + return + } + terraformProviderPath := terraformFilePath + "providers/" + providerData.Name + "/" + providerData.Version + "/" + models.Config.TerraformProviderOsArch + err = os.Symlink(terraformProviderPath, targetTerraformProviderPath+"/"+models.Config.TerraformProviderOsArch) + if err != nil { + err = fmt.Errorf("Make soft link : %s error: %s", targetTerraformProviderPath, err.Error()) + log.Logger.Error("Make soft link error", log.String("softLink", targetTerraformProviderPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } else { + err = fmt.Errorf("Os stat dir: %s error: %s", targetTerraformProviderPath, err.Error()) + log.Logger.Error("Os stat dir error", log.String("targetTerraformProviderPath", targetTerraformProviderPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } + // Gen soft link for .terraform.lock.hcl + targetTerraformLockHclPath := workDirPath + "/.terraform.lock.hcl" + _, err = os.Stat(targetTerraformLockHclPath) + if err != nil { + if os.IsNotExist(err) { + terraformLockHclPath := terraformFilePath + "providers/" + providerData.Name + "/" + providerData.Version + "/" + models.Config.TerraformProviderOsArch + "_hcl" + "/.terraform.lock.hcl" + err = os.Symlink(terraformLockHclPath, targetTerraformLockHclPath) + if err != nil { + err = fmt.Errorf("Make soft link : %s error: %s", targetTerraformLockHclPath, err.Error()) + log.Logger.Error("Make soft link error", log.String("softLink", targetTerraformLockHclPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } else { + err = fmt.Errorf("Os stat dir: %s error: %s", targetTerraformLockHclPath, err.Error()) + log.Logger.Error("Os stat dir error", log.String("targetTerraformLockHclPath", targetTerraformLockHclPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } + + sourceName := sourceData.Name + // Gen .tf 文件, 然后执行 terraform import cmd + uuid := "_" + 
guid.CreateGuid() + tfFilePath := workDirPath + "/" + sourceName + ".tf" + tfFileContent := "data " + sourceName + " " + uuid + " {}" + + GenFile([]byte(tfFileContent), tfFilePath) + err = TerraformInit(workDirPath) + if err != nil { + err = fmt.Errorf("Do TerraformInit error:%s", err.Error()) + log.Logger.Error("Do TerraformInit error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + err = TerraformApply(workDirPath) + if err != nil { + err = fmt.Errorf("Do TerraformApply error:%s", err.Error()) + log.Logger.Error("Do TerraformApply error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Read terraform.tfstate 文件 + var tfstateFilePath string + tfstateFilePath = workDirPath + "/terraform.tfstate" + tfstateFileData, err := ReadFile(tfstateFilePath) + if err != nil { + err = fmt.Errorf("Read tfstate file error:%s", err.Error()) + log.Logger.Error("Read tfstate file error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + //tfstateFileContentStr := string(tfstateFileData) + var unmarshalTfstateFileData models.TfstateFileData + err = json.Unmarshal(tfstateFileData, &unmarshalTfstateFileData) + if err != nil { + err = fmt.Errorf("Unmarshal tfstate file data error:%s", err.Error()) + log.Logger.Error("Unmarshal tfstate file data error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + var tfstateFileAttributes map[string]interface{} + tfstateFileAttributes = unmarshalTfstateFileData.Resources[0].Instances[0].Attributes + rowData["az"] = tfstateFileAttributes["zones"] + + // Del provider file + err = DelFile(providerFilePath) + if err != nil { + err = fmt.Errorf("Do delete provider file error: %s", err.Error()) + log.Logger.Error("Do delete provider file error", log.Error(err)) + rowData["errorMessage"] = err.Error() + } + return +} + +func RegionApply(reqParam map[string]interface{}, interfaceData *models.InterfaceTable) (rowData map[string]interface{}, err error) { + rowData 
= make(map[string]interface{}) + rowData["callbackParameter"] = reqParam["callbackParameter"].(string) + rowData["errorCode"] = "1" + rowData["errorMessage"] = "" + + providerInfoId := reqParam["provider_info"].(string) + // Get providerInfo data + // sqlCmd := `SELECT * FROM provider_info WHERE id=?` + sqlCmd := `SELECT * FROM provider_info WHERE name=?` + paramArgs := []interface{}{providerInfoId} + var providerInfoList []*models.ProviderInfoTable + err = x.SQL(sqlCmd, paramArgs...).Find(&providerInfoList) + if err != nil { + err = fmt.Errorf("Get providerInfo by id:%s error:%s", providerInfoId, err.Error()) + log.Logger.Error("Get providerInfo by id error", log.String("id", providerInfoId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(providerInfoList) == 0 { + err = fmt.Errorf("ProviderInfo can not be found by id:%s", providerInfoId) + log.Logger.Warn("ProviderInfo can not be found by id", log.String("id", providerInfoId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + providerInfoData := providerInfoList[0] + + // Get provider data + sqlCmd = `SELECT * FROM provider WHERE id=?` + paramArgs = []interface{}{providerInfoData.Provider} + var providerList []*models.ProviderTable + err = x.SQL(sqlCmd, paramArgs...).Find(&providerList) + if err != nil { + err = fmt.Errorf("Get provider by id:%s error:%s", providerInfoData.Provider, err.Error()) + log.Logger.Error("Get provider by id error", log.String("id", providerInfoData.Provider), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(providerList) == 0 { + err = fmt.Errorf("Provider can not be found by id:%s", providerInfoData.Provider) + log.Logger.Warn("Provider can not be found by id", log.String("id", providerInfoData.Provider), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + providerData := providerList[0] + + // get source data by interfaceId and providerId + sqlCmd = `SELECT * FROM source WHERE 
interface=? AND provider=?` + paramArgs = []interface{}{interfaceData.Id, providerData.Id} + var sourceList []*models.SourceTable + err = x.SQL(sqlCmd, paramArgs...).Find(&sourceList) + if err != nil { + err = fmt.Errorf("Get source by interface:%s and provider:%s error:%s", interfaceData.Id, providerData.Id, err.Error()) + log.Logger.Error("Get source by interface and provider error", log.String("interface", interfaceData.Id), log.String("provider", providerData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(sourceList) == 0 { + err = fmt.Errorf("Provider can not be found by interface:%s and provider:%s", interfaceData.Id, providerData.Id) + log.Logger.Warn("Provider can not be found by interface and provider", log.String("interface", interfaceData.Id), log.String("provider", providerData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + sourceData := sourceList[0] + + uuid := guid.CreateGuid() + createTime := time.Now().Format(models.DateTimeFormat) + resourceId := reqParam["id"].(string) + resourceAssetId := reqParam["asset_id"].(string) + createUser := reqParam["operator_user"].(string) + tfFile := fmt.Sprintf("{\"resource\":{\"%s\":{\"%s\":{\"name\":\"%s\"}}}}", sourceData.Name, resourceId, resourceId) + tfstateFile := fmt.Sprintf("{\"resources\":{\"instances\":{\"attributes\":{\"name\":\"%s\"}}}}", resourceAssetId) + + // get old resource data + var oldResourceDataList []*models.ResourceDataTable + sqlCmd = "SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = "SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" 
+ } + + paramArgs = []interface{}{sourceData.Id, resourceId, resourceId, resourceAssetId} + err = x.SQL(sqlCmd, paramArgs...).Find(&oldResourceDataList) + if err != nil { + err = fmt.Errorf("Get old_resource data by resource:%s and resource_id:%s error: %s", sourceData.Id, resourceId, err.Error()) + log.Logger.Error("Get old_resource_data by resource and resource_id error", log.String("resource", sourceData.Id), log.String("resource_id", resourceId), log.Error(err)) + return + } + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + if len(oldResourceDataList) == 0 { + _, err = x.Exec("INSERT INTO resource_data_debug(id,resource,resource_id,resource_asset_id,tf_file,tf_state_file,region_id,create_time,create_user,update_time,update_user) VALUE (?,?,?,?,?,?,?,?,?,?,?)", + uuid, sourceData.Id, resourceId, resourceAssetId, tfFile, tfstateFile, resourceId, createTime, createUser, createTime, createUser) + } else { + //err = fmt.Errorf("the region:%s is existed", resourceId) + } + } else { + if len(oldResourceDataList) == 0 { + _, err = x.Exec("INSERT INTO resource_data(id,resource,resource_id,resource_asset_id,tf_file,tf_state_file,region_id,create_time,create_user,update_time,update_user) VALUE (?,?,?,?,?,?,?,?,?,?,?)", + uuid, sourceData.Id, resourceId, resourceAssetId, tfFile, tfstateFile, resourceId, createTime, createUser, createTime, createUser) + } else { + //err = fmt.Errorf("the region:%s is existed", resourceId) + } + } + + if err != nil { + err = fmt.Errorf("Try to create resource_data fail,%s ", err.Error()) + log.Logger.Error("Try to create resource_data fail", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + rowData["errorCode"] = "0" + rowData["asset_id"] = resourceAssetId + rowData["id"] = resourceId + return +} + +func handleApplyOrQuery(action string, reqParam map[string]interface{}, sourceData *models.SourceTable, regionData *models.ResourceDataTable) (rowData map[string]interface{}, err error) { + rowData = 
make(map[string]interface{}) + rowData["callbackParameter"] = reqParam["callbackParameter"].(string) + rowData["errorCode"] = "1" + rowData["errorMessage"] = "" + + if action == "apply" { + uuid := guid.CreateGuid() + createTime := time.Now().Format(models.DateTimeFormat) + resourceId := reqParam["id"].(string) + resourceAssetId := reqParam["asset_id"].(string) + createUser := reqParam["operator_user"].(string) + regionId := reqParam["region_id"].(string) + tfFile := fmt.Sprintf("{\"resource\":{\"%s\":{\"%s\":{\"name\":\"%s\"}}}}", sourceData.Name, resourceId, resourceId) + tfstateFile := fmt.Sprintf("{\"resources\":{\"instances\":{\"attributes\":{\"name\":\"%s\"}}}}", resourceAssetId) + + // get old resource data + var oldResourceDataList []*models.ResourceDataTable + sqlCmd := "SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = "SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" 
+	}
+
+		paramArgs := []interface{}{sourceData.Id, resourceId, regionData.RegionId, resourceAssetId}
+		err = x.SQL(sqlCmd, paramArgs...).Find(&oldResourceDataList)
+		if err != nil {
+			err = fmt.Errorf("Get old_resource data by resource:%s and resource_id:%s error: %s", sourceData.Id, resourceId, err.Error())
+			log.Logger.Error("Get old_resource_data by resource and resource_id error", log.String("resource", sourceData.Id), log.String("resource_id", resourceId), log.Error(err))
+			return
+		}
+
+		if _, ok := reqParam[models.ResourceDataDebug]; ok {
+			if len(oldResourceDataList) == 0 {
+				_, err = x.Exec("INSERT INTO resource_data_debug(id,resource,resource_id,resource_asset_id,tf_file,tf_state_file,region_id,create_time,create_user,update_time,update_user) VALUE (?,?,?,?,?,?,?,?,?,?,?)",
+					uuid, sourceData.Id, resourceId, resourceAssetId, tfFile, tfstateFile, regionId, createTime, createUser, createTime, createUser)
+			} else {
+				// err = fmt.Errorf("the resource_id:%s is existed", resourceId)
+			}
+		} else {
+			if len(oldResourceDataList) == 0 {
+				_, err = x.Exec("INSERT INTO resource_data(id,resource,resource_id,resource_asset_id,tf_file,tf_state_file,region_id,create_time,create_user,update_time,update_user) VALUE (?,?,?,?,?,?,?,?,?,?,?)",
+					uuid, sourceData.Id, resourceId, resourceAssetId, tfFile, tfstateFile, regionId, createTime, createUser, createTime, createUser)
+			} else {
+				// err = fmt.Errorf("the resource_id:%s is existed", resourceId)
+			}
+		}
+
+		if err != nil {
+			err = fmt.Errorf("Try to create resource_data fail,%s ", err.Error())
+			log.Logger.Error("Try to create resource_data fail", log.Error(err))
+			rowData["errorMessage"] = err.Error()
+			return
+		}
+		rowData["errorCode"] = "0"
+		rowData["asset_id"] = resourceAssetId
+		rowData["id"] = resourceId
+	} else if action == "query" {
+		resourceId := reqParam["id"].(string)
+		sqlCmd := `SELECT * FROM resource_data WHERE resource_id=? AND region_id=?`
+
+		if _, ok := reqParam[models.ResourceDataDebug]; ok {
+			
sqlCmd = `SELECT * FROM resource_data_debug WHERE resource_id=? AND region_id=?` + } + + paramArgs := []interface{}{resourceId, regionData.RegionId} + var resourceDataList []*models.ResourceDataTable + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList) + if err != nil { + err = fmt.Errorf("Get resource_data by resourceId:%s error:%s", resourceId, err.Error()) + log.Logger.Error("Get resource_data by resourceId error", log.String("resourceId", resourceId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(resourceDataList) == 0 { + err = fmt.Errorf("ResourceData can not be found by resourceId:%s", resourceId) + log.Logger.Warn("ResourceData can not be found by resourceId", log.String("resourceId", resourceId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + resourceData := resourceDataList[0] + rowData["errorCode"] = "0" + rowData["asset_id"] = resourceData.ResourceAssetId + rowData["id"] = resourceId + rowData["region_id"] = resourceData.RegionId + } + return +} + +func handleDestroy(workDirPath string, + sourceData *models.SourceTable, + providerData *models.ProviderTable, + providerInfo *models.ProviderInfoTable, + regionData *models.ResourceDataTable, + reqParam map[string]interface{}, + plugin string, + inputResourceData *models.ResourceDataTable) (rowData map[string]interface{}, err error) { + + rowData = make(map[string]interface{}) + rowData["callbackParameter"] = reqParam["callbackParameter"].(string) + rowData["errorCode"] = "1" + rowData["errorMessage"] = "" + + var resourceId string + var resourceData *models.ResourceDataTable + toDestroyResourceData := []*models.ResourceDataTable{} + if inputResourceData == nil { + // Get resource_asset_id by resourceId + resourceId = reqParam["id"].(string) + sqlCmd := `SELECT * FROM resource_data WHERE resource_id=? AND region_id=? 
AND resource=?` + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = `SELECT * FROM resource_data_debug WHERE resource_id=? AND region_id=? AND resource=?` + } + + paramArgs := []interface{}{resourceId, regionData.RegionId, sourceData.Id} + var resourceDataInfoList []*models.ResourceDataTable + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataInfoList) + if err != nil { + err = fmt.Errorf("Get resourceDataInfo by resource_id:%s error:%s", resourceId, err.Error()) + log.Logger.Error("Get resourceDataInfo by resource_id error", log.String("resource_id", resourceId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(resourceDataInfoList) == 0 { + err = fmt.Errorf("ResourceDataInfo can not be found by resource_id:%s", resourceId) + log.Logger.Warn("ResourceDataInfo can not be found by resource_id", log.String("resource_id", resourceId), log.Error(err)) + rowData["errorMessage"] = err.Error() + err = nil + rowData["errorCode"] = "0" + return + } + // resourceData = resourceDataInfoList[0] + toDestroyResourceData = append(toDestroyResourceData, resourceDataInfoList...) 
+ } else { + sqlCmd := `SELECT * FROM resource_data WHERE id=?` + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = `SELECT * FROM resource_data_debug WHERE id=?` + } + + paramArgs := []interface{}{inputResourceData.Id} + var resourceDataInfoList []*models.ResourceDataTable + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataInfoList) + if err != nil { + err = fmt.Errorf("Get resourceDataInfo by resource_id:%s error:%s", resourceId, err.Error()) + log.Logger.Error("Get resourceDataInfo by resource_id error", log.String("resource_id", resourceId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(resourceDataInfoList) == 0 { + // err = fmt.Errorf("ResourceDataInfo can not be found by resource_id:%s", resourceId) + // log.Logger.Warn("ResourceDataInfo can not be found by resource_id", log.String("resource_id", resourceId), log.Error(err)) + // rowData["errorMessage"] = err.Error() + err = nil + rowData["errorCode"] = "0" + return + } + inputResourceData = resourceDataInfoList[0] + resourceId = inputResourceData.ResourceId + resourceData = inputResourceData + toDestroyResourceData = append(toDestroyResourceData, resourceData) + } + + // rowData["id"] = resourceId + for _, resourceData := range toDestroyResourceData { + if sourceData.TerraformUsed != "N" { + // Gen the terraform workdir + err = GenDir(workDirPath) + if err != nil { + err = fmt.Errorf("Gen the terraform workdir: %s error: %s", workDirPath, err.Error()) + log.Logger.Error("Gen the terraform workdir error", log.String("workDirPath", workDirPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Gen provider.tf.json + err = GenProviderFile(workDirPath, providerData, providerInfo, regionData) + if err != nil { + err = fmt.Errorf("Gen providerFile error: %s", err.Error()) + log.Logger.Error("Gen providerFile error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + defer DelProviderFile(workDirPath) + + // Gen 
version.tf + err = GenVersionFile(workDirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen versionFile error: %s", err.Error()) + log.Logger.Error("Gen versionFile error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Gen softlink of terraform provider file + err = GenTerraformProviderSoftLink(workDirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen terraform provider soft link error: %s", err.Error()) + log.Logger.Error("Gen terraform provider soft link error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Gen soft link for .terraform.lock.hcl + err = GenTerraformLockHclSoftLink(workDirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen terraform lock soft link error: %s", err.Error()) + log.Logger.Error("Gen terraform lock soft link error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + sourceName := sourceData.Name + // Gen .tf 文件, 然后执行 terraform import cmd + uuid := "_" + guid.CreateGuid() + tfFilePath := workDirPath + "/" + sourceName + ".tf" + tfFileContent := "resource " + sourceName + " " + uuid + " {}" + + GenFile([]byte(tfFileContent), tfFilePath) + err = TerraformInit(workDirPath) + if err != nil { + err = fmt.Errorf("Do TerraformInit error:%s", err.Error()) + log.Logger.Error("Do TerraformInit error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + // continue + } + resourceAssetId := resourceData.ResourceAssetId + DelTfstateFile(workDirPath) + if sourceData.ImportSupport != "N" { + err = TerraformImport(workDirPath, sourceName+"."+uuid, resourceAssetId) + if err != nil { + errMsg := err.Error() + if strings.Contains(errMsg, "Cannot import non-existent remote object") { + // delet resource_data item + if _, ok := reqParam[models.ResourceDataDebug]; ok { + _, err = x.Exec("DELETE FROM resource_data_debug WHERE id=?", resourceData.Id) + } else { + _, err = x.Exec("DELETE FROM resource_data WHERE id=?", resourceData.Id) 
+ } + DelProviderFile(workDirPath) + continue + } + + err = fmt.Errorf("Do TerraformImport error:%s", err.Error()) + log.Logger.Error("Do TerraformImport error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // continue + return + } + } else { + // get tfstate file from resource_data table and gen it + tfstateFileContent := resourceData.TfStateFile + tfstateFilePath := workDirPath + "/terraform.tfstate" + GenFile([]byte(tfstateFileContent), tfstateFilePath) + } + + // clear tf file + os.Truncate(tfFilePath, 0) + + err = TerraformDestroy(workDirPath) + if err != nil { + err = fmt.Errorf("Do TerraformDestroy error: %s", err.Error()) + log.Logger.Error("Do TerraformDestroy error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + // continue + } + + // Del provider file + err = DelProviderFile(workDirPath) + // err = DelFile(providerFilePath) + if err != nil { + err = fmt.Errorf("Do delete provider file error: %s", err.Error()) + log.Logger.Error("Do delete provider file error", log.Error(err)) + rowData["errorMessage"] = err.Error() + } + } + + // delet resource_data item + if _, ok := reqParam[models.ResourceDataDebug]; ok { + _, err = x.Exec("DELETE FROM resource_data_debug WHERE id=?", resourceData.Id) + } else { + _, err = x.Exec("DELETE FROM resource_data WHERE id=?", resourceData.Id) + } + if err != nil { + err = fmt.Errorf("Delete resource data by id:%s error: %s", resourceData.Id, err.Error()) + log.Logger.Error("Delete resource data by id error", log.String("id", resourceData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } + + rowData["errorCode"] = "0" + return +} + +func TerraformOperation(plugin string, action string, reqParam map[string]interface{}, debugFileContent *[]map[string]interface{}, operationProviderData *models.ProviderTable) (rowData map[string]interface{}, err error) { + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("TerraformOperation error: %v", r) + 
rowData["errorMessage"] = err.Error() + } + if rowData["errorMessage"].(string) != "" && rowData["errorCode"].(string) == "0" { + rowData["errorCode"] = "1" + } + }() + + rowData = make(map[string]interface{}) + rowData["callbackParameter"] = reqParam["callbackParameter"].(string) + rowData["errorCode"] = "1" + rowData["errorMessage"] = "" + + // Get interface by plugin and action + var actionName string + actionName = action + if actionName == "destroy" { + actionName = "apply" + } + // sqlCmd := `SELECT * FROM interface WHERE plugin=? AND name=?` + sqlCmd := `SELECT * FROM interface WHERE plugin IN (SELECT id FROM plugin WHERE name=?) AND name=?` + paramArgs := []interface{}{plugin, actionName} + var interfaceInfoList []*models.InterfaceTable + err = x.SQL(sqlCmd, paramArgs...).Find(&interfaceInfoList) + if err != nil { + err = fmt.Errorf("Get interfaceInfo by plugin:%s and name:%s error:%s", plugin, action, err.Error()) + log.Logger.Error("Get interfaceInfo error", log.String("plugin", plugin), log.String("name", action), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(interfaceInfoList) == 0 { + err = fmt.Errorf("InterfaceInfo can not be found by plugin:%s and name:%s", plugin, action) + log.Logger.Warn("InterfaceInfo can not be found by plugin and name", log.String("plugin", plugin), log.String("name", action), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + interfaceData := interfaceInfoList[0] + + if plugin == "region" && action == "apply" { + rowData, err = RegionApply(reqParam, interfaceData) + return + } + + // Get regionInfo by regionId + regionId := reqParam["region_id"].(string) + sqlCmd = `SELECT * FROM resource_data WHERE resource_id=? AND region_id=?` + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = `SELECT * FROM resource_data_debug WHERE resource_id=? 
AND region_id=?` + } + + paramArgs = []interface{}{regionId, regionId} + var resourceDataInfoList []*models.ResourceDataTable + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataInfoList) + if err != nil { + err = fmt.Errorf("Get resourceDataInfo by regionId:%s error:%s", regionId, err.Error()) + log.Logger.Error("Get resourceDataInfo by regionId error", log.String("regionId", regionId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(resourceDataInfoList) == 0 { + err = fmt.Errorf("ResourceDataInfo can not be found by regionId:%s", regionId) + log.Logger.Warn("ResourceDataInfo can not be found by regionId", log.String("regionId", regionId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + regionData := resourceDataInfoList[0] + + // Get providerInfo data + providerInfoId := reqParam["provider_info"].(string) + // sqlCmd = `SELECT * FROM provider_info WHERE id=?` + sqlCmd = `SELECT * FROM provider_info WHERE name=?` + paramArgs = []interface{}{providerInfoId} + var providerInfoList []*models.ProviderInfoTable + err = x.SQL(sqlCmd, paramArgs...).Find(&providerInfoList) + if err != nil { + err = fmt.Errorf("Get providerInfo by id:%s error:%s", providerInfoId, err.Error()) + log.Logger.Error("Get providerInfo by id error", log.String("id", providerInfoId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(providerInfoList) == 0 { + err = fmt.Errorf("ProviderInfo can not be found by id:%s", providerInfoId) + log.Logger.Warn("ProviderInfo can not be found by id", log.String("id", providerInfoId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + providerInfoData := providerInfoList[0] + providerSecretId, decodeErr := cipher.AesDePasswordByGuid(models.PGuid, models.Config.Auth.PasswordSeed, providerInfoData.SecretId) + if decodeErr != nil { + err = fmt.Errorf("Try to decode secretId fail: %s", decodeErr.Error()) + log.Logger.Error("Try to decode secretId fail", 
log.Error(decodeErr)) + rowData["errorMessage"] = err.Error() + return + } + providerSecretKey, decodeErr := cipher.AesDePasswordByGuid(models.PGuid, models.Config.Auth.PasswordSeed, providerInfoData.SecretKey) + if decodeErr != nil { + err = fmt.Errorf("Try to decode secretKey fail: %s", decodeErr.Error()) + log.Logger.Error("Try to decode secretKey fail", log.Error(decodeErr)) + rowData["errorMessage"] = err.Error() + return + } + providerInfoData.SecretId = providerSecretId + providerInfoData.SecretKey = providerSecretKey + + // Get provider data + providerId := providerInfoData.Provider + sqlCmd = `SELECT * FROM provider WHERE id=?` + paramArgs = []interface{}{providerId} + var providerList []*models.ProviderTable + err = x.SQL(sqlCmd, paramArgs...).Find(&providerList) + if err != nil { + err = fmt.Errorf("Get provider by id:%s error:%s", providerId, err.Error()) + log.Logger.Error("Get provider by id error", log.String("id", providerId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(providerList) == 0 { + err = fmt.Errorf("Provider can not be found by id:%s", providerId) + log.Logger.Warn("Provider can not be found by id", log.String("id", providerId), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + providerData := providerList[0] + operationProviderData.Name = providerData.Name + /* + defer func() { + if _, ok := reqParam[models.ResourceDataDebug]; !ok { + // clear the workpath + DelDir(models.Config.TerraformFilePath + providerData.Name) + } + }() + */ + + // Get sourceData by interface and provider + sqlCmd = `SELECT * FROM source WHERE interface=? 
AND provider=?` + paramArgs = []interface{}{interfaceData.Id, providerData.Id} + var sourceList []*models.SourceTable + err = x.SQL(sqlCmd, paramArgs...).Find(&sourceList) + if err != nil { + err = fmt.Errorf("Get source data by interface:%s and provider:%s error:%s", interfaceData.Id, providerData.Id, err.Error()) + log.Logger.Error("Get source data by interface and provider error", log.String("interface", interfaceData.Id), log.String("provider", providerData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(sourceList) == 0 { + err = fmt.Errorf("Source data can not be found by interface:%s and provider:%s", interfaceData.Id, providerData.Id) + log.Logger.Warn("Source data can not be found by interface and provider", log.String("interface", interfaceData.Id), log.String("provider", providerData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Get the sorted source list + var sortedSourceList []*models.SourceTable + sortedSourceList, err = getSortedSourceList(sourceList, interfaceData, providerData) + if err != nil { + err = fmt.Errorf("Get sorted source list error: %s", err.Error()) + log.Logger.Warn("Get sorted source list error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + simulateResourceData := make(map[string][]map[string]interface{}) + if (action == "apply" || action == "query") && sourceList[0].TerraformUsed != "N" { + // fmt.Printf("%v\n", sortedSourceList) + + resourceId := reqParam["id"].(string) + var rootResourceAssetId interface{} + rootResourceAssetId = "" + // resourceAssetId := reqParam["asset_id"].(string) + // fmt.Printf("%v\n", resourceAssetId) + + reqParam[models.SimulateResourceDataResult] = make(map[string][]map[string]interface{}) + toDestroyList := make(map[string]*models.ResourceDataTable) + for sourceDataIdx, sortedSourceData := range sortedSourceList { + simulateResourceData[sortedSourceData.Id] = []map[string]interface{}{} + 
reqParam[models.SimulateResourceData] = simulateResourceData + reqParam[models.SourceDataIdx] = sourceDataIdx + + isInternalAction := false + // Get all tfArguments of source + sqlCmd = `SELECT * FROM tf_argument WHERE source=?` + paramArgs = []interface{}{sortedSourceData.Id} + var allTfArgumentList []*models.TfArgumentTable + err = x.SQL(sqlCmd, paramArgs...).Find(&allTfArgumentList) + if err != nil { + err = fmt.Errorf("Get tfArgument data by source:%s error:%s", sortedSourceData.Id, err.Error()) + log.Logger.Error("Get tfArgument data by source error", log.String("source", sortedSourceData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(allTfArgumentList) == 0 { + err = fmt.Errorf("TfArgument data can not be got by source:%s ", sortedSourceData.Id) + log.Logger.Error("TfArgument data by can not be got by source error", log.String("source", sortedSourceData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Get root tfArguments of source + tfArgName := "ROOT" + sqlCmd = `SELECT * FROM tf_argument WHERE source=? 
AND name=?` + paramArgs = []interface{}{sortedSourceData.Id, tfArgName} + var rootTfArgumentList []*models.TfArgumentTable + err = x.SQL(sqlCmd, paramArgs...).Find(&rootTfArgumentList) + if err != nil { + err = fmt.Errorf("Get tfArgument data by source:%s and name:%s error:%s", sortedSourceData.Id, tfArgName, err.Error()) + log.Logger.Error("Get tfArgument data by source and name error", log.String("source", sortedSourceData.Id), log.String("name", tfArgName), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + conStructObject := []map[string]interface{}{} + if len(rootTfArgumentList) == 0 { + reqParam[models.SourceDataIdx] = 0 + var convertedArgumentData map[string]interface{} + convertedArgumentData, rootResourceAssetId, err = handleConvertParams(action, sortedSourceData, allTfArgumentList, reqParam, providerData, regionData) + if err != nil { + err = fmt.Errorf("Handle convert params error:%s", err.Error()) + log.Logger.Error("Handle convert params error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + conStructObject = append(conStructObject, convertedArgumentData) + } else { + if sourceDataIdx > 0 { + isInternalAction = true + } + inPutValSlice := [][]interface{}{} + handledTfArguments := make(map[string]bool) + for _, rootTfArgumentData := range rootTfArgumentList { + handledTfArguments[rootTfArgumentData.Id] = true + if rootTfArgumentData.Parameter == "" && rootTfArgumentData.RelativeSource == "" { + err = fmt.Errorf("TfArgument data: %s must have parameter and relative_source", rootTfArgumentData.Id) + log.Logger.Error("TfArgument data: %s must have parameter and relative_source", log.String("rootTfArgumentId", rootTfArgumentData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } else if rootTfArgumentData.Parameter != "" { + convertedArgumentData, _, tmpErr := handleConvertParams(action, sortedSourceData, []*models.TfArgumentTable{rootTfArgumentData}, reqParam, providerData, regionData) + 
if tmpErr != nil { + err = fmt.Errorf("Handle convert params error:%s", err.Error()) + log.Logger.Error("Handle convert params error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if rootTfArgumentData.Type == "object" { + var inPutVal []map[string]interface{} + if rootTfArgumentData.IsMulti == "N" { + inPutVal = append(inPutVal, convertedArgumentData[rootTfArgumentData.Name].(map[string]interface{})) + } else { + inPutVal = convertedArgumentData[rootTfArgumentData.Name].([]map[string]interface{}) + } + + // Get the memberTfArguments of rootTfArgument + sqlCmd = "SELECT * FROM tf_argument WHERE source=? AND object_name=?" + var memberTfArguments []*models.TfArgumentTable + paramArgs = []interface{}{sortedSourceData.Id, rootTfArgumentData.Id} + err = x.SQL(sqlCmd, paramArgs...).Find(&memberTfArguments) + if err != nil { + err = fmt.Errorf("Get memberTfArgument list error:%s", err.Error()) + log.Logger.Error("Get memberTfArgument list error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(memberTfArguments) == 0 { + err = fmt.Errorf("MemberTfArgument list can not be found by source:%s and object_name:%s", sortedSourceData.Id, rootTfArgumentData.Id) + log.Logger.Warn("MemberTfArgument list can not be found by source and object_name", log.String("source", sortedSourceData.Id), log.String("object_name", rootTfArgumentData.Id), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + for _, v := range memberTfArguments { + handledTfArguments[v.Id] = true + } + + convertedInPutVal := []interface{}{} + for _, v := range inPutVal { + var tmpTfArguments map[string]interface{} + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + v[models.ResourceDataDebug] = reqParam[models.ResourceDataDebug] + } + + tmpTfArguments, _, err = handleConvertParams(action, sortedSourceData, memberTfArguments, v, providerData, regionData) + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + delete(v, 
models.ResourceDataDebug) + } + + if err != nil { + err = fmt.Errorf("HandleConvertParams error:%s", err.Error()) + log.Logger.Warn("HandleConvertParams error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + convertedInPutVal = append(convertedInPutVal, tmpTfArguments) + } + inPutValSlice = append(inPutValSlice, convertedInPutVal) + } else { + // get the key name for the ROOT's values in convertedArgumentData + sqlCmd = "SELECT * FROM tf_argument WHERE source=? AND name!=? AND parameter=?" + var memberTfArguments []*models.TfArgumentTable + paramArgs = []interface{}{sortedSourceData.Id, "ROOT", rootTfArgumentData.Parameter} + err = x.SQL(sqlCmd, paramArgs...).Find(&memberTfArguments) + if err != nil { + err = fmt.Errorf("Get memberTfArgument list error:%s", err.Error()) + log.Logger.Error("Get memberTfArgument list error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(memberTfArguments) == 0 { + err = fmt.Errorf("MemberTfArgument list can not be found by source:%s and parameter:%s", sortedSourceData.Id, rootTfArgumentData.Parameter) + log.Logger.Warn("MemberTfArgument list can not be found by source and parameter", log.String("source", sortedSourceData.Id), log.String("parameter", rootTfArgumentData.Parameter), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + for _, v := range memberTfArguments { + handledTfArguments[v.Id] = true + } + + // find the root's parameter + sqlCmd = `SELECT * FROM parameter WHERE id=?` + paramArgs = []interface{}{rootTfArgumentData.Parameter} + var parameterList []*models.ParameterTable + err = x.SQL(sqlCmd, paramArgs...).Find(¶meterList) + if err != nil { + err = fmt.Errorf("Get Parameter data by id:%s error:%s", rootTfArgumentData.Parameter, err.Error()) + log.Logger.Error("Get parameter data by id error", log.String("id", rootTfArgumentData.Parameter), log.Error(err)) + return + } + if len(parameterList) == 0 { + err = fmt.Errorf("Parameter data can not be 
found by id:%s", rootTfArgumentData.Parameter) + log.Logger.Warn("Parameter data can not be found by id", log.String("id", rootTfArgumentData.Parameter), log.Error(err)) + return + } + parameterData := parameterList[0] + + tmpRootVal := convertedArgumentData[rootTfArgumentData.Name] + tmpPVal := []interface{}{} + /* + p := reflect.ValueOf(tmpRootVal) + for i := 0; i < p.Len(); i++ { + tmpPVal = append(tmpPVal, p.Index(i).Interface()) + } + */ + if rootTfArgumentData.IsMulti == "N" { + tmpPVal = append(tmpPVal, tmpRootVal) + } else { + tmpPVal = tmpRootVal.([]interface{}) + } + + convertedInPutVal := []interface{}{} + for i := range tmpPVal { + v := make(map[string]interface{}) + if parameterData.Multiple == "Y" { + v[parameterData.Name] = []interface{}{tmpPVal[i]} + } else { + v[parameterData.Name] = tmpPVal[i] + } + + var tmpTfArguments map[string]interface{} + if _, ok := reqParam[models.ResourceDataDebug]; ok { + v[models.ResourceDataDebug] = reqParam[models.ResourceDataDebug] + } + + tmpTfArguments, _, err = handleConvertParams(action, sortedSourceData, memberTfArguments, v, providerData, regionData) + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + delete(v, models.ResourceDataDebug) + } + + if err != nil { + err = fmt.Errorf("HandleConvertParams error:%s", err.Error()) + log.Logger.Warn("HandleConvertParams error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + convertedInPutVal = append(convertedInPutVal, tmpTfArguments) + } + inPutValSlice = append(inPutValSlice, convertedInPutVal) + } + } else if rootTfArgumentData.Parameter == "" { + /* + handledTfArguments[rootTfArgumentData.Id] = true + // handle remain tfArguments + remainTfArguments := []*models.TfArgumentTable{} + for _, v := range allTfArgumentList { + if _, ok := handledTfArguments[v.Id]; !ok { + remainTfArguments = append(remainTfArguments, v) + } + } + for i := range remainTfArguments { + handledTfArguments[remainTfArguments[i].Id] = true + var tmpTfArguments 
map[string]interface{} + reqParam[models.ResourceIdDataConvert] = resourceId + tmpTfArguments, _, err = handleConvertParams(action, sortedSourceData, []*models.TfArgumentTable{remainTfArguments[i]}, reqParam, providerData, regionData) + delete(reqParam, models.ResourceIdDataConvert) + if err != nil { + err = fmt.Errorf("HandleConvertParams error:%s", err.Error()) + log.Logger.Warn("HandleConvertParams error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + resourceDataAssetIdList := []interface{}{} + if _, ok := tmpTfArguments[remainTfArguments[i].Name].([]interface{}); ok { + resourceDataAssetIdList = append(resourceDataAssetIdList, (tmpTfArguments[remainTfArguments[i].Name].([]interface{}))...) + } else { + resourceDataAssetIdList = append(resourceDataAssetIdList, tmpTfArguments[remainTfArguments[i].Name]) + } + + convertedInPutVal := []interface{}{} + for idx := range resourceDataAssetIdList { + tmpInPutVal := make(map[string]interface{}) + tmpInPutVal[remainTfArguments[i].Name] = resourceDataAssetIdList[idx] + convertedInPutVal = append(convertedInPutVal, tmpInPutVal) + } + inPutValSlice = append(inPutValSlice, convertedInPutVal) + } + */ + + sqlCmd = "SELECT * FROM tf_argument WHERE source=? AND relative_source=? AND name!=?" 
+ var memberTfArguments []*models.TfArgumentTable + paramArgs = []interface{}{sortedSourceData.Id, rootTfArgumentData.RelativeSource, "ROOT"} + err = x.SQL(sqlCmd, paramArgs...).Find(&memberTfArguments) + if err != nil { + err = fmt.Errorf("Get memberTfArgument list error:%s", err.Error()) + log.Logger.Error("Get memberTfArgument list error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if len(memberTfArguments) == 0 { + err = fmt.Errorf("MemberTfArgument list can not be found by source:%s and parameter:%s", sortedSourceData.Id, rootTfArgumentData.Parameter) + log.Logger.Warn("MemberTfArgument list can not be found by source and parameter", log.String("source", sortedSourceData.Id), log.String("parameter", rootTfArgumentData.Parameter), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + for _, v := range memberTfArguments { + handledTfArguments[v.Id] = true + } + + reqParam[models.ResourceIdDataConvert] = resourceId + convertedRootArgumentData, _, tmpErr := handleConvertParams(action, sortedSourceData, []*models.TfArgumentTable{rootTfArgumentData}, reqParam, providerData, regionData) + delete(reqParam, models.ResourceIdDataConvert) + if tmpErr != nil { + err = fmt.Errorf("Handle convert params error:%s", err.Error()) + log.Logger.Error("Handle convert params error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + tmpRootVal := convertedRootArgumentData[rootTfArgumentData.Name] + tmpRootRes := []interface{}{} + if rootTfArgumentData.IsMulti == "N" { + tmpRootRes = append(tmpRootRes, tmpRootVal) + } else { + tmpRootRes = tmpRootVal.([]interface{}) + } + + // for i := range tmpRootRes { + // } + convertedInPutVal := []interface{}{} + for i := 0; i < len(tmpRootRes); i++ { + + // v := make(map[string]interface{}) + // if parameterData.Multiple == "Y" { + // v[parameterData.Name] = []interface{}{tmpPVal[i]} + // } else { + // v[parameterData.Name] = tmpPVal[i] + // } + + tmpTfArguments := 
make(map[string]interface{}) + // if _, ok := reqParam[models.ResourceDataDebug]; ok { + // v[models.ResourceDataDebug] = reqParam[models.ResourceDataDebug] + // } + + // tmpTfArguments, _, err = handleConvertParams(action, sortedSourceData, memberTfArguments, reqParam, providerData, regionData) + for j := range memberTfArguments { + tmpTfArguments[memberTfArguments[j].Name] = tmpRootRes[i] + } + + // if _, ok := reqParam[models.ResourceDataDebug]; ok { + // delete(v, models.ResourceDataDebug) + // } + + if err != nil { + err = fmt.Errorf("HandleConvertParams error:%s", err.Error()) + log.Logger.Warn("HandleConvertParams error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + convertedInPutVal = append(convertedInPutVal, tmpTfArguments) + } + inPutValSlice = append(inPutValSlice, convertedInPutVal) + } + } + // Construct the object + curObject := make(map[string]interface{}) + handleConStructObject(&conStructObject, inPutValSlice, curObject, 0) + + // handle remain tfArguments + remainTfArguments := []*models.TfArgumentTable{} + for _, v := range allTfArgumentList { + if _, ok := handledTfArguments[v.Id]; !ok { + remainTfArguments = append(remainTfArguments, v) + } + } + + var tmpTfArguments map[string]interface{} + reqParam[models.ResourceIdDataConvert] = resourceId + tmpTfArguments, _, err = handleConvertParams(action, sortedSourceData, remainTfArguments, reqParam, providerData, regionData) + delete(reqParam, models.ResourceIdDataConvert) + if err != nil { + err = fmt.Errorf("HandleConvertParams error:%s", err.Error()) + log.Logger.Warn("HandleConvertParams error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Add remain tfArgument to conStructObject + for i := range conStructObject { + for k, v := range tmpTfArguments { + conStructObject[i][k] = v + } + } + } + + // Finish to construct the converted parameters, and start to handle the whole process + + // Get the resource_data list by resource_id and source + 
sqlCmd = `SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=?` + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = `SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=?` + } + paramArgs = []interface{}{sortedSourceData.Id, resourceId, regionData.RegionId} + var resourceDataList []*models.ResourceDataTable + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList) + if err != nil { + err = fmt.Errorf("Get resource data by resource:%s and resource_id:%s error: %s", sortedSourceData.Id, resourceId, err.Error()) + log.Logger.Error("Get resource data by resource and resource_id error", log.String("resource", sortedSourceData.Id), log.String("resource_id", resourceId), log.Error(err)) + return + } + if len(resourceDataList) == 0 { + err = fmt.Errorf("ResourceData can not be found by resource:%s and resource_id:%s", sortedSourceData.Id, resourceId) + log.Logger.Warn("ResourceData can not be found by resource and resource_id", log.String("resource", sortedSourceData.Id), log.String("resource_id", resourceId), log.Error(err)) + } + + // Get tfArgument list by key_argument='Y' + sqlCmd = `SELECT * FROM tf_argument WHERE source=? 
AND key_argument=?` + paramArgs = []interface{}{sortedSourceData.Id, "Y"} + var keyTfArgumentDataList []*models.TfArgumentTable + err = x.SQL(sqlCmd, paramArgs...).Find(&keyTfArgumentDataList) + if err != nil { + err = fmt.Errorf("Get tfArgument data by resource:%s and key_argument:%s error: %s", sortedSourceData.Id, "Y", err.Error()) + log.Logger.Error("Get tfArgument data by resource and key_argument error", log.String("resource", sortedSourceData.Id), log.String("key_arugment", "Y"), log.Error(err)) + return + } + if len(keyTfArgumentDataList) == 0 { + err = fmt.Errorf("TfArgument Data can not be found by resource:%s and key_argument:%s", sortedSourceData.Id, "Y") + log.Logger.Warn("TfArgument Data can not be found by resource and key_argument", log.String("resource", sortedSourceData.Id), log.String("key_argument", "Y"), log.Error(err)) + } + + workDirPath := GenWorkDirPath(reqParam["id"].(string), + reqParam["requestSn"].(string), + reqParam["requestId"].(string), + providerData, + regionData, + plugin, + sortedSourceData) + + // Gen the terraform workdir + err = GenDir(workDirPath) + if err != nil { + err = fmt.Errorf("Gen the terraform workdir: %s error: %s", workDirPath, err.Error()) + log.Logger.Error("Gen the terraform workdir error", log.String("workDirPath", workDirPath), log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Gen provider.tf.json + err = GenProviderFile(workDirPath, providerData, providerInfoData, regionData) + if err != nil { + err = fmt.Errorf("Gen providerFile error: %s", err.Error()) + log.Logger.Error("Gen providerFile error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + defer DelProviderFile(workDirPath) + + // Gen version.tf + err = GenVersionFile(workDirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen versionFile error: %s", err.Error()) + log.Logger.Error("Gen versionFile error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Gen softlink 
of terraform provider file + err = GenTerraformProviderSoftLink(workDirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen terraform provider soft link error: %s", err.Error()) + log.Logger.Error("Gen terraform provider soft link error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + // Gen soft link for .terraform.lock.hcl + err = GenTerraformLockHclSoftLink(workDirPath, providerData) + if err != nil { + err = fmt.Errorf("Gen terraform lock soft link error: %s", err.Error()) + log.Logger.Error("Gen terraform lock soft link error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + + newCreateObject := make(map[int]bool) + importObject := make(map[int]string) + importObjectResourceData := make(map[int]*models.ResourceDataTable) + needToTakeAway := make(map[int]string) + toDestroyResource := make(map[string]*models.ResourceDataTable) + matchResourceData := make(map[string]bool) + if action == "apply" { + if len(keyTfArgumentDataList) > 0 { + keyArgumentNameVal := make(map[string]interface{}) + for _, v := range keyTfArgumentDataList { + keyArgumentNameVal[v.Name] = "" + } + + for i := range conStructObject { + curObject := conStructObject[i] + for k, _ := range keyArgumentNameVal { + keyArgumentNameVal[k] = curObject[k] + } + // 对比 datas 的 tf file,看是否都匹配 + for _, data := range resourceDataList { + tmpTfFileArgument := make(map[string]map[string]map[string]map[string]interface{}) + tmpTfFileArgument["resource"] = make(map[string]map[string]map[string]interface{}) + tmpTfFileArgument["resource"][sortedSourceData.Name] = make(map[string]map[string]interface{}) + tmpTfFileArgument["resource"][sortedSourceData.Name][resourceId] = make(map[string]interface{}) + json.Unmarshal([]byte(data.TfFile), &tmpTfFileArgument) + isMatch := true + for k, v := range keyArgumentNameVal { + if v != tmpTfFileArgument["resource"][sortedSourceData.Name][resourceId][k] { + isMatch = false + break + } + } + if isMatch { + // check if 
the item needed to be deleted because of the relatived id in toDestroyList + isMatchAgain := true + for _, v := range keyArgumentNameVal { + isAllValid := true + for j := range toDestroyList { + if toDestroyList[j].ResourceAssetId == v { + isAllValid = false + break + } + } + if isAllValid == false { + isMatchAgain = false + toDestroyResource[data.Id] = data + needToTakeAway[i] = data.ResourceAssetId + break + } + } + if isMatchAgain { + matchResourceData[data.Id] = true + importObject[i] = data.ResourceAssetId + importObjectResourceData[i] = data + } + break + } + } + } + } else { + if len(resourceDataList) > 0 { + matchResourceData[resourceDataList[0].Id] = true + importObject[0] = resourceDataList[0].ResourceAssetId + importObjectResourceData[0] = resourceDataList[0] + } + } + + for i := range conStructObject { + if _, ok := importObject[i]; !ok { + if _, okAgain := needToTakeAway[i]; !okAgain { + newCreateObject[i] = true + } + } + } + for _, data := range resourceDataList { + if _, ok := matchResourceData[data.Id]; !ok { + // toDestroyResource[data.ResourceAssetId] = data + toDestroyResource[data.Id] = data + } + } + } + + // Recodr the debugFileContent + curDebugFileStartIdx := len(*debugFileContent) + for i := range conStructObject { + curDebugFileContent := make(map[string]interface{}) + curDebugFileContent["tf_json_old"] = "" + curDebugFileContent["tf_json_new"] = "" + curDebugFileContent["tf_state_old"] = "" + curDebugFileContent["tf_state_new"] = "" + curDebugFileContent["tf_state_import"] = "" + curDebugFileContent["plan_message"] = "" + curDebugFileContent["source_name"] = sortedSourceData.Name + *debugFileContent = append(*debugFileContent, curDebugFileContent) + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + curTfFileContentStr, err := GenTfFile(workDirPath, sortedSourceData, action, resourceId, conStructObject[i]) + if err != nil { + err = fmt.Errorf("Gen tfFile error: %s", err.Error()) + log.Logger.Error("Gen tfFile error", 
log.Error(err)) + rowData["errorMessage"] = err.Error() + continue + } + curDebugFileContent["tf_json_new"] = curTfFileContentStr + + if _, tmpOk := importObject[i]; tmpOk { + // get tf_json_old and tf_state_old file content + getOldTfFile(curDebugFileContent, regionData, sortedSourceData, resourceId, importObject[i]) + } + } + } + if action == "apply" { + if reqParam["confirmToken"] != "Y" { + destroyAssetId := "" + totalDestroyCnt := len(toDestroyResource) + if len(toDestroyResource) > 0 { + for _, resourceData := range toDestroyResource { + destroyAssetId += resourceData.ResourceAssetId + ", " + } + } + + for i := range conStructObject { + curDebugFileContent := (*debugFileContent)[curDebugFileStartIdx+i] + // check if importObject needed to be destroy + if _, ok := importObject[i]; ok || (sourceDataIdx == 0 && rootResourceAssetId != "" && rootResourceAssetId != nil) { + // Gen tf.json file + // uuid := "_" + guid.CreateGuid() + _, err = GenTfFile(workDirPath, sortedSourceData, action, resourceId, conStructObject[i]) + if err != nil { + err = fmt.Errorf("Gen tfFile error: %s", err.Error()) + log.Logger.Error("Gen tfFile error", log.Error(err)) + rowData["errorMessage"] = err.Error() + continue + } + + err = TerraformInit(workDirPath) + if err != nil { + err = fmt.Errorf("Do TerraformInit error:%s", err.Error()) + log.Logger.Error("Do TerraformInit error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // return + continue + } + + DelTfstateFile(workDirPath) + if sortedSourceData.ImportSupport != "N" { + if sourceDataIdx == 0 && rootResourceAssetId != "" && rootResourceAssetId != nil { + err = TerraformImport(workDirPath, sortedSourceData.Name+"."+resourceId, rootResourceAssetId.(string)) + } else { + err = TerraformImport(workDirPath, sortedSourceData.Name+"."+resourceId, importObject[i]) + } + if err != nil { + errMsg := err.Error() + if strings.Contains(errMsg, "Cannot import non-existent remote object") == false { + err = fmt.Errorf("Do 
TerraformImport error:%s", err.Error()) + rowData["errorMessage"] = err.Error() + // return + continue + } else { + // deleteOldResourceData(sortedSourceData, regionData, resourceId, importObject[i], reqParam) + } + } else if len(importObjectResourceData) > i { + oldTfstateFile := importObjectResourceData[i].TfStateFile + var oldTfstateFileObj models.TfstateFileData + err = json.Unmarshal([]byte(oldTfstateFile), &oldTfstateFileObj) + if err != nil { + err = fmt.Errorf("Unmarshal tfstate file data error:%s", err.Error()) + log.Logger.Error("Unmarshal tfstate file data error", log.Error(err)) + return + } + + secondFileObj := getFileAttrContent(workDirPath + "/terraform.tfstate") + first, second := make(map[string]interface{}), make(map[string]interface{}) + first = oldTfstateFileObj.Resources[0].Instances[0].Attributes + err = json.Unmarshal(secondFileObj.AttrBytes, &second) + if err != nil { + fmt.Printf("json unmarshal second file fail,%s \n", err.Error()) + return + } + + result, diff, message := compareObject(first, second) + if diff != 0 { + err = fmt.Errorf("Compare import_state file and old tfstate file error:%s. 
Please confirm again!", message) + log.Logger.Error("Compare import_state file and old tfstate file error", log.String("message", message), log.Error(err)) + rowData["errorMessage"] = err.Error() + rowData["errorCode"] = "-1" + return + } + resultBytes, tmpErr := json.MarshalIndent(result, " ", "\t") + if tmpErr != nil { + err = fmt.Errorf("json marshal result fail,%s \n", tmpErr.Error()) + return + } + + newFileBytes := []byte{} + newFileWriter := bytes.NewBuffer(newFileBytes) + newFileWriter.WriteString(secondFileObj.FileContent[:secondFileObj.StartIndex]) + newFileWriter.Write(resultBytes) + newFileWriter.WriteString(secondFileObj.FileContent[secondFileObj.EndIndex:]) + ioutil.WriteFile(workDirPath+"/terraform.tfstate", newFileWriter.Bytes(), 0644) + } + } else { + // get tfstate file from resource_data table and gen it + tfstateFileContent := importObjectResourceData[i].TfStateFile + tfstateFilePath := workDirPath + "/terraform.tfstate" + GenFile([]byte(tfstateFileContent), tfstateFilePath) + } + if _, ok := reqParam[models.ResourceDataDebug]; ok { + // get import tfstate file + tfstateFilePath := workDirPath + "/terraform.tfstate" + tfstateImportFileData, tmpErr := ReadFile(tfstateFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read tfstate import file error:%s", tmpErr.Error()) + log.Logger.Error("Read tfstate import file error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // return + } + tfstateImportFileContentStr := string(tfstateImportFileData) + curDebugFileContent["tf_state_import"] = tfstateImportFileContentStr + } + + destroyCnt, tmpErr := TerraformPlan(workDirPath) + if tmpErr != nil { + err = fmt.Errorf("Do TerraformPlan error:%s", tmpErr.Error()) + log.Logger.Error("Do TerraformPlan error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // return + } + + if destroyCnt > 0 { + // 二次确认 + totalDestroyCnt += destroyCnt + destroyAssetId += importObject[i] + ", " + } + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + 
// get plan file + planFilePath := workDirPath + "/planfile" + planFileData, tmpErr := ReadFile(planFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read tfstate import file error:%s", tmpErr.Error()) + log.Logger.Error("Read tfstate import file error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // return + } + planFileContentStr := string(planFileData) + curDebugFileContent["plan_message"] = planFileContentStr + } + + DelTfstateFile(workDirPath) + } + } + // test + // totalDestroyCnt = 1 + if totalDestroyCnt > 0 { + destroyCntStr := strconv.Itoa(totalDestroyCnt) + rowData["errorMessage"] = destroyCntStr + " resource(s) will be destroy: " + destroyAssetId + "please confirm again!" + rowData["errorCode"] = "-1" + return + } + } + } + + // Do Terraform Action + for i := range conStructObject { + if _, ok := needToTakeAway[i]; ok { + continue + } + curDebugFileContent := (*debugFileContent)[curDebugFileStartIdx+i] + var tfFileContentStr string + err = TerraformInit(workDirPath) + if err != nil { + err = fmt.Errorf("Do TerraformInit error:%s", err.Error()) + log.Logger.Error("Do TerraformInit error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if action == "apply" { + if _, ok := importObject[i]; ok || (sourceDataIdx == 0 && rootResourceAssetId != "" && rootResourceAssetId != nil) { + // Gen tf.json file + // uuid := "_" + guid.CreateGuid() + tfFileContentStr, err = GenTfFile(workDirPath, sortedSourceData, action, resourceId, conStructObject[i]) + if err != nil { + err = fmt.Errorf("Gen tfFile error: %s", err.Error()) + log.Logger.Error("Gen tfFile error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // return + continue + } + + DelTfstateFile(workDirPath) + if sortedSourceData.ImportSupport != "N" { + if sourceDataIdx == 0 && rootResourceAssetId != "" && rootResourceAssetId != nil { + err = TerraformImport(workDirPath, sortedSourceData.Name+"."+resourceId, rootResourceAssetId.(string)) + } else { + err = 
TerraformImport(workDirPath, sortedSourceData.Name+"."+resourceId, importObject[i]) + } + if err != nil { + errMsg := err.Error() + if strings.Contains(errMsg, "Cannot import non-existent remote object") == false { + err = fmt.Errorf("Do TerraformImport error:%s", err.Error()) + rowData["errorMessage"] = err.Error() + // return + } else { + deleteOldResourceData(sortedSourceData, regionData, resourceId, importObject[i], reqParam) + } + } else { + // firstFileObj := getFileAttrContent(workDirPath + "/terraform.tfstate") + // get the old tfstate file content + if len(importObjectResourceData) > i { + oldTfstateFile := importObjectResourceData[i].TfStateFile + var oldTfstateFileObj models.TfstateFileData + err = json.Unmarshal([]byte(oldTfstateFile), &oldTfstateFileObj) + if err != nil { + err = fmt.Errorf("Unmarshal tfstate file data error:%s", err.Error()) + log.Logger.Error("Unmarshal tfstate file data error", log.Error(err)) + return + } + + secondFileObj := getFileAttrContent(workDirPath + "/terraform.tfstate") + first, second := make(map[string]interface{}), make(map[string]interface{}) + first = oldTfstateFileObj.Resources[0].Instances[0].Attributes + err = json.Unmarshal(secondFileObj.AttrBytes, &second) + if err != nil { + fmt.Printf("json unmarshal second file fail,%s \n", err.Error()) + return + } + + result, _, _ := compareObject(first, second) + /* + result, diff, message := compareObject(first, second) + if diff != 0 { + err = fmt.Errorf("Compare import_state file and old tfstate file error:%s. 
Please confirm again!", message) + log.Logger.Error("Compare import_state file and old tfstate file error", log.String("message", message), log.Error(err)) + rowData["errorMessage"] = err.Error() + rowData["errorCode"] = "-1" + return + } + */ + resultBytes, tmpErr := json.MarshalIndent(result, " ", "\t") + if tmpErr != nil { + err = fmt.Errorf("json marshal result fail,%s \n", tmpErr.Error()) + return + } + + newFileBytes := []byte{} + newFileWriter := bytes.NewBuffer(newFileBytes) + newFileWriter.WriteString(secondFileObj.FileContent[:secondFileObj.StartIndex]) + newFileWriter.Write(resultBytes) + newFileWriter.WriteString(secondFileObj.FileContent[secondFileObj.EndIndex:]) + ioutil.WriteFile(workDirPath+"/terraform.tfstate", newFileWriter.Bytes(), 0644) + } + } + } else { + // get tfstate file from resource_data table and gen it + tfstateFileContent := importObjectResourceData[i].TfStateFile + tfstateFilePath := workDirPath + "/terraform.tfstate" + GenFile([]byte(tfstateFileContent), tfstateFilePath) + } + if _, ok := reqParam[models.ResourceDataDebug]; ok { + // resource_data debug mode, get the terraform.state file after terraform import + tfstateFilePath := workDirPath + "/terraform.tfstate" + tfstateFileData, tmpErr := ReadFile(tfstateFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read import_tfstate file error:%s", tmpErr.Error()) + log.Logger.Error("Read import_tfstate file error", log.Error(err)) + // rowData["errorMessage"] = err.Error() + // return + } + tfstateFileContentStr := string(tfstateFileData) + curDebugFileContent["tf_state_import"] = tfstateFileContentStr + // DelTfstateFile(workDirPath) + } + if len(importObjectResourceData) > 0 { + reqParam[models.ImportResourceDataTableId] = importObjectResourceData[i].Id + } + } + } + + // Gen tf.json file + tfFileContentStr, err = GenTfFile(workDirPath, sortedSourceData, action, resourceId, conStructObject[i]) + if err != nil { + err = fmt.Errorf("Gen tfFile error: %s", err.Error()) + 
log.Logger.Error("Gen tfFile error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // return + continue + } + + if action == "apply" { + destroyCnt, tmpErr := TerraformPlan(workDirPath) + fmt.Printf("%v\n", destroyCnt) + if tmpErr != nil { + err = fmt.Errorf("Do TerraformPlan error:%s", tmpErr.Error()) + log.Logger.Error("Do TerraformPlan error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + if _, ok := reqParam[models.ResourceDataDebug]; ok { + // resource_data debug mode, get the plan file after terraform plan + planFilePath := workDirPath + "/planfile" + planFileData, tmpErr := ReadFile(planFilePath) + if tmpErr != nil { + err = fmt.Errorf("Read plan file error:%s", tmpErr.Error()) + log.Logger.Error("Read plan file error", log.Error(err)) + rowData["errorMessage"] = err.Error() + // return + } + planFileContentStr := string(planFileData) + curDebugFileContent["plan_message"] = planFileContentStr + } + } + + err = TerraformApply(workDirPath) + if err != nil { + err = fmt.Errorf("Do TerraformApply error:%s", err.Error()) + log.Logger.Error("Do TerraformApply error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + // continue + } + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + tfstateFilePath := workDirPath + "/terraform.tfstate" + tfstateFileData, err := ReadFile(tfstateFilePath) + if err != nil { + err = fmt.Errorf("Read tfstate file error:%s", err.Error()) + log.Logger.Error("Read tfstate file error", log.Error(err)) + //return + } + tfstateFileContentStr := string(tfstateFileData) + curDebugFileContent["tf_state_new"] = tfstateFileContentStr + } + + // handl tfstate file + err = handleTfstateOutPut(sortedSourceData, + interfaceData, + reqParam, + regionData, + providerData, + action, + workDirPath, + tfFileContentStr, + resourceId, + rowData, + curDebugFileContent, + isInternalAction) + + // *debugFileContent = append(*debugFileContent, curDebugFileContent) + + DelTfstateFile(workDirPath) + + 
if action == "query" { + if sourceDataIdx != 0 { + resourceDataResult := reqParam[models.SimulateResourceDataResult].(map[string][]map[string]interface{}) + rootResult := resourceDataResult[sortedSourceList[0].Id] + curSourceRes := resourceDataResult[sortedSourceData.Id] + rootIdx := -1 + for ix := range rootResult { + flag := true + for k, v := range conStructObject[i] { + if rootResult[ix][k] != v { + flag = false + break + } + } + if flag == true { + rootIdx = ix + } + } + if rootIdx != -1 { + curRootResultOut := rootResult[rootIdx]["output"].(map[string]interface{}) + // fmt.Printf("%v", curRootResultOut) + if len(curSourceRes) > 0 { + for j := range curSourceRes { + for k, v := range curSourceRes[j] { + if _, ok := curRootResultOut[k]; !ok { + if _, tmpOk := v.([]interface{}); tmpOk { + var tmpV []interface{} + tmpV = v.([]interface{}) + curRootResultOut[k] = tmpV + } else { + curRootResultOut[k] = []interface{}{v} + } + } else { + if _, tmpOk := v.([]interface{}); tmpOk { + var tmpV []interface{} + tmpV = v.([]interface{}) + curRootResultOut[k] = append(curRootResultOut[k].([]interface{}), tmpV...) 
+ } else { + curRootResultOut[k] = append(curRootResultOut[k].([]interface{}), v) + } + } + } + } + } + } + resourceDataResult[sortedSourceData.Id] = []map[string]interface{}{} + } + } + } + if action == "apply" && len(toDestroyResource) > 0 { + for k, v := range toDestroyResource { + toDestroyList[k] = v + } + } + + DelProviderFile(workDirPath) + } + if action == "query" { + if len(sortedSourceList) > 1 { + curSimulateResourceData := reqParam[models.SimulateResourceDataResult].(map[string][]map[string]interface{}) + // rowData[models.TerraformOutPutPrefix] = curSimulateResourceData[sortedSourceList[0].Id] + curResult := curSimulateResourceData[sortedSourceList[0].Id] + result := []interface{}{} + for i := range curResult { + result = append(result, curResult[i]["output"]) + } + rowData[models.TerraformOutPutPrefix] = result + } + } + if action == "apply" && len(toDestroyList) > 0 { + for i := len(sortedSourceList) - 1; i >= 0; i-- { + // deletedResourceDataId := make(map[string]bool) + for _, v := range toDestroyList { + if v.Resource == sortedSourceList[i].Id { + //deletedResourceDataId[v.Id] = true + workDirPath := GenWorkDirPath(reqParam["id"].(string), + reqParam["requestSn"].(string), + reqParam["requestId"].(string), + providerData, + regionData, + plugin, + sortedSourceList[i]) + _, err = handleDestroy(workDirPath, + sortedSourceList[i], + providerData, + providerInfoData, + regionData, + reqParam, + plugin, + v) + if err != nil { + err = fmt.Errorf("Handle Destroy error: %s", err.Error()) + log.Logger.Error("Handle Destroy error", log.Error(err)) + rowData["errorMessage"] = err.Error() + } + } + } + } + } + rowData["errorCode"] = "0" + } else { + sourceData := sourceList[0] + + workDirPath := GenWorkDirPath(reqParam["id"].(string), + reqParam["requestSn"].(string), + reqParam["requestId"].(string), + providerData, + regionData, + plugin, + sourceData) + + curDebugFileContent := make(map[string]interface{}) + curDebugFileContent["tf_json_old"] = "" + 
curDebugFileContent["tf_json_new"] = "" + curDebugFileContent["tf_state_old"] = "" + curDebugFileContent["tf_state_new"] = "" + curDebugFileContent["tf_state_import"] = "" + curDebugFileContent["plan_message"] = "" + curDebugFileContent["source_name"] = sourceData.Name + if action == "apply" || action == "query" { + var retOutput map[string]interface{} + var tmpErr error + if sourceData.TerraformUsed != "N" { + // retOutput, tmpErr = handleTerraformApplyOrQuery(reqParam, sourceData, providerData, providerInfoData, regionData, action, plugin, workDirPath, interfaceData, curDebugFileContent) + } else { + retOutput, tmpErr = handleApplyOrQuery(action, reqParam, sourceData, regionData) + } + *debugFileContent = append(*debugFileContent, curDebugFileContent) + if tmpErr != nil { + err = fmt.Errorf("Handle ApplyOrQuery error: %s", tmpErr.Error()) + log.Logger.Error("Handle ApplyOrQuery error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + rowData["errorCode"] = "0" + + for k, v := range retOutput { + rowData[k] = v + } + + } else if action == "destroy" { + rowData["id"] = reqParam["id"].(string) + for i := len(sortedSourceList) - 1; i >= 0; i-- { + workDirPath = GenWorkDirPath(reqParam["id"].(string), + reqParam["requestSn"].(string), + reqParam["requestId"].(string), + providerData, + regionData, + plugin, + sortedSourceList[i]) + retOutput, tmpErr := handleDestroy(workDirPath, sortedSourceList[i], providerData, providerInfoData, regionData, reqParam, plugin, nil) + + if _, ok := retOutput["errorCode"]; ok { + if retOutput["errorCode"] == "1" { + err = fmt.Errorf("Handle Destroy error: %s", tmpErr.Error()) + log.Logger.Error("Handle Destroy error", log.Error(err)) + rowData["errorMessage"] = err.Error() + return + } + } + + /* + if tmpErr != nil { + err = fmt.Errorf("Handle Destroy error: %s", tmpErr.Error()) + log.Logger.Error("Handle Destroy error", log.Error(err)) + rowData["errorMessage"] = err.Error() + continue + } + */ + + /* + for k, v 
:= range retOutput { + rowData[k] = v + } + */ + // rowData["id"] = reqParam["id"].(string) + } + rowData["errorCode"] = "0" + } else { + err = fmt.Errorf("Action: %s is inValid", action) + log.Logger.Error("Action is inValid", log.String("action", action), log.Error(err)) + rowData["errorMessage"] = err.Error() + } + } + return +} + +func convertData(relativeSourceId string, reqParam map[string]interface{}, regionData *models.ResourceDataTable, tfArgument *models.TfArgumentTable, sourceData *models.SourceTable) (arg interface{}, err error) { + if tfArgument.Parameter == "" { + if sourceData.SourceType == "data_resource" { + if _, ok := reqParam[models.SimulateResourceData]; ok { + curSimulateResourceData := reqParam[models.SimulateResourceData].(map[string][]map[string]interface{}) + + tmpRes := []interface{}{} + resourceDatas := curSimulateResourceData[relativeSourceId] + for i := range resourceDatas { + tmpData := resourceDatas[i]["tfstateFile"].(map[string]interface{}) + assetIdAttribute := resourceDatas[i]["assetIdAttribute"].(string) + if _, ok := tmpData[assetIdAttribute]; ok { + tmpRes = append(tmpRes, tmpData[assetIdAttribute]) + } + } + if len(tmpRes) > 0 { + if tfArgument.IsMulti == "Y" { + arg = tmpRes + } else { + arg = tmpRes[0] + } + } + return + } + } + // get data from resource_data + // curTfArgRelativeSource := remainTfArguments[i].RelativeSource + sqlCmd := `SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=?` + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = `SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? 
AND region_id=?` + } + // resourceId: 来源 param 先判断 + resourceId := reqParam[models.ResourceIdDataConvert].(string) + paramArgs := []interface{}{relativeSourceId, resourceId, regionData.RegionId} + var resourceDataList []*models.ResourceDataTable + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList) + if err != nil { + err = fmt.Errorf("Get resource data by resource:%s and resource_id:%s error: %s", relativeSourceId, resourceId, err.Error()) + log.Logger.Error("Get resource data by resource and resource_id error", log.String("resource", relativeSourceId), log.String("resource_id", resourceId), log.Error(err)) + return + } + if len(resourceDataList) == 0 { + err = fmt.Errorf("ResourceData can not be found by resource:%s and resource_id:%s", relativeSourceId, resourceId) + log.Logger.Warn("ResourceData can not be found by resource and resource_id", log.String("resource", relativeSourceId), log.String("resource_id", resourceId), log.Error(err)) + // return + } + // arg = resourceDataList[0].ResourceAssetId + /* + tmpRes := []string{} + for i := range resourceDataList { + tmpRes = append(tmpRes, resourceDataList[i].ResourceAssetId) + } + arg = tmpRes + + */ + if tfArgument.IsMulti == "Y" { + tmpRes := []interface{}{} + for i := range resourceDataList { + tmpRes = append(tmpRes, resourceDataList[i].ResourceAssetId) + } + arg = tmpRes + } else { + arg = resourceDataList[0].ResourceAssetId + } + return + } + + // 查询 tfArgument 对应的 parameter + sqlCmd := `SELECT * FROM parameter WHERE id=?` + paramArgs := []interface{}{tfArgument.Parameter} + var parameterList []*models.ParameterTable + err = x.SQL(sqlCmd, paramArgs...).Find(¶meterList) + if err != nil { + err = fmt.Errorf("Get Parameter data by id:%s error:%s", tfArgument.Parameter, err.Error()) + log.Logger.Error("Get parameter data by id error", log.String("id", tfArgument.Parameter), log.Error(err)) + return + } + if len(parameterList) == 0 { + err = fmt.Errorf("Parameter data can not be found by id:%s", 
tfArgument.Parameter) + log.Logger.Warn("Parameter data can not be found by id", log.String("id", tfArgument.Parameter), log.Error(err)) + return + } + parameterData := parameterList[0] + if _, ok := reqParam[parameterData.Name]; !ok { + return + } + if reqParam[parameterData.Name] == nil { + return + } + var resourceIdList []string + if parameterData.Multiple == "Y" { + reqParamResourceIds := reqParam[parameterData.Name].([]interface{}) + for _, v := range reqParamResourceIds { + resourceIdList = append(resourceIdList, v.(string)) + } + } else { + resourceIdList = append(resourceIdList, reqParam[parameterData.Name].(string)) + } + + resourceIdsStr := strings.Join(resourceIdList, "','") + sqlCmd = "SELECT * FROM resource_data WHERE resource=? AND region_id=? AND resource_id IN ('" + resourceIdsStr + "')" + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = "SELECT * FROM resource_data_debug WHERE resource=? AND region_id=? AND resource_id IN ('" + resourceIdsStr + "')" + } + var resourceDataList []*models.ResourceDataTable + paramArgs = []interface{}{relativeSourceId, regionData.RegionId} + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList) + if err != nil { + err = fmt.Errorf("Get resource data by resource:%s and resource_id:%s error: %s", relativeSourceId, resourceIdsStr, err.Error()) + log.Logger.Error("Get resource data by resource and resource_id error", log.String("resource", relativeSourceId), log.String("resource_id", resourceIdsStr), log.Error(err)) + return + } + if len(resourceDataList) == 0 { + /* + err = fmt.Errorf("Resource_data can not be found by resource:%s and resource_id:%s", relativeSourceId, resourceIdsStr) + log.Logger.Warn("Resource_data can not be found by resource and resource_id", log.String("resource", relativeSourceId), log.String("resource_id", resourceIdsStr), log.Error(err)) + */ + err = nil + return + } + + if tfArgument.IsMulti == "Y" { + tmpRes := []interface{}{} + for i := range resourceDataList { + tmpRes = 
append(tmpRes, resourceDataList[i].ResourceAssetId) + } + arg = tmpRes + } else { + arg = resourceDataList[0].ResourceAssetId + } + return +} + +func reverseConvertData(parameterData *models.ParameterTable, tfstateAttributeData *models.TfstateAttributeTable, tfstateVal interface{}, reqParam map[string]interface{}, regionData *models.ResourceDataTable) (argKey string, argVal interface{}, err error) { + argKey = parameterData.Name + if tfstateVal == nil { + return + } + relativeSourceId := tfstateAttributeData.RelativeSource + var resourceAssetIds []string + if tfstateAttributeData.IsMulti == "Y" { + tfstateAssetIds := tfstateVal.([]interface{}) + for _, v := range tfstateAssetIds { + resourceAssetIds = append(resourceAssetIds, v.(string)) + } + } else { + resourceAssetIds = append(resourceAssetIds, tfstateVal.(string)) + } + resourceAssetIdsStr := strings.Join(resourceAssetIds, "','") + sqlCmd := "SELECT * FROM resource_data WHERE resource=? AND region_id=? AND resource_asset_id IN ('" + resourceAssetIdsStr + "')" + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = "SELECT * FROM resource_data_debug WHERE resource=? AND region_id=? 
AND resource_asset_id IN ('" + resourceAssetIdsStr + "')" + } + + paramArgs := []interface{}{relativeSourceId, regionData.RegionId} + var resourceDataList []*models.ResourceDataTable + err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList) + if err != nil { + err = fmt.Errorf("Get resource data by resource:%s and resource_asset_id:%s error:%s", relativeSourceId, resourceAssetIdsStr, err.Error()) + log.Logger.Error("Get resource data by resource and resource_asset_id error", log.String("resource", relativeSourceId), log.String("resource_asset_id", resourceAssetIdsStr), log.Error(err)) + return + } + if len(resourceDataList) == 0 { + err = fmt.Errorf("Resource data can not be found by resource:%s and resource_asset_id:%s", relativeSourceId, resourceAssetIdsStr) + log.Logger.Warn("Resource data can not be found by resource and resource_asset_id", log.String("resource", relativeSourceId), log.String("resource_asset_id", resourceAssetIdsStr), log.Error(err)) + return + } + argKey = parameterData.Name + + if parameterData.Multiple == "Y" { + tmpRes := []interface{}{} + for i := range resourceDataList { + tmpRes = append(tmpRes, resourceDataList[i].ResourceId) + } + argVal = tmpRes + } else { + argVal = resourceDataList[0].ResourceId + } + return +} + +func convertTemplate(providerData *models.ProviderTable, reqParam map[string]interface{}, tfArgument *models.TfArgumentTable) (arg interface{}, err error) { + if tfArgument.Parameter == "" { + arg = tfArgument.DefaultValue + return + } + // 查询 tfArgument 对应的 parameter + sqlCmd := `SELECT * FROM parameter WHERE id=?` + paramArgs := []interface{}{tfArgument.Parameter} + var parameterList []*models.ParameterTable + err = x.SQL(sqlCmd, paramArgs...).Find(¶meterList) + if err != nil { + err = fmt.Errorf("Get Parameter data by id:%s error:%s", tfArgument.Parameter, err.Error()) + log.Logger.Error("Get parameter data by id error", log.String("id", tfArgument.Parameter), log.Error(err)) + return + } + if len(parameterList) == 0 
{ + err = fmt.Errorf("Parameter data can not be found by id:%s", tfArgument.Parameter) + log.Logger.Warn("Parameter data can not be found by id", log.String("id", tfArgument.Parameter), log.Error(err)) + return + } + parameterData := parameterList[0] + + if reqParam[parameterData.Name] == nil { + return + } + + sqlCmd = `SELECT * FROM template_value WHERE template=? AND value=?` + templateId := parameterData.Template + paramVal := reqParam[parameterData.Name].(string) + paramArgs = []interface{}{templateId, paramVal} + var templateValueList []*models.TemplateValueTable + err = x.SQL(sqlCmd, paramArgs...).Find(&templateValueList) + if err != nil { + err = fmt.Errorf("Get template_value data by template:%s and value:%s error:%s", templateId, paramVal) + log.Logger.Error("Get tempalte_value data by template and value error", log.String("template", templateId), log.String("value", paramVal), log.Error(err)) + return + } + if len(templateValueList) == 0 { + err = fmt.Errorf("Template_value can not be found by template:%s and value:%s", templateId, paramVal) + log.Logger.Warn("Template_value can not be found by template and value", log.String("template", templateId), log.String("value", paramVal), log.Error(err)) + return + } + templateValueData := templateValueList[0] + + sqlCmd = `SELECT * FROM provider_template_value WHERE template_value=? 
AND provider=?` + paramArgs = []interface{}{templateValueData.Id, providerData.Id} + var providerTemplateValueList []*models.ProviderTemplateValueTable + err = x.SQL(sqlCmd, paramArgs...).Find(&providerTemplateValueList) + if err != nil { + err = fmt.Errorf("Get provider_template_value data by template_value:%s and provider:%s error:%s", templateValueData.Id, providerData.Id) + log.Logger.Error("Get provider_tempalte_value data by template_value and provider error", log.String("template_value", templateValueData.Id), log.String("provider", providerData.Id), log.Error(err)) + return + } + if len(providerTemplateValueList) == 0 { + err = fmt.Errorf("Provider_template_value can not be found by template_value:%s and provider:%s", templateValueData.Id, providerData.Id) + log.Logger.Warn("Provider_template_value can not be found by template_value and provider", log.String("template_value", templateValueData.Id), log.String("provider", providerData.Id), log.Error(err)) + return + } + arg = providerTemplateValueList[0].Value + return +} + +func reverseConvertTemplate(parameterData *models.ParameterTable, providerData *models.ProviderTable, tfstateVal interface{}) (argKey string, argVal string, err error) { + argKey = parameterData.Name + if tfstateVal == nil { + return + } + sqlCmd := `SELECT t1.* FROM template_value AS t1 LEFT JOIN provider_template_value AS t2 ON t1.id=t2.template_value WHERE t2.provider=? 
AND t2.value=?`
	paramArgs := []interface{}{providerData.Id, tfstateVal}
	var templateValueList []*models.TemplateValueTable
	err = x.SQL(sqlCmd, paramArgs...).Find(&templateValueList)
	if err != nil {
		// fix: the original fmt.Errorf was missing the value for the trailing %s verb
		err = fmt.Errorf("Get template_value data by provider:%s and tfstateValue:%v error:%s", providerData.Id, tfstateVal, err.Error())
		log.Logger.Error("Get template_value data by provider and tfstateValue error", log.String("provider", providerData.Id), log.String("tfstateValue", fmt.Sprintf("%v", tfstateVal)), log.Error(err))
		return
	}
	if len(templateValueList) == 0 {
		err = fmt.Errorf("Template_value can not be found by provider:%s and tfstateValue:%v", providerData.Id, tfstateVal)
		log.Logger.Warn("Template_value can not be found by provider and tfstateValue", log.String("provider", providerData.Id), log.String("tfstateValue", fmt.Sprintf("%v", tfstateVal)), log.Error(err))
		return
	}
	templateValueData := templateValueList[0]
	argKey = parameterData.Name
	argVal = templateValueData.Value
	return
}

// getParameterRowById loads a single row from the parameter table by primary
// key. err is non-nil when the query fails or when no row exists.
// Extracted because this lookup (and its logging) was duplicated in every
// context-convert function below, several copies with a fmt.Errorf that was
// missing the argument for its second %s verb.
func getParameterRowById(id string) (parameterData *models.ParameterTable, err error) {
	var parameterList []*models.ParameterTable
	err = x.SQL(`SELECT * FROM parameter WHERE id=?`, id).Find(&parameterList)
	if err != nil {
		err = fmt.Errorf("Get parameter data by id:%s error:%s", id, err.Error())
		log.Logger.Error("Get parameter data by id error", log.String("id", id), log.Error(err))
		return
	}
	if len(parameterList) == 0 {
		err = fmt.Errorf("Parameter can not be found by id:%s", id)
		log.Logger.Warn("Parameter can not be found by id", log.String("id", id), log.Error(err))
		return
	}
	parameterData = parameterList[0]
	return
}

// getTfstateAttributeRowById loads a single row from the tfstate_attribute
// table by primary key. err is non-nil when the query fails or no row exists.
func getTfstateAttributeRowById(id string) (attrData *models.TfstateAttributeTable, err error) {
	var attrList []*models.TfstateAttributeTable
	err = x.SQL(`SELECT * FROM tfstate_attribute WHERE id=?`, id).Find(&attrList)
	if err != nil {
		err = fmt.Errorf("Get tfstateAttribute data by id:%s error: %s", id, err.Error())
		log.Logger.Error("Get tfstateAttribute data by id error", log.String("id", id), log.Error(err))
		return
	}
	if len(attrList) == 0 {
		err = fmt.Errorf("TfstateAttribute data can not be found by id:%s", id)
		log.Logger.Warn("TfstateAttribute data can not be found by id", log.String("id", id), log.Error(err))
		return
	}
	attrData = attrList[0]
	return
}

// getTfstateAttributesFromFile unmarshals a stored tfstate file and returns
// the attribute map of the first instance of the first resource. Guards the
// Resources[0].Instances[0] indexing that used to panic on an empty state.
func getTfstateAttributesFromFile(tfstateFileData string) (attributes map[string]interface{}, err error) {
	var fileData models.TfstateFileData
	if err = json.Unmarshal([]byte(tfstateFileData), &fileData); err != nil {
		err = fmt.Errorf("Unmarshal tfstate file data error:%s", err.Error())
		log.Logger.Error("Unmarshal tfstate file data error", log.Error(err))
		return
	}
	if len(fileData.Resources) == 0 || len(fileData.Resources[0].Instances) == 0 {
		err = fmt.Errorf("Tfstate file contains no resource instance")
		log.Logger.Warn("Tfstate file contains no resource instance", log.Error(err))
		return
	}
	attributes = fileData.Resources[0].Instances[0].Attributes
	return
}

// convertAttr resolves a tf_argument whose value comes from a tfstate
// attribute of a related resource. When the tf_argument has no parameter the
// related resource is addressed via reqParam[models.ResourceIdDataConvert];
// otherwise the configured request parameter supplies the relative resource
// id(s), each of which is resolved through resource_data (or its debug table).
func convertAttr(tfArgumentData *models.TfArgumentTable, reqParam map[string]interface{}, regionData *models.ResourceDataTable, tfArgument *models.TfArgumentTable, sourceData *models.SourceTable) (arg interface{}, err error) {
	if tfArgument.Parameter == "" {
		if sourceData.SourceType == "data_resource" {
			if _, ok := reqParam[models.SimulateResourceData]; ok {
				// simulation mode: the historic simulated-data handling was
				// disabled upstream; nothing is resolved here
				return
			}
		}

		// get data from resource_data (the debug table in debug mode)
		sqlCmd := `SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=?`
		if _, ok := reqParam[models.ResourceDataDebug]; ok {
			sqlCmd = `SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=?`
		}
		// the resource id comes from the request param set by the data-convert step
		relativeSourceId := tfArgumentData.RelativeSource
		resourceId := reqParam[models.ResourceIdDataConvert].(string)
		paramArgs := []interface{}{relativeSourceId, resourceId, regionData.RegionId}
		var resourceDataList []*models.ResourceDataTable
		err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList)
		if err != nil {
			err = fmt.Errorf("Get resource data by resource:%s and resource_id:%s error: %s", relativeSourceId, resourceId, err.Error())
			log.Logger.Error("Get resource data by resource and resource_id error", log.String("resource", relativeSourceId), log.String("resource_id", resourceId), log.Error(err))
			return
		}
		if len(resourceDataList) == 0 {
			err = fmt.Errorf("ResourceData can not be found by resource:%s and resource_id:%s", relativeSourceId, resourceId)
			log.Logger.Warn("ResourceData can not be found by resource and resource_id", log.String("resource", relativeSourceId), log.String("resource_id", resourceId), log.Error(err))
			return
		}
		resourceData := resourceDataList[0]

		var tfstateAttirbuteData *models.TfstateAttributeTable
		tfstateAttirbuteData, err = getTfstateAttributeRowById(tfArgumentData.RelativeTfstateAttribute)
		if err != nil {
			return
		}

		var tfstateFileAttributes map[string]interface{}
		tfstateFileAttributes, err = getTfstateAttributesFromFile(resourceData.TfStateFile)
		if err != nil {
			return
		}
		arg = tfstateFileAttributes[tfstateAttirbuteData.Name]
		return
	}

	// the tf_argument is driven by a request parameter
	var parameterData *models.ParameterTable
	parameterData, err = getParameterRowById(tfArgument.Parameter)
	if err != nil {
		return
	}

	if reqParam[parameterData.Name] == nil {
		return
	}

	relativeResourceIds := []string{}
	if parameterData.Multiple == "Y" {
		tmpData := reqParam[parameterData.Name].([]interface{})
		for i := range tmpData {
			relativeResourceIds = append(relativeResourceIds, tmpData[i].(string))
		}
	} else {
		relativeResourceIds = append(relativeResourceIds, reqParam[parameterData.Name].(string))
	}

	var tfstateAttirbuteData *models.TfstateAttributeTable
	tfstateAttirbuteData, err = getTfstateAttributeRowById(tfArgumentData.RelativeTfstateAttribute)
	if err != nil {
		return
	}

	// the query text is loop-invariant; hoisted out of the loop below
	sqlCmd := `SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=?`
	if _, ok := reqParam[models.ResourceDataDebug]; ok {
		sqlCmd = `SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=?`
	}
	result := []interface{}{}
	for i := range relativeResourceIds {
		paramArgs := []interface{}{tfArgumentData.RelativeSource, relativeResourceIds[i], regionData.RegionId}
		var resourceDataList []*models.ResourceDataTable
		err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList)
		if err != nil {
			err = fmt.Errorf("Get resource data by source:%s and resource_id:%s error: %s", tfArgumentData.RelativeSource, relativeResourceIds[i], err.Error())
			log.Logger.Error("Get resource data by source and resource_id error", log.String("source", tfArgumentData.RelativeSource), log.String("resource_id", relativeResourceIds[i]), log.Error(err))
			return
		}
		if len(resourceDataList) == 0 {
			err = fmt.Errorf("ResourceData can not be found by source:%s and resource_id:%s", tfArgumentData.RelativeSource, relativeResourceIds[i])
			log.Logger.Warn("ResourceData can not be found by source and resource_id", log.String("source", tfArgumentData.RelativeSource), log.String("resource_id", relativeResourceIds[i]), log.Error(err))
			return
		}
		var tfstateFileAttributes map[string]interface{}
		tfstateFileAttributes, err = getTfstateAttributesFromFile(resourceDataList[0].TfStateFile)
		if err != nil {
			return
		}
		result = append(result, tfstateFileAttributes[tfstateAttirbuteData.Name])
	}

	if tfArgumentData.IsMulti == "Y" {
		arg = result
	} else {
		// guard: an empty multiple-value request parameter used to panic on result[0]
		if len(result) == 0 {
			err = fmt.Errorf("No tfstate attribute value resolved for tfArgument parameter:%s", tfArgument.Parameter)
			log.Logger.Warn("No tfstate attribute value resolved", log.String("parameter", tfArgument.Parameter), log.Error(err))
			return
		}
		arg = result[0]
	}
	return
}

// reverseConvertAttr maps tfstate attribute value(s) back to the resource
// id(s) of the related source: it scans the region's resource_data rows and
// matches the relative tfstate attribute value against tfstateVal.
func reverseConvertAttr(parameterData *models.ParameterTable, tfstateAttributeData *models.TfstateAttributeTable, tfstateVal interface{}, reqParam map[string]interface{}, regionData *models.ResourceDataTable) (argKey string, argVal interface{}, err error) {
	argKey = parameterData.Name
	if tfstateVal == nil {
		return
	}
	relativeAssetVals := []string{}
	if tfstateAttributeData.IsMulti == "Y" {
		tmpData := tfstateVal.([]interface{})
		for i := range tmpData {
			relativeAssetVals = append(relativeAssetVals, tmpData[i].(string))
		}
	} else {
		relativeAssetVals = append(relativeAssetVals, tfstateVal.(string))
	}

	relativeAssetValMap := make(map[string]bool)
	for _, v := range relativeAssetVals {
		relativeAssetValMap[v] = true
	}

	var relativeTfstateAttirbuteData *models.TfstateAttributeTable
	relativeTfstateAttirbuteData, err = getTfstateAttributeRowById(tfstateAttributeData.RelativeTfstateAttribute)
	if err != nil {
		return
	}

	sqlCmd := `SELECT * FROM resource_data WHERE resource=? AND region_id=?`
	if _, ok := reqParam[models.ResourceDataDebug]; ok {
		sqlCmd = `SELECT * FROM resource_data_debug WHERE resource=? AND region_id=?`
	}
	paramArgs := []interface{}{tfstateAttributeData.RelativeSource, regionData.RegionId}
	var resourceDataList []*models.ResourceDataTable
	err = x.SQL(sqlCmd, paramArgs...).Find(&resourceDataList)
	if err != nil {
		err = fmt.Errorf("Get resource data by source:%s error: %s", tfstateAttributeData.RelativeSource, err.Error())
		log.Logger.Error("Get resource data by source error", log.String("source", tfstateAttributeData.RelativeSource), log.Error(err))
		return
	}
	if len(resourceDataList) == 0 {
		err = fmt.Errorf("ResourceData can not be found by source:%s", tfstateAttributeData.RelativeSource)
		log.Logger.Warn("ResourceData can not be found by source", log.String("source", tfstateAttributeData.RelativeSource), log.Error(err))
		return
	}

	result := []interface{}{}
	for _, resourceData := range resourceDataList {
		// fix: use a row-local error so a corrupt row is skipped without
		// leaking a non-nil err to the caller after the loop completes
		tfstateFileAttributes, rowErr := getTfstateAttributesFromFile(resourceData.TfStateFile)
		if rowErr != nil {
			continue
		}
		// fix: comma-ok assertion — a missing or non-string attribute used to panic
		attrVal, ok := tfstateFileAttributes[relativeTfstateAttirbuteData.Name].(string)
		if !ok {
			continue
		}
		if relativeAssetValMap[attrVal] {
			result = append(result, resourceData.ResourceId)
			delete(relativeAssetValMap, attrVal)
		}
	}

	if parameterData.Multiple == "Y" {
		tmpRes := []interface{}{}
		tmpRes = append(tmpRes, result...)
		argVal = tmpRes
	} else {
		// guard: a missing match used to panic on result[0]
		if len(result) == 0 {
			err = fmt.Errorf("No resource_data matches tfstate value for source:%s", tfstateAttributeData.RelativeSource)
			log.Logger.Warn("No resource_data matches tfstate value", log.String("source", tfstateAttributeData.RelativeSource), log.Error(err))
			return
		}
		argVal = result[0]
	}
	return
}

// convertContextData applies convertData only when the relative context
// parameter in reqParam equals the configured relative value; otherwise the
// tf_argument is discarded (isDiscard=true).
func convertContextData(tfArgumentData *models.TfArgumentTable, reqParam map[string]interface{}, regionData *models.ResourceDataTable, tfArgument *models.TfArgumentTable, sourceData *models.SourceTable) (arg interface{}, isDiscard bool, err error) {
	if tfArgument.Parameter == "" {
		arg = tfArgument.DefaultValue
		return
	}
	// validate that the tf_argument's own parameter exists
	if _, err = getParameterRowById(tfArgument.Parameter); err != nil {
		return
	}
	// get the relative (context) parameter
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfArgumentData.RelativeParameter)
	if err != nil {
		return
	}
	if reqParam[relativeParameterData.Name] == nil {
		return
	}
	if reqParam[relativeParameterData.Name].(string) == tfArgumentData.RelativeParameterValue {
		arg, err = convertData(tfArgumentData.RelativeSource, reqParam, regionData, tfArgument, sourceData)
	} else {
		isDiscard = true
	}
	return
}

// reverseConvertContextData applies reverseConvertData only when the relative
// context parameter in outPutArgs equals the configured relative value.
func reverseConvertContextData(parameterData *models.ParameterTable,
	tfstateAttributeData *models.TfstateAttributeTable,
	tfstateVal interface{},
	outPutArgs map[string]interface{},
	reqParam map[string]interface{},
	regionData *models.ResourceDataTable) (argKey string, argVal interface{}, isDiscard bool, err error) {

	argKey = parameterData.Name
	if tfstateVal == nil {
		return
	}
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfstateAttributeData.RelativeParameter)
	if err != nil {
		return
	}
	if outPutArgs[relativeParameterData.Name] == nil {
		return
	}
	if outPutArgs[relativeParameterData.Name].(string) == tfstateAttributeData.RelativeParameterValue {
		argKey, argVal, err = reverseConvertData(parameterData, tfstateAttributeData, tfstateVal, reqParam, regionData)
	} else {
		isDiscard = true
	}
	return
}

// convertContextDirect applies convertDirect only when the relative context
// parameter in reqParam equals the configured relative value.
func convertContextDirect(tfArgumentData *models.TfArgumentTable, reqParam map[string]interface{}, regionData *models.ResourceDataTable) (arg interface{}, isDiscard bool, err error) {
	if tfArgumentData.Parameter == "" {
		arg = tfArgumentData.DefaultValue
		return
	}
	// validate that the tf_argument's own parameter exists
	if _, err = getParameterRowById(tfArgumentData.Parameter); err != nil {
		return
	}
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfArgumentData.RelativeParameter)
	if err != nil {
		return
	}
	if reqParam[relativeParameterData.Name] == nil {
		return
	}
	if reqParam[relativeParameterData.Name].(string) == tfArgumentData.RelativeParameterValue {
		arg, err = convertDirect(tfArgumentData.DefaultValue, reqParam, tfArgumentData)
	} else {
		isDiscard = true
	}
	return
}

// reverseConvertContextDirect applies reverseConvertDirect only when the
// relative context parameter in outPutArgs equals the configured value.
func reverseConvertContextDirect(parameterData *models.ParameterTable,
	tfstateAttributeData *models.TfstateAttributeTable,
	tfstateVal interface{},
	outPutArgs map[string]interface{},
	reqParam map[string]interface{},
	regionData *models.ResourceDataTable) (argKey string, argVal interface{}, isDiscard bool, err error) {

	argKey = parameterData.Name
	if tfstateVal == nil {
		return
	}
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfstateAttributeData.RelativeParameter)
	if err != nil {
		return
	}
	if outPutArgs[relativeParameterData.Name] == nil {
		return
	}
	if outPutArgs[relativeParameterData.Name].(string) == tfstateAttributeData.RelativeParameterValue {
		argKey, argVal, err = reverseConvertDirect(parameterData, tfstateAttributeData, tfstateVal)
	} else {
		isDiscard = true
	}
	return
}

// convertContextAttr applies convertAttr only when the relative context
// parameter in reqParam equals the configured relative value.
func convertContextAttr(tfArgumentData *models.TfArgumentTable, reqParam map[string]interface{}, regionData *models.ResourceDataTable, sourceData *models.SourceTable) (arg interface{}, isDiscard bool, err error) {
	if tfArgumentData.Parameter == "" {
		arg = tfArgumentData.DefaultValue
		return
	}
	// validate that the tf_argument's own parameter exists
	if _, err = getParameterRowById(tfArgumentData.Parameter); err != nil {
		return
	}
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfArgumentData.RelativeParameter)
	if err != nil {
		return
	}
	if reqParam[relativeParameterData.Name] == nil {
		return
	}
	if reqParam[relativeParameterData.Name].(string) == tfArgumentData.RelativeParameterValue {
		arg, err = convertAttr(tfArgumentData, reqParam, regionData, tfArgumentData, sourceData)
	} else {
		isDiscard = true
	}
	return
}

// reverseConvertContextAttr applies reverseConvertAttr only when the relative
// context parameter in outPutArgs equals the configured relative value.
func reverseConvertContextAttr(parameterData *models.ParameterTable,
	tfstateAttributeData *models.TfstateAttributeTable,
	tfstateVal interface{},
	outPutArgs map[string]interface{},
	reqParam map[string]interface{},
	regionData *models.ResourceDataTable) (argKey string, argVal interface{}, isDiscard bool, err error) {

	argKey = parameterData.Name
	if tfstateVal == nil {
		return
	}
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfstateAttributeData.RelativeParameter)
	if err != nil {
		return
	}
	if outPutArgs[relativeParameterData.Name] == nil {
		return
	}
	if outPutArgs[relativeParameterData.Name].(string) == tfstateAttributeData.RelativeParameterValue {
		argKey, argVal, err = reverseConvertAttr(parameterData, tfstateAttributeData, tfstateVal, reqParam, regionData)
	} else {
		isDiscard = true
	}
	return
}

// convertContextTemplate applies convertTemplate only when the relative
// context parameter in reqParam equals the configured relative value.
func convertContextTemplate(tfArgumentData *models.TfArgumentTable, reqParam map[string]interface{}, regionData *models.ResourceDataTable, providerData *models.ProviderTable) (arg interface{}, isDiscard bool, err error) {
	if tfArgumentData.Parameter == "" {
		arg = tfArgumentData.DefaultValue
		return
	}
	// validate that the tf_argument's own parameter exists
	if _, err = getParameterRowById(tfArgumentData.Parameter); err != nil {
		return
	}
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfArgumentData.RelativeParameter)
	if err != nil {
		return
	}
	if reqParam[relativeParameterData.Name] == nil {
		return
	}
	if reqParam[relativeParameterData.Name].(string) == tfArgumentData.RelativeParameterValue {
		arg, err = convertTemplate(providerData, reqParam, tfArgumentData)
	} else {
		isDiscard = true
	}
	return
}

// reverseConvertContextTemplate applies reverseConvertTemplate only when the
// relative context parameter in outPutArgs equals the configured value.
func reverseConvertContextTemplate(parameterData *models.ParameterTable,
	tfstateAttributeData *models.TfstateAttributeTable,
	tfstateVal interface{},
	outPutArgs map[string]interface{},
	reqParam map[string]interface{},
	regionData *models.ResourceDataTable,
	providerData *models.ProviderTable) (argKey string, argVal interface{}, isDiscard bool, err error) {

	argKey = parameterData.Name
	if tfstateVal == nil {
		return
	}
	var relativeParameterData *models.ParameterTable
	relativeParameterData, err = getParameterRowById(tfstateAttributeData.RelativeParameter)
	if err != nil {
		return
	}
	if outPutArgs[relativeParameterData.Name] == nil {
		return
	}
	if outPutArgs[relativeParameterData.Name].(string) == tfstateAttributeData.RelativeParameterValue {
		argKey, argVal, err = reverseConvertTemplate(parameterData, providerData, tfstateVal)
	} else {
		isDiscard = true
	}
	return
}

// convertDirect maps a request parameter straight onto a tf_argument value,
// reconciling the parameter's Multiple flag with the tf_argument's IsMulti
// flag and honoring the RandomFlag default (generates a 16-char random id
// which is written back into reqParam). defaultValue is kept for interface
// compatibility; the effective default is tfArgument.DefaultValue.
func convertDirect(defaultValue string, reqParam map[string]interface{}, tfArgument *models.TfArgumentTable) (arg interface{}, err error) {
	if tfArgument.Parameter == "" {
		arg = tfArgument.DefaultValue
		return
	}
	var parameterData *models.ParameterTable
	parameterData, err = getParameterRowById(tfArgument.Parameter)
	if err != nil {
		return
	}

	var reqArg interface{}
	if _, ok := reqParam[parameterData.Name]; !ok {
		// parameter not supplied at all: leave arg nil
		return
	} else if reqParam[parameterData.Name] == nil {
		reqArg = tfArgument.DefaultValue
	} else {
		reqArg = reqParam[parameterData.Name]
	}

	if parameterData.DataType == "object" {
		// round-trip through JSON to normalize the value into plain maps
		if parameterData.Multiple == "N" {
			var curArg map[string]interface{}
			tmpMarshal, _ := json.Marshal(reqParam[parameterData.Name])
			json.Unmarshal(tmpMarshal, &curArg)
			if tfArgument.IsMulti == "N" {
				arg = curArg
			} else {
				arg = []map[string]interface{}{curArg}
			}
		} else {
			var curArg []map[string]interface{}
			tmpMarshal, _ := json.Marshal(reqParam[parameterData.Name])
			json.Unmarshal(tmpMarshal, &curArg)
			if tfArgument.IsMulti == "Y" {
				arg = curArg
			} else if len(curArg) > 0 { // guard: an empty list used to panic on curArg[0]
				arg = curArg[0]
			}
		}
	} else {
		if parameterData.Multiple == "N" {
			if tfArgument.IsMulti == "Y" {
				arg = []interface{}{reqArg}
			} else {
				arg = reqArg
			}
		} else {
			tmpInputArg := reqArg.([]interface{})
			if tfArgument.IsMulti == "Y" {
				arg = tmpInputArg
			} else if len(tmpInputArg) > 0 { // guard: an empty list used to panic on [0]
				arg = tmpInputArg[0]
			}
		}
	}

	// RandomFlag handling: generate a fresh 16-char value and write it back
	if tfArgument.DefaultValue == models.RandomFlag && (arg == nil || arg == "") {
		randomVal := guid.CreateGuid()
		arg = randomVal[:16]
		reqParam[parameterData.Name] = arg
	} else if s, ok := arg.(string); ok && s == models.RandomFlag {
		randomVal := guid.CreateGuid()
		arg = randomVal[:16]
		reqParam[parameterData.Name] = arg
	}
	return
}

// convertFunction is a placeholder for "function"-type tf_arguments: it only
// validates that the configured parameter exists and returns a nil arg.
func convertFunction(tfArgumentData *models.TfArgumentTable, reqParam map[string]interface{}, tfArgument *models.TfArgumentTable) (arg interface{}, err error) {
	_, err = getParameterRowById(tfArgument.Parameter)
	return
}

// reverseConvertFunction maps a tfstate value back to a parameter value by
// applying the tfstate attribute's function_define (Remove / Split / Replace
// / Regx). A "Return" of the form "result[i]" selects element i from each
// per-value result list; "result" returns the whole list.
func reverseConvertFunction(parameterData *models.ParameterTable, tfstateAttributeData *models.TfstateAttributeTable, tfstateVal interface{}) (argKey string, argVal interface{}, err error) {
	argKey = parameterData.Name
	if tfstateVal == nil {
		return
	}
	functionDefine := tfstateAttributeData.FunctionDefine
	var functionDefineData models.FunctionDefine
	// fix: the unmarshal error used to be silently ignored
	if err = json.Unmarshal([]byte(functionDefine), &functionDefineData); err != nil {
		err = fmt.Errorf("Unmarshal the function_define:%s of tfstateAttribute:%s error:%s", functionDefine, tfstateAttributeData.Name, err.Error())
		log.Logger.Error("Unmarshal the function_define of tfstateAttribute error", log.String("function_define", functionDefine), log.String("tfstateAttribute", tfstateAttributeData.Name), log.Error(err))
		return
	}

	resultIdx := -1
	if functionDefineData.Return != "result" {
		idxStrStart := strings.Index(functionDefineData.Return, "[")
		idxStrEnd := strings.Index(functionDefineData.Return, "]")
		if idxStrStart == -1 || idxStrEnd == -1 || idxStrStart >= idxStrEnd {
			err = fmt.Errorf("The function_define return_value: %s of tfstateAttribute:%s config error", functionDefineData.Return, tfstateAttributeData.Name)
			log.Logger.Error("The function_define return_value of tfstateAttribute config error", log.String("return_value", functionDefineData.Return), log.String("tfstateAttribute", tfstateAttributeData.Name), log.Error(err))
			return
		}
		resultIdx, _ = strconv.Atoi(functionDefineData.Return[idxStrStart+1 : idxStrEnd])
	}

	var result []interface{}
	if tfstateAttributeData.Type == "object" {
		handleTfstateVals := []map[string]interface{}{}
		if tfstateAttributeData.IsMulti == "Y" {
			handleTfstateVals = append(handleTfstateVals, tfstateVal.([]map[string]interface{})...)
		} else {
			handleTfstateVals = append(handleTfstateVals, tfstateVal.(map[string]interface{}))
		}

		if functionDefineData.Function == models.FunctionConvertFunctionDefineName["Remove"] {
			for _, val := range handleTfstateVals {
				// copy the object, then strip every configured remove_key
				// (fix: the original only ever applied the first key and
				// panicked when remove_key was empty)
				tmpVal := make(map[string]string)
				for k, v := range val {
					tmpVal[k] = v.(string)
				}
				for _, removeKey := range functionDefineData.Args.RemoveKey {
					delete(tmpVal, removeKey)
				}
				result = append(result, tmpVal)
			}
		}
	} else {
		handleTfstateVals := []string{}
		if tfstateAttributeData.IsMulti == "Y" {
			tmpData := tfstateVal.([]interface{})
			for i := range tmpData {
				handleTfstateVals = append(handleTfstateVals, tmpData[i].(string))
			}
		} else {
			handleTfstateVals = append(handleTfstateVals, tfstateVal.(string))
		}
		if functionDefineData.Function == models.FunctionConvertFunctionDefineName["Split"] {
			for _, tfstateValStr := range handleTfstateVals {
				splitResult := [][]string{}
				for _, splitChar := range functionDefineData.Args.SplitChar {
					curResult := strings.Split(tfstateValStr, splitChar)
					// pad a single/empty-tail split so index 1 stays usable
					if len(curResult) < 2 || curResult[1] == "" {
						curResult = append(curResult, curResult[0])
					}
					splitResult = append(splitResult, curResult)
				}
				if len(splitResult) == 0 {
					continue
				}
				if resultIdx == -1 {
					result = append(result, splitResult[0])
				} else if resultIdx < len(splitResult[0]) { // guard out-of-range index
					result = append(result, splitResult[0][resultIdx])
				}
			}
		} else if functionDefineData.Function == models.FunctionConvertFunctionDefineName["Replace"] {
			for _, tfstateValStr := range handleTfstateVals {
				replaceResult := []string{}
				replaceVals := functionDefineData.Args.ReplaceVal
				for i := range replaceVals {
					for oldStr, newStr := range replaceVals[i] {
						replaceResult = append(replaceResult, strings.Replace(tfstateValStr, oldStr, newStr, -1))
					}
				}
				if len(replaceResult) == 0 {
					continue
				}
				if resultIdx == -1 {
					result = append(result, replaceResult[0])
				} else if resultIdx < len(replaceResult[0]) {
					// NOTE(review): indexing a string yields a single byte, as
					// in the original code — confirm an indexed Return is
					// really meant for Replace
					result = append(result, replaceResult[0][resultIdx])
				}
			}
		} else if functionDefineData.Function == models.FunctionConvertFunctionDefineName["Regx"] {
			for _, tfstateValStr := range handleTfstateVals {
				regxResult := [][]string{}
				for _, regExpr := range functionDefineData.Args.RegExp {
					// fix: a bad pattern (MustCompile) or a non-match (nil
					// submatch sliced with [1:]) used to panic
					regExp, regErr := regexp.Compile(regExpr)
					if regErr != nil {
						err = fmt.Errorf("The function_define reg_exp:%s of tfstateAttribute:%s config error:%s", regExpr, tfstateAttributeData.Name, regErr.Error())
						log.Logger.Error("The function_define reg_exp of tfstateAttribute config error", log.String("reg_exp", regExpr), log.String("tfstateAttribute", tfstateAttributeData.Name), log.Error(err))
						return
					}
					curResult := regExp.FindStringSubmatch(tfstateValStr)
					if len(curResult) == 0 {
						continue
					}
					// the first element is the original string itself
					regxResult = append(regxResult, curResult[1:])
				}
				if len(regxResult) == 0 {
					continue
				}
				if resultIdx == -1 {
					result = append(result, regxResult[0])
				} else if resultIdx < len(regxResult[0]) { // guard out-of-range index
					result = append(result, regxResult[0][resultIdx])
				}
			}
		} else {
			err = fmt.Errorf("The function_define:%s of tfstateAttribute:%s config error", functionDefine, tfstateAttributeData.Name)
			log.Logger.Error("The function_define of tfstateAttribute config error", log.String("function_define", functionDefine), log.String("tfstateAttribute", tfstateAttributeData.Name), log.Error(err))
			return
		}
	}

	argKey = parameterData.Name
	if parameterData.Multiple == "Y" {
		tmpRes := []interface{}{}
		tmpRes = append(tmpRes, result...)
		argVal = tmpRes
	} else {
		// guard: an empty result used to panic on result[0]
		if len(result) == 0 {
			err = fmt.Errorf("The function_define:%s of tfstateAttribute:%s produced no result", functionDefine, tfstateAttributeData.Name)
			log.Logger.Warn("The function_define of tfstateAttribute produced no result", log.String("function_define", functionDefine), log.String("tfstateAttribute", tfstateAttributeData.Name), log.Error(err))
			return
		}
		argVal = result[0]
	}
	return
}

// reverseConvertDirect maps a tfstate value straight back to the parameter,
// reconciling the attribute's IsMulti flag with the parameter's Multiple flag.
func reverseConvertDirect(parameterData *models.ParameterTable, tfstateAttributeData *models.TfstateAttributeTable, tfstateVal interface{}) (argKey string, argVal interface{}, err error) {
	argKey = parameterData.Name
	if tfstateVal == nil {
		return
	}
	var result []interface{}
	if tfstateAttributeData.IsMulti == "Y" {
		result = tfstateVal.([]interface{})
	} else {
		result = append(result, tfstateVal)
	}

	if parameterData.Multiple == "Y" {
		tmpRes := []interface{}{}
		tmpRes = append(tmpRes, result...)
		argVal = tmpRes
	} else {
		// guard: an empty multi-value attribute used to panic on result[0]
		if len(result) == 0 {
			return
		}
		argVal = result[0]
	}
	return
}

func handleReverseConvert(outPutParameterNameMap map[string]*models.ParameterTable,
	outPutParameterIdMap map[string]*models.ParameterTable,
	tfstateAttrParamMap map[string]*models.TfstateAttributeTable,
	tfstateAttrNameMap
map[string]*models.TfstateAttributeTable, + tfstateAttrIdMap map[string]*models.TfstateAttributeTable, + reqParam map[string]interface{}, + providerData *models.ProviderTable, + tfstateFileAttributes map[string]interface{}, + action string, + parentObjectName string, + tfstateAttributeList []*models.TfstateAttributeTable, + paramCnt *int, + regionData *models.ResourceDataTable) (outPutArgs map[string]interface{}, err error) { + + var errorTfstateAttr *models.TfstateAttributeTable + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("HandleReverseConvert error, TfstateAttr:%s convert error:%v", errorTfstateAttr.Name, r) + } + }() + + outPutArgs = make(map[string]interface{}) + curLevelResult := make(map[string]interface{}) + + // 循环遍历每个 tfstateAttribute 进行 reverseConvert 生成输出参数 + for _, tfstateAttr := range tfstateAttributeList { + errorTfstateAttr = tfstateAttr + if tfstateAttr.ObjectName == parentObjectName { + // handle current level tfstateAttribute + // if tfstateAttr.Type == "object" && tfstateAttr.Name != "tags" { + if tfstateAttr.Type == "object" { + // go into next level + var curTfstateFileAttributes []interface{} + var curAttributesRet []interface{} + if tfstateAttr.IsMulti == "Y" { + tmpData := tfstateFileAttributes[tfstateAttr.Name].([]interface{}) + for _, v := range tmpData { + curTfstateFileAttributes = append(curTfstateFileAttributes, v) + } + } else { + curTfstateFileAttributes = append(curTfstateFileAttributes, tfstateFileAttributes[tfstateAttr.Name]) + } + for i := range curTfstateFileAttributes { + var tmpCurTfstateFileAttributes map[string]interface{} + tmpMarshal, _ := json.Marshal(curTfstateFileAttributes[i]) + json.Unmarshal(tmpMarshal, &tmpCurTfstateFileAttributes) + ret, tmpErr := handleReverseConvert(outPutParameterNameMap, + outPutParameterIdMap, + tfstateAttrParamMap, + tfstateAttrNameMap, + tfstateAttrIdMap, + reqParam, + providerData, + tmpCurTfstateFileAttributes, + action, + tfstateAttr.Id, + tfstateAttributeList, + 
paramCnt,
+ regionData)
+ if tmpErr != nil {
+ err = fmt.Errorf("Reverse convert tfstateAttr:%s error:%s", tfstateAttr.Name, tmpErr.Error())
+ log.Logger.Error("Revese convert tfstateAttr error", log.String("tfstateAttr", tfstateAttr.Name), log.Error(err))
+ return
+ }
+ curAttributesRet = append(curAttributesRet, ret)
+ }
+ /*
+ if tfstateAttr.ConvertWay == "" {
+ *paramCnt += 1
+ outPutArgs[models.TerraformOutPutPrefix+strconv.Itoa(*paramCnt)] = curAttributesRet
+ } else {
+ outPutArgs[outPutParameterIdMap[tfstateAttr.Parameter].Name] = curAttributesRet
+ }
+ */
+ if tfstateAttr.Parameter == "" || tfstateAttr.Name == "tags" {
+ *paramCnt += 1
+ outPutArgs[models.TerraformOutPutPrefix+strconv.Itoa(*paramCnt)] = curAttributesRet
+ } else {
+ outPutArgs[outPutParameterIdMap[tfstateAttr.Parameter].Name] = curAttributesRet
+ }
+
+ if tfstateAttr.Name != "tags" {
+ continue
+ }
+ } /*else {*/
+ curParamData := outPutParameterIdMap[tfstateAttr.Parameter]
+ if curParamData == nil {
+ continue
+ }
+ if tfstateOutParamVal, ok := tfstateFileAttributes[tfstateAttr.Name]; ok {
+ convertWay := tfstateAttr.ConvertWay
+ var outArgKey string
+ var outArgVal interface{}
+ var isDiscard = false
+ switch convertWay {
+ case models.ConvertWay["Data"]:
+ outArgKey, outArgVal, err = reverseConvertData(curParamData, tfstateAttr, tfstateOutParamVal, reqParam, regionData)
+ case models.ConvertWay["Template"]:
+ if tfstateAttr.DefaultValue != "" {
+ tfstateOutParamVal = tfstateAttr.DefaultValue
+ }
+ outArgKey, outArgVal, err = reverseConvertTemplate(curParamData, providerData, tfstateOutParamVal)
+ case models.ConvertWay["Attr"]:
+ outArgKey, outArgVal, err = reverseConvertAttr(curParamData, tfstateAttr, tfstateOutParamVal, reqParam, regionData)
+ case models.ConvertWay["ContextData"]:
+ outArgKey, outArgVal, isDiscard, err = reverseConvertContextData(curParamData, tfstateAttr, tfstateOutParamVal, curLevelResult, reqParam, regionData)
+ case models.ConvertWay["Direct"]:
+ outArgKey, 
outArgVal, err = reverseConvertDirect(curParamData, tfstateAttr, tfstateOutParamVal) + // outArgKey, outArgVal, err = curParamData.Name, tfstateOutParamVal, nil + case models.ConvertWay["Function"]: + outArgKey, outArgVal, err = reverseConvertFunction(curParamData, tfstateAttr, tfstateOutParamVal) + case models.ConvertWay["ContextDirect"]: + outArgKey, outArgVal, isDiscard, err = reverseConvertContextDirect(curParamData, tfstateAttr, tfstateOutParamVal, curLevelResult, reqParam, regionData) + case models.ConvertWay["ContextAttr"]: + outArgKey, outArgVal, isDiscard, err = reverseConvertContextAttr(curParamData, tfstateAttr, tfstateOutParamVal, curLevelResult, reqParam, regionData) + case models.ConvertWay["ContextTemplate"]: + outArgKey, outArgVal, isDiscard, err = reverseConvertContextTemplate(curParamData, tfstateAttr, tfstateOutParamVal, curLevelResult, reqParam, regionData, providerData) + default: + err = fmt.Errorf("The convertWay:%s of tfstateAttribute:%s is invalid", convertWay, tfstateAttr.Name) + log.Logger.Error("The convertWay of tfstateAttribute is invalid", log.String("convertWay", convertWay), log.String("tfstateAttribute", tfstateAttr.Name), log.Error(err)) + return + } + if isDiscard { + continue + } + + if action == "query" { + if outArgVal == nil || outArgVal == "" { + err = nil + } + if outArgKey == "" { + continue + } + } + + if err != nil { + err = fmt.Errorf("TfstateAttr:%s Reverse convert parameter:%s error:%s", tfstateAttr.Name, curParamData.Name, err.Error()) + log.Logger.Error("Revese convert parameter error", log.String("tfstateAttrName", tfstateAttr.Name), log.String("parameter", curParamData.Name), log.Error(err)) + return + } + + // check outArg type, string -> int + /* + if _, ok := outArgVal.(string); ok { + if curParamData.DataType == "int" { + // tmpVal, _ := strconv.Atoi(outArgVal.(string)) + // tmpVal := outArgVal.(float64) + tmpVal, _ := strconv.ParseFloat(fmt.Sprintf("%v", outArgVal), 64) + outArgVal = tmpVal + } + } + */ + if 
curParamData.DataType == "int" {
+ if tfstateAttr.IsMulti == "Y" {
+ if tmpVal, ok := outArgVal.([]string); ok {
+ tmpRes := []float64{}
+ for i := range tmpVal {
+ tmpRet, _ := strconv.ParseFloat(fmt.Sprintf("%v", tmpVal[i]), 64)
+ tmpRes = append(tmpRes, tmpRet)
+ }
+ outArgVal = tmpRes
+ }
+ } else {
+ if tmpVal, ok := outArgVal.(string); ok {
+ tmpRes, _ := strconv.ParseFloat(fmt.Sprintf("%v", tmpVal), 64)
+ outArgVal = tmpRes
+ }
+ }
+ } else if curParamData.DataType == "string" {
+ if tfstateAttr.IsMulti == "Y" {
+ if tmpVal, ok := outArgVal.([]float64); ok {
+ tmpRes := []string{}
+ for i := range tmpVal {
+ tmpRes = append(tmpRes, fmt.Sprintf("%.0f", tmpVal[i]))
+ }
+ outArgVal = tmpRes
+ }
+ } else {
+ if tmpVal, ok := outArgVal.(float64); ok {
+ outArgVal = fmt.Sprintf("%.0f", tmpVal)
+ }
+ }
+ }
+
+ // merger the tfstateAttributeVal if they have the same name
+ if _, ok := outPutArgs[outArgKey]; ok {
+ if _, ok := outPutArgs[outArgKey].([]interface{}); ok {
+ tmpData := outPutArgs[outArgKey].([]interface{})
+ tmpData = append(tmpData, outArgVal)
+ outPutArgs[outArgKey] = tmpData
+ } else {
+ tmpData := []interface{}{outPutArgs[outArgKey]}
+ tmpData = append(tmpData, outArgVal)
+ outPutArgs[outArgKey] = tmpData
+ }
+ } else {
+ outPutArgs[outArgKey] = outArgVal
+ }
+ curLevelResult[outArgKey] = outArgVal
+ } else {
+ outPutArgs[curParamData.Name] = ""
+ }
+ //}
+ } else {
+ continue
+ }
+ }
+ return
+}
+
+func getSortedSourceList(sourceList []*models.SourceTable, interfaceData *models.InterfaceTable, providerData *models.ProviderTable) (sortedSourceList []*models.SourceTable, err error) {
+ sortedSourceListIdMap := make(map[string]bool)
+ if len(sourceList) == 1 {
+ sortedSourceList = append(sortedSourceList, sourceList[0])
+ return
+ } else {
+ // get the first batch sourceListId
+ // sqlCmd := `SELECT DISTINCT(source) FROM tf_argument WHERE source NOT IN (SELECT id FROM source WHERE interface=? AND provider=?) 
AND (parameter is null AND relative_source is null)` + sqlCmd := `SELECT DISTINCT(source) FROM tf_argument WHERE source NOT IN (SELECT DISTINCT(source) FROM tf_argument WHERE source IN (SELECT id FROM source WHERE interface=? AND provider=?) AND parameter is null AND relative_source is not null) AND source IN (SELECT id FROM source WHERE interface=? AND provider=?)` + paramArgs := []interface{}{interfaceData.Id, providerData.Id, interfaceData.Id, providerData.Id} + var tmpTfArgumentList []*models.TfArgumentTable + err = x.SQL(sqlCmd, paramArgs...).Find(&tmpTfArgumentList) + if err != nil { + err = fmt.Errorf("Get first batch source ids by interface:%s and provider:%s error:%s", interfaceData.Id, providerData.Id, err.Error()) + log.Logger.Error("Get first batch source ids by interface and provider error", log.String("interface", interfaceData.Id), log.String("provider", providerData.Id), log.Error(err)) + return + } + if len(tmpTfArgumentList) == 0 { + err = fmt.Errorf("First batch source_ids can not be found by interface:%s and provider:%s", interfaceData.Id, providerData.Id) + log.Logger.Warn("First batch source ids can not be found by interface and provider", log.String("interface", interfaceData.Id), log.String("provider", providerData.Id), log.Error(err)) + return + } + + initAllSourceListIdMap := make(map[string]*models.SourceTable) + for i := range sourceList { + initAllSourceListIdMap[sourceList[i].Id] = sourceList[i] + } + + // delete the first batch sources in initAllSourceListIdMap + for i := range tmpTfArgumentList { + if _, ok := initAllSourceListIdMap[tmpTfArgumentList[i].Source]; ok { + sortedSourceListIdMap[tmpTfArgumentList[i].Source] = true + sortedSourceList = append(sortedSourceList, initAllSourceListIdMap[tmpTfArgumentList[i].Source]) + delete(initAllSourceListIdMap, tmpTfArgumentList[i].Source) + } else { + err = fmt.Errorf("TfArgument config error: there are some first batch sourceIds not in allSourceList") + log.Logger.Warn("TfArgument config 
error: there are some first batch sourceIds not in allSourceList", log.Error(err)) + return + } + } + + // get the all tf_argument data of each remain source list + tfArgumentListSourceIdMap := make(map[string][]*models.TfArgumentTable) + for sourceId := range initAllSourceListIdMap { + sqlCmd = `SELECT * FROM tf_argument WHERE source=?` + paramArgs = []interface{}{sourceId} + var tmpTfArgumentList []*models.TfArgumentTable + err = x.SQL(sqlCmd, paramArgs...).Find(&tmpTfArgumentList) + if err != nil { + err = fmt.Errorf("Get tfArgument data by source:%s error:%s", sourceId, err.Error()) + log.Logger.Error("Get tfArgument data by source error", log.String("source", sourceId), log.Error(err)) + return + } + if len(tmpTfArgumentList) == 0 { + err = fmt.Errorf("TfArgument data can not be found by source:%s", sourceId) + log.Logger.Warn("TfArgument data can not be found by source", log.String("source", sourceId), log.Error(err)) + return + } + for i := range tmpTfArgumentList { + tfArgumentListSourceIdMap[sourceId] = append(tfArgumentListSourceIdMap[sourceId], tmpTfArgumentList[i]) + } + } + + // get the second, the third batch sources ... 
+ remainCnt := len(initAllSourceListIdMap) + for remainCnt > 0 { + for sourceId := range initAllSourceListIdMap { + isValid := true + for _, tmpTfArgument := range tfArgumentListSourceIdMap[sourceId] { + if tmpTfArgument.Parameter != "" { + continue + } else { + if _, ok := sortedSourceListIdMap[tmpTfArgument.RelativeSource]; ok { + continue + } else { + isValid = false + break + } + } + } + if isValid == true { + sortedSourceListIdMap[sourceId] = true + sortedSourceList = append(sortedSourceList, initAllSourceListIdMap[sourceId]) + delete(initAllSourceListIdMap, sourceId) + } + } + if len(initAllSourceListIdMap) == remainCnt { + err = fmt.Errorf("TfArgument config error: there are some sourceIds can not be in sortedSourceList") + log.Logger.Warn("TfArgument config error: there are some sourceIds can not be in sortedSourceList", log.Error(err)) + return + } + remainCnt = len(initAllSourceListIdMap) + } + } + return +} + +func handleConStructObject(conStructObject *[]map[string]interface{}, inPutValSlice [][]interface{}, curObject map[string]interface{}, idx int) { + if idx == len(inPutValSlice) { + tmpObject := make(map[string]interface{}) + for k, v := range curObject { + tmpObject[k] = v + } + if len(tmpObject) > 0 { + *conStructObject = append(*conStructObject, tmpObject) + } + return + } + for i := 0; i < len(inPutValSlice[idx]); i++ { + tmpVal := inPutValSlice[idx][i].(map[string]interface{}) + for k, v := range tmpVal { + curObject[k] = v + } + handleConStructObject(conStructObject, inPutValSlice, curObject, idx+1) + for k, _ := range tmpVal { + delete(curObject, k) + } + } + return +} + +func handleConvertParams(action string, + sourceData *models.SourceTable, + tfArgumentList []*models.TfArgumentTable, + reqParam map[string]interface{}, + providerData *models.ProviderTable, + regionData *models.ResourceDataTable) (tfArguments map[string]interface{}, resourceAssetId interface{}, err error) { + + var errorTfArgument *models.TfArgumentTable + defer func() { + 
if r := recover(); r != nil { + err = fmt.Errorf("HandleConvertParams error, sourceName: %s, TfArgument:%s convert error:%v", sourceData.Name, errorTfArgument.Name, r) + } + }() + + // sort the tfArgumentList + tfArgumentIdMap := make(map[string]*models.TfArgumentTable) + orderTfArgumentList := []*models.TfArgumentTable{} + for i, v := range tfArgumentList { + if v.ObjectName == "" { + orderTfArgumentList = append(orderTfArgumentList, tfArgumentList[i]) + tfArgumentIdMap[v.Id] = tfArgumentList[i] + } + } + for i, v := range tfArgumentList { + if _, ok := tfArgumentIdMap[v.Id]; !ok { + orderTfArgumentList = append(orderTfArgumentList, tfArgumentList[i]) + tfArgumentIdMap[v.Id] = tfArgumentList[i] + } + } + + tfArgumentList = orderTfArgumentList + + tfArguments = make(map[string]interface{}) + // 循环处理每一个 tf_argument + for i := range tfArgumentList { + /* + if tfArgumentList[i].Parameter == "" { + tfArguments[tfArgumentList[i].Name] = tfArgumentList[i].DefaultValue + continue + } + // 查询 tfArgument 对应的 parameter + sqlCmd := `SELECT * FROM parameter WHERE id=?` + paramArgs := []interface{}{tfArgumentList[i].Parameter} + var parameterList []*models.ParameterTable + err = x.SQL(sqlCmd, paramArgs...).Find(¶meterList) + if err != nil { + err = fmt.Errorf("Get Parameter data by id:%s error:%s", tfArgumentList[i].Parameter, err.Error()) + log.Logger.Error("Get parameter data by id error", log.String("id", tfArgumentList[i].Parameter), log.Error(err)) + return + } + if len(parameterList) == 0 { + err = fmt.Errorf("Parameter data can not be found by id:%s", tfArgumentList[i].Parameter) + log.Logger.Warn("Parameter data can not be found by id", log.String("id", tfArgumentList[i].Parameter), log.Error(err)) + return + } + parameterData := parameterList[0] + + if _, ok := reqParam[parameterData.Name]; !ok { + continue + } + */ + errorTfArgument = tfArgumentList[i] + convertWay := tfArgumentList[i].ConvertWay + var arg interface{} + var isDiscard = false + switch convertWay { + 
case models.ConvertWay["Data"]: + // search resource_data table,get resource_asset_id by resource_id and resource(which is relative_source column in tf_argument table ) + arg, err = convertData(tfArgumentList[i].RelativeSource, reqParam, regionData, tfArgumentList[i], sourceData) + case models.ConvertWay["Template"]: + arg, err = convertTemplate(providerData, reqParam, tfArgumentList[i]) + case models.ConvertWay["ContextData"]: + arg, isDiscard, err = convertContextData(tfArgumentList[i], reqParam, regionData, tfArgumentList[i], sourceData) + case models.ConvertWay["Attr"]: + // search resouce_data table by relative_source and 输入的值, 获取 tfstat_file 字段内容,找到relative_tfstate_attribute id(search tfstate_attribute table) 对应的 name, 获取其在 tfstate_file 中的值 + arg, err = convertAttr(tfArgumentList[i], reqParam, regionData, tfArgumentList[i], sourceData) + case models.ConvertWay["Direct"]: + arg, err = convertDirect(tfArgumentList[i].DefaultValue, reqParam, tfArgumentList[i]) + case models.ConvertWay["Function"]: + arg, err = convertFunction(tfArgumentList[i], reqParam, tfArgumentList[i]) + case models.ConvertWay["ContextDirect"]: + arg, isDiscard, err = convertContextDirect(tfArgumentList[i], reqParam, regionData) + case models.ConvertWay["ContextAttr"]: + arg, isDiscard, err = convertContextAttr(tfArgumentList[i], reqParam, regionData, sourceData) + case models.ConvertWay["ContextTemplate"]: + arg, isDiscard, err = convertContextTemplate(tfArgumentList[i], reqParam, regionData, providerData) + default: + err = fmt.Errorf("The convertWay:%s of tfArgument:%s is invalid", convertWay, tfArgumentList[i].Name) + log.Logger.Error("The convertWay of tfArgument is invalid", log.String("convertWay", convertWay), log.String("tfArgument", tfArgumentList[i].Name), log.Error(err)) + return + } + + if isDiscard { + continue + } + + // handle tfArgument that is not in tf.json file + if action == "apply" { + if tfArgumentList[i].Name == sourceData.AssetIdAttribute { + if arg != nil && arg != 
"" { + if resourceAssetId == "" || resourceAssetId == nil { + resourceAssetId = arg + } else { + if resourceAssetId != arg { + err = fmt.Errorf("Source:%s, tfArgument:%s two asset_id result is different error", sourceData.Name, tfArgumentList[i].Name) + log.Logger.Error("Convert parameter error: two asset_id result is different", log.String("sourceName", sourceData.Name), log.String("tfArgumentName", tfArgumentList[i].Name), log.String("parameterId", tfArgumentList[i].Parameter), log.Error(err)) + return + } + } + } + /* + if parameterData.Name == "id" { + // if arg != nil { + // resourceId = arg.(string) + // } + } else if parameterData.Name == "asset_id" { + if arg != nil { + resourceAssetId = arg.(string) + } + } + */ + continue + } + + // merge the input tfArgument + if tfArgumentList[i].ObjectName != "" { + relativeTfArgumentData := tfArgumentIdMap[tfArgumentList[i].ObjectName] + if relativeTfArgumentData != nil && relativeTfArgumentData.Type == "object" && relativeTfArgumentData.Name == "tags" { + // tmpVal := tfArguments[relativeTfArgumentData.Name].(map[string]interface{}) + // tmpVal[tfArgumentList[i].Name] = arg + // tfArguments[relativeTfArgumentData.Name] = tmpVal + if tfArguments[relativeTfArgumentData.Name] != nil { + // fmt.Printf("%v, %v, %T ## ", tfArguments[relativeTfArgumentData.Name], tfArguments[relativeTfArgumentData.Name] == nil, tfArguments[relativeTfArgumentData.Name]) + tmpVal := tfArguments[relativeTfArgumentData.Name].(map[string]interface{}) + if len(tmpVal) == 0 { + tmpVal = make(map[string]interface{}) + } + if arg != nil { + tmpVal[tfArgumentList[i].Name] = arg + } + tfArguments[relativeTfArgumentData.Name] = tmpVal + } else { + if arg != nil { + tmpVal := make(map[string]interface{}) + tmpVal[tfArgumentList[i].Name] = arg + tfArguments[relativeTfArgumentData.Name] = tmpVal + } + } + continue + } + } + } + + if action == "query" { + if arg == nil || arg == "" { + continue + } + } + + if err != nil { + err = fmt.Errorf("Source:%s, 
tfArgument:%s, Convert parameter:%s error:%s", sourceData.Name, tfArgumentList[i].Name, tfArgumentList[i].Parameter, err.Error())
+ log.Logger.Error("Convert parameter error", log.String("sourceName", sourceData.Name), log.String("tfArgumentName", tfArgumentList[i].Name), log.String("parameterId", tfArgumentList[i].Parameter), log.Error(err))
+ return
+ }
+
+ if arg == nil || arg == "" {
+ continue
+ }
+
+ // check the type string, int
+ if tfArgumentList[i].Type == "int" {
+ // tmpVal, ok := arg.(int)
+ // tmpVal, _ := strconv.Atoi(arg.(string))
+ // tmpVal := arg.(float64)
+ /*
+ tmpVal, _ := strconv.ParseFloat(fmt.Sprintf("%v", arg), 64)
+ arg = tmpVal
+ */
+ if tfArgumentList[i].IsMulti == "Y" {
+ if tmpVal, ok := arg.([]string); ok {
+ tmpRes := []float64{}
+ for i := range tmpVal {
+ tmpRet, _ := strconv.ParseFloat(fmt.Sprintf("%v", tmpVal[i]), 64)
+ tmpRes = append(tmpRes, tmpRet)
+ }
+ arg = tmpRes
+ }
+ } else {
+ if tmpVal, ok := arg.(string); ok {
+ tmpRes, _ := strconv.ParseFloat(fmt.Sprintf("%v", tmpVal), 64)
+ arg = tmpRes
+ }
+ }
+ } else if tfArgumentList[i].Type == "string" {
+ if tfArgumentList[i].IsMulti == "Y" {
+ if tmpVal, ok := arg.([]float64); ok {
+ tmpRes := []string{}
+ for i := range tmpVal {
+ tmpRes = append(tmpRes, fmt.Sprintf("%.0f", tmpVal[i]))
+ }
+ arg = tmpRes
+ }
+ } else {
+ if tmpVal, ok := arg.(float64); ok {
+ arg = fmt.Sprintf("%.0f", tmpVal)
+ }
+ }
+ }
+
+ // merger the tfArgument if they have the same name && tfArgument.IsMulti == "Y"
+ if _, ok := tfArguments[tfArgumentList[i].Name]; ok {
+ if tfArgumentList[i].IsMulti == "Y" {
+ tmpData := []interface{}{}
+ if _, ok := tfArguments[tfArgumentList[i].Name].([]interface{}); ok {
+ p := reflect.ValueOf(tfArguments[tfArgumentList[i].Name])
+ for idx := 0; idx < p.Len(); idx++ {
+ tmpData = append(tmpData, p.Index(idx).Interface())
+ }
+ } else {
+ tmpData = append(tmpData, tfArguments[tfArgumentList[i].Name])
+ }
+
+ if _, ok := arg.([]interface{}); ok {
+ p := reflect.ValueOf(arg)
+ for 
idx := 0; idx < p.Len(); idx++ { + tmpData = append(tmpData, p.Index(idx).Interface()) + } + } else { + tmpData = append(tmpData, arg) + } + tfArguments[tfArgumentList[i].Name] = tmpData + } else { + tfArguments[tfArgumentList[i].Name] = arg + } + } else { + tfArguments[tfArgumentList[i].Name] = arg + } + + // handle convert the object type tfArgument + if tfArgumentList[i].Type == "object" && tfArgumentList[i].Name != "tags" { + memberArgumentList := []*models.TfArgumentTable{} + for j := range tfArgumentList { + if tfArgumentList[j].ObjectName == tfArgumentList[i].Id { + memberArgumentList = append(memberArgumentList, tfArgumentList[j]) + } + } + if len(memberArgumentList) > 0 { + inputArgs := []map[string]interface{}{} + if tfArgumentList[i].IsMulti == "Y" { + // inputArgs = append(inputArgs, arg.([]map[string]interface{})...) + inputArgs = arg.([]map[string]interface{}) + } else { + inputArgs = append(inputArgs, arg.(map[string]interface{})) + } + tmpConvertResult := []interface{}{} + for k := range inputArgs { + if _, ok := reqParam[models.ResourceDataDebug]; ok { + inputArgs[k][models.ResourceDataDebug] = reqParam[models.ResourceDataDebug] + } + tmpRes, _, _ := handleConvertParams(action, + sourceData, + memberArgumentList, + inputArgs[k], + providerData, + regionData) + tmpConvertResult = append(tmpConvertResult, tmpRes) + } + + if tfArgumentList[i].IsMulti == "Y" { + tfArguments[tfArgumentList[i].Name] = tmpConvertResult + } else { + tfArguments[tfArgumentList[i].Name] = tmpConvertResult[0] + } + } + } + + // if arg != nil && convertWay == "direct" && parameterData.DataType == "string" && arg.(string) == "null" { + // delete(tfArguments, tfArgumentList[i].Name) + // } + } + return +} + +func handleTfstateOutPut(sourceData *models.SourceTable, + interfaceData *models.InterfaceTable, + reqParam map[string]interface{}, + regionData *models.ResourceDataTable, + providerData *models.ProviderTable, + action string, + dirPath string, + tfFileContentStr string, + 
resourceId string, + retOutput map[string]interface{}, + curDebugFileContent map[string]interface{}, + isInternalAction bool) (err error) { + + defer func() { + if r := recover(); r != nil { + err = fmt.Errorf("HandleTfstateOutPut error:%v", r) + } + }() + + sourceIdStr := sourceData.Id + // Get tfstate_attribute by sourceId + sqlCmd := "SELECT * FROM tfstate_attribute WHERE source IN ('" + sourceIdStr + "')" + var tfstateAttributeList []*models.TfstateAttributeTable + err = x.SQL(sqlCmd).Find(&tfstateAttributeList) + if err != nil { + err = fmt.Errorf("Get tfstate_attribute list error:%s", err.Error()) + log.Logger.Error("Get tfstate_attribute list error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + if len(tfstateAttributeList) == 0 { + err = fmt.Errorf("Tfstate_attribute list can not be found by source:%s", sourceIdStr) + log.Logger.Warn("Tfstate_attribute list can not be found by source", log.String("source", sourceIdStr), log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // Get parameter by interfaceId and type=out + sqlCmd = "SELECT * FROM parameter WHERE interface=? and type=?" 
+ paramArgs := []interface{}{interfaceData.Id, "output"} + var outPutParameterList []*models.ParameterTable + err = x.SQL(sqlCmd, paramArgs...).Find(&outPutParameterList) + if err != nil { + err = fmt.Errorf("Get outPutParameter list error:%s", err.Error()) + log.Logger.Error("Get outPutParameter list error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + if len(outPutParameterList) == 0 { + err = fmt.Errorf("OutPutParameter can not be found by interface:%s and type=out", interfaceData.Id) + log.Logger.Warn("OutPutParameter can not be found by interface and type", log.String("interface", interfaceData.Id), log.String("type", "out"), log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + var tfstateObjectTypeAttribute *models.TfstateAttributeTable + tfstateAttrParamMap := make(map[string]*models.TfstateAttributeTable) + tfstateAttrNameMap := make(map[string]*models.TfstateAttributeTable) + tfstateAttrIdMap := make(map[string]*models.TfstateAttributeTable) + for _, v := range tfstateAttributeList { + if v.Parameter == "" && v.ObjectName == "" { + tfstateObjectTypeAttribute = v + } else { + tfstateAttrParamMap[v.Parameter] = v + } + tfstateAttrNameMap[v.Name] = v + tfstateAttrIdMap[v.Id] = v + } + + if action == "apply" { + tfstateObjectTypeAttribute = nil + } + + sortTfstateAttributesList := []*models.SortTfstateAttributes{} + sortTfstateAttrParamMap := make(map[string]*models.SortTfstateAttributes) + for i := range tfstateAttributeList { + curSortTfstateAttr := &models.SortTfstateAttributes{TfstateAttr: tfstateAttributeList[i], Point: 1000, IsExist: false} + sortTfstateAttributesList = append(sortTfstateAttributesList, curSortTfstateAttr) + sortTfstateAttrParamMap[tfstateAttributeList[i].Parameter] = curSortTfstateAttr + } + + for _, v := range sortTfstateAttributesList { + relativeParam := v.TfstateAttr.RelativeParameter + if relativeParam != "" { + maxPoint := sortTfstateAttrParamMap[relativeParam].Point + if v.Point > 
maxPoint { + maxPoint = v.Point + } + if sortTfstateAttrParamMap[relativeParam].IsExist { + sortTfstateAttrParamMap[relativeParam].Point = maxPoint + 1 + } else { + sortTfstateAttrParamMap[relativeParam].Point = maxPoint + 10 + } + sortTfstateAttrParamMap[relativeParam].IsExist = true + } + } + + // sort sortTfstateAttributesList + sort.Slice(sortTfstateAttributesList, func(i int, j int) bool { + return sortTfstateAttributesList[i].Point > sortTfstateAttributesList[j].Point + }) + + orderTfstateAttrList := []*models.TfstateAttributeTable{} + for _, v := range sortTfstateAttributesList { + orderTfstateAttrList = append(orderTfstateAttrList, v.TfstateAttr) + } + + // sort the tfstateAttribute by the objectName + tmptfstateIdMap := make(map[string]*models.TfstateAttributeTable) + tmpOrderTfstateAttrList := []*models.TfstateAttributeTable{} + for i, v := range orderTfstateAttrList { + if v.ObjectName == "" { + tmpOrderTfstateAttrList = append(tmpOrderTfstateAttrList, orderTfstateAttrList[i]) + tmptfstateIdMap[v.Id] = orderTfstateAttrList[i] + } + } + for i, v := range orderTfstateAttrList { + if _, ok := tmptfstateIdMap[v.Id]; !ok { + tmpOrderTfstateAttrList = append(tmpOrderTfstateAttrList, orderTfstateAttrList[i]) + tmptfstateIdMap[v.Id] = orderTfstateAttrList[i] + } + } + orderTfstateAttrList = tmpOrderTfstateAttrList + + outPutParameterNameMap := make(map[string]*models.ParameterTable) + outPutParameterIdMap := make(map[string]*models.ParameterTable) + for _, v := range outPutParameterList { + outPutParameterNameMap[v.Name] = v + outPutParameterIdMap[v.Id] = v + } + + // Read terraform.tfstate 文件 + var tfstateFilePath string + tfstateFilePath = dirPath + "/terraform.tfstate" + tfstateFileData, err := ReadFile(tfstateFilePath) + if err != nil { + err = fmt.Errorf("Read tfstate file error:%s", err.Error()) + log.Logger.Error("Read tfstate file error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + tfstateFileContentStr := 
string(tfstateFileData) + var unmarshalTfstateFileData models.TfstateFileData + err = json.Unmarshal(tfstateFileData, &unmarshalTfstateFileData) + if err != nil { + err = fmt.Errorf("Unmarshal tfstate file data error:%s", err.Error()) + log.Logger.Error("Unmarshal tfstate file data error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + var tfstateFileAttributes map[string]interface{} + tfstateFileAttributes = unmarshalTfstateFileData.Resources[0].Instances[0].Attributes + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + curDebugFileContent["tf_json_new"] = tfFileContentStr + curDebugFileContent["tf_state_new"] = tfstateFileContentStr + curDebugFileContent["source_name"] = sourceData.Name + } + + if action == "apply" { + // 记录到 resource_data table + resourceDataId := guid.CreateGuid() + resourceDataSourceId := sourceData.Id + resourceDataResourceId := resourceId + resourceDataResourceAssetId := tfstateFileAttributes[sourceData.AssetIdAttribute] + createTime := time.Now().Format(models.DateTimeFormat) + createUser := reqParam["operator_user"].(string) + + if _, ok := reqParam[models.ResourceDataDebug]; ok { + // get resource_data_debug table + sqlCmd = "SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" 
+ var oldResourceDataDebugList []*models.ResourceDataTable + paramArgs := []interface{}{resourceDataSourceId, resourceDataResourceId, regionData.RegionId, resourceDataResourceAssetId} + err = x.SQL(sqlCmd, paramArgs...).Find(&oldResourceDataDebugList) + if err != nil { + err = fmt.Errorf("Get old_resource data_debug by resource:%s and resource_id:%s error: %s", resourceDataSourceId, resourceDataResourceId, err.Error()) + log.Logger.Error("Get old_resource_data_debug by resource and resource_id error", log.String("resource", resourceDataSourceId), log.String("resource_id", resourceDataResourceId), log.Error(err)) + retOutput["errorMessage"] = err.Error() + } + if len(oldResourceDataDebugList) == 0 { + curDebugFileContent["tf_json_old"] = "" + curDebugFileContent["tf_state_old"] = "" + } else { + curDebugFileContent["tf_json_old"] = oldResourceDataDebugList[0].TfFile + curDebugFileContent["tf_state_old"] = oldResourceDataDebugList[0].TfStateFile + } + + if len(oldResourceDataDebugList) == 0 { + // insert into resource_data_debug + _, err = x.Exec("INSERT INTO resource_data_debug(id,resource,resource_id,resource_asset_id,tf_file,tf_state_file,region_id,create_time,create_user,update_time,update_user) VALUE (?,?,?,?,?,?,?,?,?,?,?)", + resourceDataId, resourceDataSourceId, resourceDataResourceId, resourceDataResourceAssetId, tfFileContentStr, tfstateFileContentStr, regionData.RegionId, createTime, createUser, createTime, createUser) + + if _, ok := reqParam[models.ImportResourceDataTableId]; ok { + delId := reqParam[models.ImportResourceDataTableId].(string) + _, err = x.Exec("DELETE FROM resource_data_debug WHERE id=?", delId) + + delete(reqParam, models.ImportResourceDataTableId) + } + } else { + // update the oldResourceDataDebug item + tmpId := oldResourceDataDebugList[0].Id + tmpTfFile := tfFileContentStr + tmpTfStateFile := tfstateFileContentStr + // _, err = x.Exec("UPDATE resource_data_debug SET tf_file=?,tf_state_file=?,update_time=?,update_user=? 
WHERE id=?", + // tmpTfFile, tmpTfStateFile, createTime, createUser, tmpId) + _, err = x.Exec("UPDATE resource_data_debug SET id=?,tf_file=?,tf_state_file=?,update_time=?,update_user=? WHERE id=?", + resourceDataId, tmpTfFile, tmpTfStateFile, createTime, createUser, tmpId) + } + } else { + // get resource_data table + sqlCmd = "SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" + var oldResourceDataList []*models.ResourceDataTable + paramArgs := []interface{}{resourceDataSourceId, resourceDataResourceId, regionData.RegionId, resourceDataResourceAssetId} + err = x.SQL(sqlCmd, paramArgs...).Find(&oldResourceDataList) + if err != nil { + err = fmt.Errorf("Get old_resource data by resource:%s and resource_id:%s error: %s", resourceDataSourceId, resourceDataResourceId, err.Error()) + log.Logger.Error("Get old_resource_data by resource and resource_id error", log.String("resource", resourceDataSourceId), log.String("resource_id", resourceDataResourceId), log.Error(err)) + retOutput["errorMessage"] = err.Error() + } + if len(oldResourceDataList) == 0 { + _, err = x.Exec("INSERT INTO resource_data(id,resource,resource_id,resource_asset_id,tf_file,tf_state_file,region_id,create_time,create_user,update_time,update_user) VALUE (?,?,?,?,?,?,?,?,?,?,?)", + resourceDataId, resourceDataSourceId, resourceDataResourceId, resourceDataResourceAssetId, tfFileContentStr, tfstateFileContentStr, regionData.RegionId, createTime, createUser, createTime, createUser) + + if _, ok := reqParam[models.ImportResourceDataTableId]; ok { + delId := reqParam[models.ImportResourceDataTableId].(string) + _, err = x.Exec("DELETE FROM resource_data_debug WHERE id=?", delId) + + delete(reqParam, models.ImportResourceDataTableId) + } + } else { + // update the oldResourceDataDebug item + tmpId := oldResourceDataList[0].Id + tmpTfFile := tfFileContentStr + tmpTfStateFile := tfstateFileContentStr + // _, err = x.Exec("UPDATE resource_data SET 
tf_file=?,tf_state_file=?,update_time=?,update_user=? WHERE id=?", + // tmpTfFile, tmpTfStateFile, createTime, createUser, tmpId) + _, err = x.Exec("UPDATE resource_data SET id=?,tf_file=?,tf_state_file=?,update_time=?,update_user=? WHERE id=?", + resourceDataId, tmpTfFile, tmpTfStateFile, createTime, createUser, tmpId) + } + } + + if err != nil { + err = fmt.Errorf("Try to create resource_data fail,%s ", err.Error()) + log.Logger.Error("Try to create resource_data fail", log.Error(err)) + retOutput["errorMessage"] = err.Error() + } + } else if action == "query" { + /* + // record the resource_data into simulateResourceData struct + curSimulateResourceData := reqParam[models.SimulateResourceData].(map[string][]map[string]interface{}) + // curResourceDataSlice := curSimulateResourceData[sourceData.Id] + + curResourceData := make(map[string]interface{}) + curResourceData["resourceId"] = resourceId + curResourceData["resourceAssetId"] = tfstateFileAttributes[sourceData.AssetIdAttribute] + curResourceData["tfstateFile"] = tfstateFileContentStr + + // curResourceDataSlice = append(curResourceDataSlice, curResourceData) + curSimulateResourceData[sourceData.Id] = append(curSimulateResourceData[sourceData.Id], curResourceData) + */ + } + + if tfstateObjectTypeAttribute == nil { + var outPutArgs map[string]interface{} + parentObjectName := "" + paramCnt := 0 + outPutArgs, err = handleReverseConvert(outPutParameterNameMap, + outPutParameterIdMap, + tfstateAttrParamMap, + tfstateAttrNameMap, + tfstateAttrIdMap, + reqParam, + providerData, + tfstateFileAttributes, + action, + parentObjectName, + orderTfstateAttrList, + &paramCnt, + regionData) + if err != nil { + err = fmt.Errorf("Handle reverse convert error:%s", err.Error()) + log.Logger.Error("Handle revese convert error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + if action == "query" { + // record the resource_data into simulateResourceData struct + curSimulateResourceData :=
reqParam[models.SimulateResourceData].(map[string][]map[string]interface{}) + + curResourceData := make(map[string]interface{}) + curResourceData["resourceId"] = resourceId + curResourceData["assetIdAttribute"] = sourceData.AssetIdAttribute + curResourceData["tfstateFile"] = tfstateFileAttributes + curSimulateResourceData[sourceData.Id] = append(curSimulateResourceData[sourceData.Id], curResourceData) + } + + // handle outPutArgs + outPutResultList, _ := handleOutPutArgs(outPutArgs, outPutParameterNameMap, tfstateAttrParamMap, reqParam, isInternalAction) + + if !isInternalAction { + retOutput[models.TerraformOutPutPrefix] = outPutResultList + } + + if action == "query" { + curRes := reqParam[models.SimulateResourceDataResult].(map[string][]map[string]interface{}) + curRes[sourceData.Id] = append(curRes[sourceData.Id], outPutResultList...) + reqParam[models.SimulateResourceDataResult] = curRes + } + } else { + // 处理结果字段为 object 的情况 + var tfstateResult []map[string]interface{} + if tfstateObjectTypeAttribute.IsMulti == "Y" { + var tmpData []map[string]interface{} + tmpMarshal, _ := json.Marshal(tfstateFileAttributes[tfstateObjectTypeAttribute.Name]) + json.Unmarshal(tmpMarshal, &tmpData) + for i := range tmpData { + tfstateResult = append(tfstateResult, tmpData[i]) + } + } else { + var tmpData map[string]interface{} + tmpMarshal, _ := json.Marshal(tfstateFileAttributes[tfstateObjectTypeAttribute.Name]) + json.Unmarshal(tmpMarshal, &tmpData) + tfstateResult = append(tfstateResult, tmpData) + } + + if action == "query" { + // record the resource_data into simulateResourceData struct + curSimulateResourceData := reqParam[models.SimulateResourceData].(map[string][]map[string]interface{}) + + for i := range tfstateResult { + curResourceData := make(map[string]interface{}) + curResourceData["resourceId"] = resourceId + curResourceData["assetIdAttribute"] = sourceData.AssetIdAttribute + curResourceData["tfstateFile"] = tfstateResult[i] + 
curSimulateResourceData[sourceData.Id] = append(curSimulateResourceData[sourceData.Id], curResourceData) + } + } + + querryFilteredResult := []map[string]interface{}{} + outPutResultList := []map[string]interface{}{} + for i := range tfstateResult { + var outPutArgs map[string]interface{} + parentObjectName := tfstateObjectTypeAttribute.Id + paramCnt := 0 + outPutArgs, err = handleReverseConvert(outPutParameterNameMap, + outPutParameterIdMap, + tfstateAttrParamMap, + tfstateAttrNameMap, + tfstateAttrIdMap, + reqParam, + providerData, + tfstateResult[i], + action, + parentObjectName, + orderTfstateAttrList, + &paramCnt, + regionData) + + if err != nil { + err = fmt.Errorf("Handle reverse convert error:%s", err.Error()) + log.Logger.Error("Handle revese convert error", log.Error(err)) + retOutput["errorMessage"] = err.Error() + return + } + + // handle outPutArgs + tmpOutPutResult, _ := handleOutPutArgs(outPutArgs, outPutParameterNameMap, tfstateAttrParamMap, reqParam, isInternalAction) + outPutResultList = append(outPutResultList, tmpOutPutResult...)
+ //retOutput[models.TerraformOutPutPrefix] = outPutResultList + + if action == "query" { + if reqParam[models.SourceDataIdx] == 0 { + // action: query, check the result is valid + filterKeys := make(map[string]interface{}) + for k := range reqParam { + if _, ok := models.ExcludeFilterKeys[k]; !ok { + filterKeys[k] = reqParam[k] + } + } + for ix := range tmpOutPutResult { + isValid := true + for k, v := range filterKeys { + if tmpOutPutResult[ix][k] != v { + isValid = false + break + } + } + if isValid { + tmpFilterRes := make(map[string]interface{}) + tmpFilterRes[sourceData.AssetIdAttribute] = tfstateResult[i][sourceData.AssetIdAttribute] + tmpFilterRes["output"] = tmpOutPutResult[ix] + querryFilteredResult = append(querryFilteredResult, tmpFilterRes) + } + } + } + } + + } + if !isInternalAction { + retOutput[models.TerraformOutPutPrefix] = outPutResultList + } + if action == "query" { + curRes := reqParam[models.SimulateResourceDataResult].(map[string][]map[string]interface{}) + if reqParam[models.SourceDataIdx] == 0 { + curRes[sourceData.Id] = append(curRes[sourceData.Id], querryFilteredResult...) + reqParam[models.SimulateResourceDataResult] = curRes + } else { + curRes[sourceData.Id] = append(curRes[sourceData.Id], outPutResultList...) + reqParam[models.SimulateResourceDataResult] = curRes + } + } + } + return +} + +func getOldTfFile(curDebugFileContent map[string]interface{}, + regionData *models.ResourceDataTable, + sourceData *models.SourceTable, + resourceId string, + resourceAssetId string) (err error) { + + // get resource_data_debug table + sqlCmd := "SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" 
+ var oldResourceDataDebugList []*models.ResourceDataTable + paramArgs := []interface{}{sourceData.Id, resourceId, regionData.RegionId, resourceAssetId} + err = x.SQL(sqlCmd, paramArgs...).Find(&oldResourceDataDebugList) + if err != nil { + err = fmt.Errorf("Get old_resource data_debug by resource:%s and resource_id:%s error: %s", sourceData.Id, resourceId, err.Error()) + log.Logger.Error("Get old_resource_data_debug by resource and resource_id error", log.String("resource", sourceData.Id), log.String("resource_id", resourceId), log.Error(err)) + } + if len(oldResourceDataDebugList) == 0 { + curDebugFileContent["tf_json_old"] = "" + curDebugFileContent["tf_state_old"] = "" + } else { + curDebugFileContent["tf_json_old"] = oldResourceDataDebugList[0].TfFile + curDebugFileContent["tf_state_old"] = oldResourceDataDebugList[0].TfStateFile + } + return +} + +func deleteOldResourceData(sourceData *models.SourceTable, + regionData *models.ResourceDataTable, + resourceId string, + resourceAssetId string, + reqParam map[string]interface{}) (err error) { + + resourceDataSourceId := sourceData.Id + resourceDataResourceId := resourceId + resourceDataResourceAssetId := resourceAssetId + + sqlCmd := "SELECT * FROM resource_data WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" + if _, ok := reqParam[models.ResourceDataDebug]; ok { + sqlCmd = "SELECT * FROM resource_data_debug WHERE resource=? AND resource_id=? AND region_id=? AND resource_asset_id=?" 
+ } + var oldResourceDataList []*models.ResourceDataTable + paramArgs := []interface{}{resourceDataSourceId, resourceDataResourceId, regionData.RegionId, resourceDataResourceAssetId} + err = x.SQL(sqlCmd, paramArgs...).Find(&oldResourceDataList) + if err != nil { + err = fmt.Errorf("Get old_resource data_debug by resource:%s and resource_id:%s error: %s", resourceDataSourceId, resourceDataResourceId, err.Error()) + log.Logger.Error("Get old_resource_data_debug by resource and resource_id error", log.String("resource", resourceDataSourceId), log.String("resource_id", resourceDataResourceId), log.Error(err)) + } + if len(oldResourceDataList) > 0 { + resourceData := oldResourceDataList[0] + if _, ok := reqParam[models.ResourceDataDebug]; ok { + _, err = x.Exec("DELETE FROM resource_data_debug WHERE id=?", resourceData.Id) + } else { + _, err = x.Exec("DELETE FROM resource_data WHERE id=?", resourceData.Id) + } + } + return +} + +func compareObject(first, second map[string]interface{}) (result map[string]interface{}, diff int, message string) { + result = make(map[string]interface{}) + for k, v := range second { + result[k] = v + if v == nil { + if first[k] != nil { + // fmt.Printf("k: %s is nil,use first value:%v \n", k, first[k]) + result[k] = first[k] + } + continue + } + tmpFirV := fmt.Sprintf("%v", first[k]) + tmpSecV := fmt.Sprintf("%v", v) + if tmpSecV == "[]" || tmpSecV == "{}" || tmpSecV == "map[]" { + //fmt.Printf("k:%s fir:%s sec:%s \n",k, tmpFirV, tmpSecV) + if tmpFirV == "" || tmpFirV == "<nil>" { + continue + } + } + if tmpSecV != tmpFirV { + diff = 1 + message += fmt.Sprintf("Key:%s is diff with record:%s, real:%s \n", k, tmpFirV, tmpSecV) + } + } + message = strings.ReplaceAll(message, "<nil>", "null") + return +} + +func getFileAttrContent(filename string) models.TfFileAttrFetchResult { + result := models.TfFileAttrFetchResult{} + tfFileByte, _ := ioutil.ReadFile(filename) + result.FileContent = string(tfFileByte) + startFlag, startIndex, endIndex := 0, 0, 0 +
for i := strings.Index(result.FileContent, "\"attributes\":"); i < len(result.FileContent); i++ { + if result.FileContent[i] == 123 { + startFlag += 1 + if startIndex == 0 { + startIndex = i + } + continue + } + if result.FileContent[i] == 125 { + startFlag = startFlag - 1 + if startFlag == 0 { + endIndex = i + break + } + } + } + endIndex += 1 + result.StartIndex = startIndex + result.EndIndex = endIndex + result.AttrBytes = []byte(result.FileContent[startIndex:endIndex]) + return result +} diff --git a/terraform-server/services/db/tf_argument.go b/terraform-server/services/db/tf_argument.go new file mode 100644 index 00000000..493ea540 --- /dev/null +++ b/terraform-server/services/db/tf_argument.go @@ -0,0 +1,210 @@ +package db + +import ( + "fmt" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func TfArgumentList(paramsMap map[string]interface{}) (rowData []*models.TfArgumentQuery, err error) { + /* + sqlCmd := "SELECT * FROM tf_argument WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + */ + sqlCmd := "SELECT t1.*,t2.name AS object_name_title,t3.name AS source_title,t4.name AS parameter_title,t5.name AS " + + "relative_source_title,t6.name AS relative_tfstate_attribute_title,t7.name AS relative_parameter_title FROM tf_argument " + + "t1 LEFT JOIN tf_argument t2 ON t1.object_name=t2.id LEFT JOIN source t3 ON t3.id=t1.source LEFT JOIN parameter t4 " + + "ON t4.id=t1.parameter LEFT JOIN source t5 ON t5.id=t1.relative_source LEFT JOIN tfstate_attribute t6 ON t6.id=t1.relative_tfstate_attribute " + + "LEFT JOIN parameter t7 ON t7.id=t1.relative_parameter WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + "t1." + k + "=?" + paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY t1.id DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get tfArgument list error", log.Error(err)) + } + return +} + +func TfArgumentBatchCreate(user string, param []*models.TfArgumentTable) (rowData []*models.TfArgumentTable, err error) { + actions := []*execAction{} + tableName := "tf_argument" + createTime := time.Now().Format(models.DateTimeFormat) + + for i := range param { + id := guid.CreateGuid() + data := &models.TfArgumentTable{Id: id, Name: param[i].Name, Source: param[i].Source, Parameter: param[i].Parameter, DefaultValue: param[i].DefaultValue, + IsNull: param[i].IsNull, Type: param[i].Type, ObjectName: param[i].ObjectName, IsMulti: param[i].IsMulti, ConvertWay: param[i].ConvertWay, RelativeSource: param[i].RelativeSource, + RelativeTfstateAttribute: param[i].RelativeTfstateAttribute, RelativeParameter: param[i].RelativeParameter, RelativeParameterValue: param[i].RelativeParameterValue, + FunctionDefine: param[i].FunctionDefine, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime, KeyArgument: param[i].KeyArgument} + rowData = append(rowData, data) + } + + // 当 
transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["default_value"] = "true" + transNullStr["object_name"] = "true" + transNullStr["relative_source"] = "true" + transNullStr["relative_tfstate_attribute"] = "true" + transNullStr["relative_parameter"] = "true" + transNullStr["relative_parameter_value"] = "true" + transNullStr["source"] = "true" + transNullStr["parameter"] = "true" + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to create tf_argument fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create tf_argument fail,%s ", err.Error()) + } + return +} + +func TfArgumentBatchDelete(ids []string) (err error) { + actions := []*execAction{} + + idsStr := strings.Join(ids, "','") + sqlCmd := "SELECT * FROM tf_argument WHERE id IN ('" + idsStr + "')" + "ORDER BY object_name DESC" + var tfArgumentList []*models.TfArgumentTable + err = x.SQL(sqlCmd).Find(&tfArgumentList) + if err != nil { + log.Logger.Error("Get tfArgument list error", log.Error(err)) + } + + tmpIds := []string{} + for i := range tfArgumentList { + tmpIds = append(tmpIds, tfArgumentList[i].Id) + } + ids = tmpIds + + tableName := "tf_argument" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete tf_argument fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete tf_argument fail,%s ", err.Error()) + } + return +} + +func TfArgumentBatchUpdate(user string, param []*models.TfArgumentTable) (err error) { + actions := []*execAction{} + tableName := "tf_argument" + updateTime := time.Now().Format(models.DateTimeFormat) + + // 当 transNullStr 的 key 
表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["default_value"] = "true" + transNullStr["object_name"] = "true" + transNullStr["relative_source"] = "true" + transNullStr["relative_tfstate_attribute"] = "true" + transNullStr["relative_parameter"] = "true" + transNullStr["relative_parameter_value"] = "true" + transNullStr["source"] = "true" + transNullStr["parameter"] = "true" + + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to update tf_argument fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update tf_argument fail,%s ", err.Error()) + } + return +} + +func TfArgumentBatchCreateUpdate(user string, param []*models.TfArgumentTable) (rowData []*models.TfArgumentTable, err error) { + actions := []*execAction{} + tableName := "tf_argument" + createTime := time.Now().Format(models.DateTimeFormat) + updateDataIds := make(map[string]bool) + var parameterId string + + for i := range param { + var data *models.TfArgumentTable + if param[i].Id == "" { + parameterId = guid.CreateGuid() + data = &models.TfArgumentTable{Id: parameterId, Name: param[i].Name, Source: param[i].Source, Parameter: param[i].Parameter, DefaultValue: param[i].DefaultValue, + IsNull: param[i].IsNull, Type: param[i].Type, ObjectName: param[i].ObjectName, IsMulti: param[i].IsMulti, ConvertWay: param[i].ConvertWay, RelativeSource: param[i].RelativeSource, + RelativeTfstateAttribute: param[i].RelativeTfstateAttribute, RelativeParameter: param[i].RelativeParameter, RelativeParameterValue: param[i].RelativeParameterValue, + FunctionDefine: param[i].FunctionDefine, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime, KeyArgument: param[i].KeyArgument} + } 
else { + updateDataIds[param[i].Id] = true + parameterId = param[i].Id + data = &models.TfArgumentTable{Id: parameterId, Name: param[i].Name, Source: param[i].Source, Parameter: param[i].Parameter, DefaultValue: param[i].DefaultValue, + IsNull: param[i].IsNull, Type: param[i].Type, ObjectName: param[i].ObjectName, IsMulti: param[i].IsMulti, ConvertWay: param[i].ConvertWay, RelativeSource: param[i].RelativeSource, + RelativeTfstateAttribute: param[i].RelativeTfstateAttribute, RelativeParameter: param[i].RelativeParameter, RelativeParameterValue: param[i].RelativeParameterValue, + FunctionDefine: param[i].FunctionDefine, CreateUser: param[i].CreateUser, CreateTime: param[i].CreateTime, UpdateUser: user, UpdateTime: createTime, KeyArgument: param[i].KeyArgument} + } + rowData = append(rowData, data) + } + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["default_value"] = "true" + transNullStr["object_name"] = "true" + transNullStr["relative_source"] = "true" + transNullStr["relative_tfstate_attribute"] = "true" + transNullStr["relative_parameter"] = "true" + transNullStr["relative_parameter_value"] = "true" + transNullStr["source"] = "true" + transNullStr["parameter"] = "true" + + var tmpErr error + for i := range rowData { + var action *execAction + if _, ok := updateDataIds[rowData[i].Id]; ok { + action, tmpErr = GetUpdateTableExecAction(tableName, "id", rowData[i].Id, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to get update_tf_argument execAction fail,%s ", tmpErr.Error()) + return + } + } else { + action, tmpErr = GetInsertTableExecAction(tableName, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to create_tf_argument execAction fail,%s ", tmpErr.Error()) + return + } + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create or update tf_argument fail,%s ", err.Error()) + } + return +} \ 
No newline at end of file diff --git a/terraform-server/services/db/tfstate_attribute.go b/terraform-server/services/db/tfstate_attribute.go new file mode 100644 index 00000000..b4486689 --- /dev/null +++ b/terraform-server/services/db/tfstate_attribute.go @@ -0,0 +1,208 @@ +package db + +import ( + "fmt" + "strings" + "time" + + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common-lib/guid" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/common/log" + "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" +) + +func TfstateAttributeList(paramsMap map[string]interface{}) (rowData []*models.TfstateAttributeQuery, err error) { + /* + sqlCmd := "SELECT * FROM tfstate_attribute WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + k + "=?" + paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY create_time DESC" + */ + sqlCmd := "SELECT t1.*,t2.name AS object_name_title,t3.name AS source_title,t4.name AS parameter_title,t5.name AS " + + "relative_source_title,t6.name AS relative_tfstate_attribute_title,t7.name AS relative_parameter_title FROM tfstate_attribute " + + "t1 LEFT JOIN tfstate_attribute t2 ON t1.object_name=t2.id LEFT JOIN source t3 ON t3.id=t1.source LEFT JOIN parameter t4 " + + "ON t4.id=t1.parameter LEFT JOIN source t5 ON t5.id=t1.relative_source LEFT JOIN tfstate_attribute t6 ON t6.id=t1.relative_tfstate_attribute " + + "LEFT JOIN parameter t7 ON t7.id=t1.relative_parameter WHERE 1=1" + paramArgs := []interface{}{} + for k, v := range paramsMap { + sqlCmd += " AND " + "t1." + k + "=?" 
+ paramArgs = append(paramArgs, v) + } + sqlCmd += " ORDER BY t1.id DESC" + err = x.SQL(sqlCmd, paramArgs...).Find(&rowData) + if err != nil { + log.Logger.Error("Get tfstateAttribute list error", log.Error(err)) + } + return +} + +func TfstateAttributeBatchCreate(user string, param []*models.TfstateAttributeTable) (rowData []*models.TfstateAttributeTable, err error) { + actions := []*execAction{} + tableName := "tfstate_attribute" + createTime := time.Now().Format(models.DateTimeFormat) + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["default_value"] = "true" + transNullStr["object_name"] = "true" + transNullStr["relative_source"] = "true" + transNullStr["relative_tfstate_attribute"] = "true" + transNullStr["relative_parameter"] = "true" + transNullStr["relative_parameter_value"] = "true" + transNullStr["source"] = "true" + transNullStr["parameter"] = "true" + + for i := range param { + id := guid.CreateGuid() + data := &models.TfstateAttributeTable{Id: id, Name: param[i].Name, Source: param[i].Source, Parameter: param[i].Parameter, DefaultValue: param[i].DefaultValue, IsNull: param[i].IsNull, Type: param[i].Type, ObjectName: param[i].ObjectName, + IsMulti: param[i].IsMulti, ConvertWay: param[i].ConvertWay, RelativeSource: param[i].RelativeSource, RelativeTfstateAttribute: param[i].RelativeTfstateAttribute, RelativeParameter: param[i].RelativeParameter, + RelativeParameterValue: param[i].RelativeParameterValue, FunctionDefine: param[i].FunctionDefine, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + rowData = append(rowData, data) + } + + for i := range rowData { + action, tmpErr := GetInsertTableExecAction(tableName, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to create tfstate_attribute fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to 
create tfstate_attribute fail,%s ", err.Error()) + } + return +} + +func TfstateAttributeBatchDelete(ids []string) (err error) { + actions := []*execAction{} + + idsStr := strings.Join(ids, "','") + sqlCmd := "SELECT * FROM tfstate_attribute WHERE id IN ('" + idsStr + "')" + "ORDER BY object_name DESC" + var tfstateAttributeList []*models.TfstateAttributeTable + err = x.SQL(sqlCmd).Find(&tfstateAttributeList) + if err != nil { + log.Logger.Error("Get tfstateAttribute list error", log.Error(err)) + } + + tmpIds := []string{} + for i := range tfstateAttributeList { + tmpIds = append(tmpIds, tfstateAttributeList[i].Id) + } + ids = tmpIds + + tableName := "tfstate_attribute" + for i := range ids { + action, tmpErr := GetDeleteTableExecAction(tableName, "id", ids[i]) + if tmpErr != nil { + err = fmt.Errorf("Try to delete tfstate_attribute fail,%s ", tmpErr.Error()) + return + } + actions = append(actions, action) + } + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to delete tfstate_attribute fail,%s ", err.Error()) + } + return +} + +func TfstateAttributeBatchUpdate(user string, param []*models.TfstateAttributeTable) (err error) { + actions := []*execAction{} + tableName := "tfstate_attribute" + updateTime := time.Now().Format(models.DateTimeFormat) + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["default_value"] = "true" + transNullStr["object_name"] = "true" + transNullStr["relative_source"] = "true" + transNullStr["relative_tfstate_attribute"] = "true" + transNullStr["relative_parameter"] = "true" + transNullStr["relative_parameter_value"] = "true" + transNullStr["source"] = "true" + transNullStr["parameter"] = "true" + + for i := range param { + param[i].UpdateTime = updateTime + param[i].UpdateUser = user + action, tmpErr := GetUpdateTableExecAction(tableName, "id", param[i].Id, *param[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to update tfstate_attribute fail,%s 
", tmpErr.Error()) + return + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to update tfstate_attribute fail,%s ", err.Error()) + } + return +} + +func TfstateAttributeBatchCreateUpdate(user string, param []*models.TfstateAttributeTable) (rowData []*models.TfstateAttributeTable, err error) { + actions := []*execAction{} + tableName := "tfstate_attribute" + createTime := time.Now().Format(models.DateTimeFormat) + updateDataIds := make(map[string]bool) + var parameterId string + + for i := range param { + var data *models.TfstateAttributeTable + if param[i].Id == "" { + parameterId = guid.CreateGuid() + data = &models.TfstateAttributeTable{Id: parameterId, Name: param[i].Name, Source: param[i].Source, Parameter: param[i].Parameter, DefaultValue: param[i].DefaultValue, + IsNull: param[i].IsNull, Type: param[i].Type, ObjectName: param[i].ObjectName, IsMulti: param[i].IsMulti, ConvertWay: param[i].ConvertWay, RelativeSource: param[i].RelativeSource, + RelativeTfstateAttribute: param[i].RelativeTfstateAttribute, RelativeParameter: param[i].RelativeParameter, RelativeParameterValue: param[i].RelativeParameterValue, + FunctionDefine: param[i].FunctionDefine, CreateUser: user, CreateTime: createTime, UpdateUser: user, UpdateTime: createTime} + } else { + updateDataIds[param[i].Id] = true + parameterId = param[i].Id + data = &models.TfstateAttributeTable{Id: parameterId, Name: param[i].Name, Source: param[i].Source, Parameter: param[i].Parameter, DefaultValue: param[i].DefaultValue, + IsNull: param[i].IsNull, Type: param[i].Type, ObjectName: param[i].ObjectName, IsMulti: param[i].IsMulti, ConvertWay: param[i].ConvertWay, RelativeSource: param[i].RelativeSource, + RelativeTfstateAttribute: param[i].RelativeTfstateAttribute, RelativeParameter: param[i].RelativeParameter, RelativeParameterValue: param[i].RelativeParameterValue, FunctionDefine: param[i].FunctionDefine, CreateUser: param[i].CreateUser, 
CreateTime: param[i].CreateTime, UpdateUser: user, UpdateTime: createTime} + } + rowData = append(rowData, data) + } + + // 当 transNullStr 的 key 表示的字段为空时,表示需要将其插入 null + transNullStr := make(map[string]string) + transNullStr["default_value"] = "true" + transNullStr["object_name"] = "true" + transNullStr["relative_source"] = "true" + transNullStr["relative_tfstate_attribute"] = "true" + transNullStr["relative_parameter"] = "true" + transNullStr["relative_parameter_value"] = "true" + transNullStr["source"] = "true" + transNullStr["parameter"] = "true" + + var tmpErr error + for i := range rowData { + var action *execAction + if _, ok := updateDataIds[rowData[i].Id]; ok { + action, tmpErr = GetUpdateTableExecAction(tableName, "id", rowData[i].Id, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to get update_tfstate_attribute execAction fail,%s ", tmpErr.Error()) + return + } + } else { + action, tmpErr = GetInsertTableExecAction(tableName, *rowData[i], transNullStr) + if tmpErr != nil { + err = fmt.Errorf("Try to create_tfstate_attribute execAction fail,%s ", tmpErr.Error()) + return + } + } + actions = append(actions, action) + } + + err = transaction(actions) + if err != nil { + err = fmt.Errorf("Try to create or update tfstate_attribute fail,%s ", err.Error()) + } + return +} diff --git a/terraform-server/services/db/wecube.go b/terraform-server/services/db/wecube.go new file mode 100644 index 00000000..b74fa009 --- /dev/null +++ b/terraform-server/services/db/wecube.go @@ -0,0 +1,62 @@ +package db + +import "github.com/WeBankPartners/wecube-plugins-terraform/terraform-server/models" + +func GetAllDataModel() (result models.SyncDataModelResponse, err error) { + result = models.SyncDataModelResponse{Status: "OK", Message: "success"} + var attrTable []*models.SyncDataModelCiAttr + err = x.SQL("select id,ci_type,name,display_name,description,input_type,ref_ci_type from sys_ci_type_attr where status='created' order by 
ci_type,ui_form_order").Find(&attrTable) + if err != nil { + return + } + var ciTable []*models.SyncDataModelCiType + err = x.SQL("select id,display_name,description from sys_ci_type where status='created'").Find(&ciTable) + if err != nil { + return + } + attrMap := make(map[string][]*models.SyncDataModelCiAttr) + for _, attr := range attrTable { + if attr.DataType == "ref" || attr.DataType == "multiRef" { + attr.DataType = "ref" + } else if attr.DataType == "int" { + attr.DataType = "int" + } else { + attr.DataType = "str" + } + if attr.RefEntityName != "" { + attr.RefAttributeName = "id" + attr.RefPackageName = "wecmdb" + } + if attr.Name == "guid" { + tmpAttr := &models.SyncDataModelCiAttr{Name: "id", EntityName: attr.EntityName, Description: attr.Description, DataType: attr.DataType, RefPackageName: attr.RefPackageName, RefAttributeName: attr.RefAttributeName, RefEntityName: attr.RefEntityName} + if _, b := attrMap[attr.EntityName]; b { + attrMap[attr.EntityName] = append(attrMap[attr.EntityName], attr, tmpAttr) + } else { + attrMap[attr.EntityName] = []*models.SyncDataModelCiAttr{attr, tmpAttr} + } + } else if attr.Name == "key_name" { + tmpAttr := &models.SyncDataModelCiAttr{Name: "displayName", EntityName: attr.EntityName, Description: attr.Description, DataType: attr.DataType, RefPackageName: attr.RefPackageName, RefAttributeName: attr.RefAttributeName, RefEntityName: attr.RefEntityName} + if _, b := attrMap[attr.EntityName]; b { + attrMap[attr.EntityName] = append(attrMap[attr.EntityName], attr, tmpAttr) + } else { + attrMap[attr.EntityName] = []*models.SyncDataModelCiAttr{attr, tmpAttr} + } + } else { + if _, b := attrMap[attr.EntityName]; b { + attrMap[attr.EntityName] = append(attrMap[attr.EntityName], attr) + } else { + attrMap[attr.EntityName] = []*models.SyncDataModelCiAttr{attr} + } + } + } + + for _, ci := range ciTable { + if _, b := attrMap[ci.Name]; b { + ci.Attributes = attrMap[ci.Name] + } else { + ci.Attributes = 
[]*models.SyncDataModelCiAttr{} + } + result.Data = append(result.Data, ci) + } + return +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/.gitignore b/terraform-server/vendor/github.com/dgrijalva/jwt-go/.gitignore new file mode 100644 index 00000000..80bed650 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/.gitignore @@ -0,0 +1,4 @@ +.DS_Store +bin + + diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/.travis.yml b/terraform-server/vendor/github.com/dgrijalva/jwt-go/.travis.yml new file mode 100644 index 00000000..1027f56c --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/.travis.yml @@ -0,0 +1,13 @@ +language: go + +script: + - go vet ./... + - go test -v ./... + +go: + - 1.3 + - 1.4 + - 1.5 + - 1.6 + - 1.7 + - tip diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/LICENSE b/terraform-server/vendor/github.com/dgrijalva/jwt-go/LICENSE new file mode 100644 index 00000000..df83a9c2 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/LICENSE @@ -0,0 +1,8 @@ +Copyright (c) 2012 Dave Grijalva + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. + diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md b/terraform-server/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md new file mode 100644 index 00000000..7fc1f793 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/MIGRATION_GUIDE.md @@ -0,0 +1,97 @@ +## Migration Guide from v2 -> v3 + +Version 3 adds several new, frequently requested features. To do so, it introduces a few breaking changes. We've worked to keep these as minimal as possible. This guide explains the breaking changes and how you can quickly update your code. + +### `Token.Claims` is now an interface type + +The most requested feature from the 2.0 verison of this library was the ability to provide a custom type to the JSON parser for claims. This was implemented by introducing a new interface, `Claims`, to replace `map[string]interface{}`. We also included two concrete implementations of `Claims`: `MapClaims` and `StandardClaims`. + +`MapClaims` is an alias for `map[string]interface{}` with built in validation behavior. It is the default claims type when using `Parse`. The usage is unchanged except you must type cast the claims property. + +The old example for parsing a token looked like this.. + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is now directly mapped to... + +```go + if token, err := jwt.Parse(tokenString, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +`StandardClaims` is designed to be embedded in your custom type. 
You can supply a custom claims type with the new `ParseWithClaims` function. Here's an example of using a custom claims type. + +```go + type MyCustomClaims struct { + User string + *StandardClaims + } + + if token, err := jwt.ParseWithClaims(tokenString, &MyCustomClaims{}, keyLookupFunc); err == nil { + claims := token.Claims.(*MyCustomClaims) + fmt.Printf("Token for user %v expires %v", claims.User, claims.StandardClaims.ExpiresAt) + } +``` + +### `ParseFromRequest` has been moved + +To keep this library focused on the tokens without becoming overburdened with complex request processing logic, `ParseFromRequest` and its new companion `ParseFromRequestWithClaims` have been moved to a subpackage, `request`. The method signatues have also been augmented to receive a new argument: `Extractor`. + +`Extractors` do the work of picking the token string out of a request. The interface is simple and composable. + +This simple parsing example: + +```go + if token, err := jwt.ParseFromRequest(tokenString, req, keyLookupFunc); err == nil { + fmt.Printf("Token for user %v expires %v", token.Claims["user"], token.Claims["exp"]) + } +``` + +is directly mapped to: + +```go + if token, err := request.ParseFromRequest(req, request.OAuth2Extractor, keyLookupFunc); err == nil { + claims := token.Claims.(jwt.MapClaims) + fmt.Printf("Token for user %v expires %v", claims["user"], claims["exp"]) + } +``` + +There are several concrete `Extractor` types provided for your convenience: + +* `HeaderExtractor` will search a list of headers until one contains content. +* `ArgumentExtractor` will search a list of keys in request query and form arguments until one contains content. +* `MultiExtractor` will try a list of `Extractors` in order until one returns content. +* `AuthorizationHeaderExtractor` will look in the `Authorization` header for a `Bearer` token. 
+* `OAuth2Extractor` searches the places an OAuth2 token would be specified (per the spec): `Authorization` header and `access_token` argument +* `PostExtractionFilter` wraps an `Extractor`, allowing you to process the content before it's parsed. A simple example is stripping the `Bearer ` text from a header + + +### RSA signing methods no longer accept `[]byte` keys + +Due to a [critical vulnerability](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/), we've decided the convenience of accepting `[]byte` instead of `rsa.PublicKey` or `rsa.PrivateKey` isn't worth the risk of misuse. + +To replace this behavior, we've added two helper methods: `ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error)` and `ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error)`. These are just simple helpers for unpacking PEM encoded PKCS1 and PKCS8 keys. If your keys are encoded any other way, all you need to do is convert them to the `crypto/rsa` package's types. 
+ +```go + func keyLookupFunc(*Token) (interface{}, error) { + // Don't forget to validate the alg is what you expect: + if _, ok := token.Method.(*jwt.SigningMethodRSA); !ok { + return nil, fmt.Errorf("Unexpected signing method: %v", token.Header["alg"]) + } + + // Look up key + key, err := lookupPublicKey(token.Header["kid"]) + if err != nil { + return nil, err + } + + // Unpack key from PEM encoded PKCS8 + return jwt.ParseRSAPublicKeyFromPEM(key) + } +``` diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/README.md b/terraform-server/vendor/github.com/dgrijalva/jwt-go/README.md new file mode 100644 index 00000000..d358d881 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/README.md @@ -0,0 +1,100 @@ +# jwt-go + +[![Build Status](https://travis-ci.org/dgrijalva/jwt-go.svg?branch=master)](https://travis-ci.org/dgrijalva/jwt-go) +[![GoDoc](https://godoc.org/github.com/dgrijalva/jwt-go?status.svg)](https://godoc.org/github.com/dgrijalva/jwt-go) + +A [go](http://www.golang.org) (or 'golang' for search engine friendliness) implementation of [JSON Web Tokens](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html) + +**NEW VERSION COMING:** There have been a lot of improvements suggested since the version 3.0.0 released in 2016. I'm working now on cutting two different releases: 3.2.0 will contain any non-breaking changes or enhancements. 4.0.0 will follow shortly which will include breaking changes. See the 4.0.0 milestone to get an idea of what's coming. If you have other ideas, or would like to participate in 4.0.0, now's the time. If you depend on this library and don't want to be interrupted, I recommend you use your dependency mangement tool to pin to version 3. + +**SECURITY NOTICE:** Some older versions of Go have a security issue in the cryotp/elliptic. Recommendation is to upgrade to at least 1.8.3. See issue #216 for more detail. 
+ +**SECURITY NOTICE:** It's important that you [validate the `alg` presented is what you expect](https://auth0.com/blog/2015/03/31/critical-vulnerabilities-in-json-web-token-libraries/). This library attempts to make it easy to do the right thing by requiring key types match the expected alg, but you should take the extra step to verify it in your usage. See the examples provided. + +## What the heck is a JWT? + +JWT.io has [a great introduction](https://jwt.io/introduction) to JSON Web Tokens. + +In short, it's a signed JSON object that does something useful (for example, authentication). It's commonly used for `Bearer` tokens in Oauth 2. A token is made of three parts, separated by `.`'s. The first two parts are JSON objects, that have been [base64url](http://tools.ietf.org/html/rfc4648) encoded. The last part is the signature, encoded the same way. + +The first part is called the header. It contains the necessary information for verifying the last part, the signature. For example, which encryption method was used for signing and what key was used. + +The part in the middle is the interesting bit. It's called the Claims and contains the actual stuff you care about. Refer to [the RFC](http://self-issued.info/docs/draft-jones-json-web-token.html) for information about reserved keys and the proper way to add your own. + +## What's in the box? + +This library supports the parsing and verification as well as the generation and signing of JWTs. Current supported signing algorithms are HMAC SHA, RSA, RSA-PSS, and ECDSA, though hooks are present for adding your own. 
+ +## Examples + +See [the project documentation](https://godoc.org/github.com/dgrijalva/jwt-go) for examples of usage: + +* [Simple example of parsing and validating a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-Parse--Hmac) +* [Simple example of building and signing a token](https://godoc.org/github.com/dgrijalva/jwt-go#example-New--Hmac) +* [Directory of Examples](https://godoc.org/github.com/dgrijalva/jwt-go#pkg-examples) + +## Extensions + +This library publishes all the necessary components for adding your own signing methods. Simply implement the `SigningMethod` interface and register a factory method using `RegisterSigningMethod`. + +Here's an example of an extension that integrates with the Google App Engine signing tools: https://github.com/someone1/gcp-jwt-go + +## Compliance + +This library was last reviewed to comply with [RTF 7519](http://www.rfc-editor.org/info/rfc7519) dated May 2015 with a few notable differences: + +* In order to protect against accidental use of [Unsecured JWTs](http://self-issued.info/docs/draft-ietf-oauth-json-web-token.html#UnsecuredJWT), tokens using `alg=none` will only be accepted if the constant `jwt.UnsafeAllowNoneSignatureType` is provided as the key. + +## Project Status & Versioning + +This library is considered production ready. Feedback and feature requests are appreciated. The API should be considered stable. There should be very few backwards-incompatible changes outside of major version updates (and only with good reason). + +This project uses [Semantic Versioning 2.0.0](http://semver.org). Accepted pull requests will land on `master`. Periodically, versions will be tagged from `master`. You can find all the releases on [the project releases page](https://github.com/dgrijalva/jwt-go/releases). + +While we try to make it obvious when we make breaking changes, there isn't a great mechanism for pushing announcements out to users. 
You may want to use this alternative package include: `gopkg.in/dgrijalva/jwt-go.v3`. It will do the right thing WRT semantic versioning. + +**BREAKING CHANGES:*** +* Version 3.0.0 includes _a lot_ of changes from the 2.x line, including a few that break the API. We've tried to break as few things as possible, so there should just be a few type signature changes. A full list of breaking changes is available in `VERSION_HISTORY.md`. See `MIGRATION_GUIDE.md` for more information on updating your code. + +## Usage Tips + +### Signing vs Encryption + +A token is simply a JSON object that is signed by its author. this tells you exactly two things about the data: + +* The author of the token was in the possession of the signing secret +* The data has not been modified since it was signed + +It's important to know that JWT does not provide encryption, which means anyone who has access to the token can read its contents. If you need to protect (encrypt) the data, there is a companion spec, `JWE`, that provides this functionality. JWE is currently outside the scope of this library. + +### Choosing a Signing Method + +There are several signing methods available, and you should probably take the time to learn about the various options before choosing one. The principal design decision is most likely going to be symmetric vs asymmetric. + +Symmetric signing methods, such as HSA, use only a single secret. This is probably the simplest signing method to use since any `[]byte` can be used as a valid secret. They are also slightly computationally faster to use, though this rarely is enough to matter. Symmetric signing methods work the best when both producers and consumers of tokens are trusted, or even the same system. Since the same secret is used to both sign and validate tokens, you can't easily distribute the key for validation. + +Asymmetric signing methods, such as RSA, use different keys for signing and verifying tokens. 
This makes it possible to produce tokens with a private key, and allow any consumer to access the public key for verification. + +### Signing Methods and Key Types + +Each signing method expects a different object type for its signing keys. See the package documentation for details. Here are the most common ones: + +* The [HMAC signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodHMAC) (`HS256`,`HS384`,`HS512`) expect `[]byte` values for signing and validation +* The [RSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodRSA) (`RS256`,`RS384`,`RS512`) expect `*rsa.PrivateKey` for signing and `*rsa.PublicKey` for validation +* The [ECDSA signing method](https://godoc.org/github.com/dgrijalva/jwt-go#SigningMethodECDSA) (`ES256`,`ES384`,`ES512`) expect `*ecdsa.PrivateKey` for signing and `*ecdsa.PublicKey` for validation + +### JWT and OAuth + +It's worth mentioning that OAuth and JWT are not the same thing. A JWT token is simply a signed JSON object. It can be used anywhere such a thing is useful. There is some confusion, though, as JWT is the most common type of bearer token used in OAuth2 authentication. + +Without going too far down the rabbit hole, here's a description of the interaction of these technologies: + +* OAuth is a protocol for allowing an identity provider to be separate from the service a user is logging in to. For example, whenever you use Facebook to log into a different service (Yelp, Spotify, etc), you are using OAuth. +* OAuth defines several options for passing around authentication data. One popular method is called a "bearer token". A bearer token is simply a string that _should_ only be held by an authenticated user. Thus, simply presenting this token proves your identity. You can probably derive from here why a JWT might make a good bearer token. +* Because bearer tokens are used for authentication, it's important they're kept secret. 
This is why transactions that use bearer tokens typically happen over SSL. + +## More + +Documentation can be found [on godoc.org](http://godoc.org/github.com/dgrijalva/jwt-go). + +The command line utility included in this project (cmd/jwt) provides a straightforward example of token creation and parsing as well as a useful tool for debugging your own integration. You'll also find several implementation examples in the documentation. diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md b/terraform-server/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md new file mode 100644 index 00000000..63702983 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/VERSION_HISTORY.md @@ -0,0 +1,118 @@ +## `jwt-go` Version History + +#### 3.2.0 + +* Added method `ParseUnverified` to allow users to split up the tasks of parsing and validation +* HMAC signing method returns `ErrInvalidKeyType` instead of `ErrInvalidKey` where appropriate +* Added options to `request.ParseFromRequest`, which allows for an arbitrary list of modifiers to parsing behavior. Initial set include `WithClaims` and `WithParser`. Existing usage of this function will continue to work as before. +* Deprecated `ParseFromRequestWithClaims` to simplify API in the future. + +#### 3.1.0 + +* Improvements to `jwt` command line tool +* Added `SkipClaimsValidation` option to `Parser` +* Documentation updates + +#### 3.0.0 + +* **Compatibility Breaking Changes**: See MIGRATION_GUIDE.md for tips on updating your code + * Dropped support for `[]byte` keys when using RSA signing methods. This convenience feature could contribute to security vulnerabilities involving mismatched key types with signing methods. + * `ParseFromRequest` has been moved to `request` subpackage and usage has changed + * The `Claims` property on `Token` is now type `Claims` instead of `map[string]interface{}`. The default value is type `MapClaims`, which is an alias to `map[string]interface{}`. 
This makes it possible to use a custom type when decoding claims. +* Other Additions and Changes + * Added `Claims` interface type to allow users to decode the claims into a custom type + * Added `ParseWithClaims`, which takes a third argument of type `Claims`. Use this function instead of `Parse` if you have a custom type you'd like to decode into. + * Dramatically improved the functionality and flexibility of `ParseFromRequest`, which is now in the `request` subpackage + * Added `ParseFromRequestWithClaims` which is the `FromRequest` equivalent of `ParseWithClaims` + * Added new interface type `Extractor`, which is used for extracting JWT strings from http requests. Used with `ParseFromRequest` and `ParseFromRequestWithClaims`. + * Added several new, more specific, validation errors to error type bitmask + * Moved examples from README to executable example files + * Signing method registry is now thread safe + * Added new property to `ValidationError`, which contains the raw error returned by calls made by parse/verify (such as those returned by keyfunc or json parser) + +#### 2.7.0 + +This will likely be the last backwards compatible release before 3.0.0, excluding essential bug fixes. + +* Added new option `-show` to the `jwt` command that will just output the decoded token without verifying +* Error text for expired tokens includes how long it's been expired +* Fixed incorrect error returned from `ParseRSAPublicKeyFromPEM` +* Documentation updates + +#### 2.6.0 + +* Exposed inner error within ValidationError +* Fixed validation errors when using UseJSONNumber flag +* Added several unit tests + +#### 2.5.0 + +* Added support for signing method none. You shouldn't use this. The API tries to make this clear. 
+* Updated/fixed some documentation +* Added more helpful error message when trying to parse tokens that begin with `BEARER ` + +#### 2.4.0 + +* Added new type, Parser, to allow for configuration of various parsing parameters + * You can now specify a list of valid signing methods. Anything outside this set will be rejected. + * You can now opt to use the `json.Number` type instead of `float64` when parsing token JSON +* Added support for [Travis CI](https://travis-ci.org/dgrijalva/jwt-go) +* Fixed some bugs with ECDSA parsing + +#### 2.3.0 + +* Added support for ECDSA signing methods +* Added support for RSA PSS signing methods (requires go v1.4) + +#### 2.2.0 + +* Gracefully handle a `nil` `Keyfunc` being passed to `Parse`. Result will now be the parsed token and an error, instead of a panic. + +#### 2.1.0 + +Backwards compatible API change that was missed in 2.0.0. + +* The `SignedString` method on `Token` now takes `interface{}` instead of `[]byte` + +#### 2.0.0 + +There were two major reasons for breaking backwards compatibility with this update. The first was a refactor required to expand the width of the RSA and HMAC-SHA signing implementations. There will likely be no required code changes to support this change. + +The second update, while unfortunately requiring a small change in integration, is required to open up this library to other signing methods. Not all keys used for all signing methods have a single standard on-disk representation. Requiring `[]byte` as the type for all keys proved too limiting. Additionally, this implementation allows for pre-parsed tokens to be reused, which might matter in an application that parses a high volume of tokens with a small set of keys. Backwards compatibilty has been maintained for passing `[]byte` to the RSA signing methods, but they will also accept `*rsa.PublicKey` and `*rsa.PrivateKey`. 
+ +It is likely the only integration change required here will be to change `func(t *jwt.Token) ([]byte, error)` to `func(t *jwt.Token) (interface{}, error)` when calling `Parse`. + +* **Compatibility Breaking Changes** + * `SigningMethodHS256` is now `*SigningMethodHMAC` instead of `type struct` + * `SigningMethodRS256` is now `*SigningMethodRSA` instead of `type struct` + * `KeyFunc` now returns `interface{}` instead of `[]byte` + * `SigningMethod.Sign` now takes `interface{}` instead of `[]byte` for the key + * `SigningMethod.Verify` now takes `interface{}` instead of `[]byte` for the key +* Renamed type `SigningMethodHS256` to `SigningMethodHMAC`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodHS256` + * Added public package global `SigningMethodHS384` + * Added public package global `SigningMethodHS512` +* Renamed type `SigningMethodRS256` to `SigningMethodRSA`. Specific sizes are now just instances of this type. + * Added public package global `SigningMethodRS256` + * Added public package global `SigningMethodRS384` + * Added public package global `SigningMethodRS512` +* Moved sample private key for HMAC tests from an inline value to a file on disk. Value is unchanged. +* Refactored the RSA implementation to be easier to read +* Exposed helper methods `ParseRSAPrivateKeyFromPEM` and `ParseRSAPublicKeyFromPEM` + +#### 1.0.2 + +* Fixed bug in parsing public keys from certificates +* Added more tests around the parsing of keys for RS256 +* Code refactoring in RS256 implementation. 
No functional changes + +#### 1.0.1 + +* Fixed panic if RS256 signing method was passed an invalid key + +#### 1.0.0 + +* First versioned release +* API stabilized +* Supports creating, signing, parsing, and validating JWT tokens +* Supports RS256 and HS256 signing methods \ No newline at end of file diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/claims.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/claims.go new file mode 100644 index 00000000..f0228f02 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/claims.go @@ -0,0 +1,134 @@ +package jwt + +import ( + "crypto/subtle" + "fmt" + "time" +) + +// For a type to be a Claims object, it must just have a Valid method that determines +// if the token is invalid for any supported reason +type Claims interface { + Valid() error +} + +// Structured version of Claims Section, as referenced at +// https://tools.ietf.org/html/rfc7519#section-4.1 +// See examples for how to use this with your own claim types +type StandardClaims struct { + Audience string `json:"aud,omitempty"` + ExpiresAt int64 `json:"exp,omitempty"` + Id string `json:"jti,omitempty"` + IssuedAt int64 `json:"iat,omitempty"` + Issuer string `json:"iss,omitempty"` + NotBefore int64 `json:"nbf,omitempty"` + Subject string `json:"sub,omitempty"` +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. +func (c StandardClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + // The claims below are optional, by default, so if they are set to the + // default value in Go, let's not fail the verification for them. 
+ if c.VerifyExpiresAt(now, false) == false { + delta := time.Unix(now, 0).Sub(time.Unix(c.ExpiresAt, 0)) + vErr.Inner = fmt.Errorf("token is expired by %v", delta) + vErr.Errors |= ValidationErrorExpired + } + + if c.VerifyIssuedAt(now, false) == false { + vErr.Inner = fmt.Errorf("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if c.VerifyNotBefore(now, false) == false { + vErr.Inner = fmt.Errorf("token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} + +// Compares the aud claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyAudience(cmp string, req bool) bool { + return verifyAud(c.Audience, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyExpiresAt(cmp int64, req bool) bool { + return verifyExp(c.ExpiresAt, cmp, req) +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuedAt(cmp int64, req bool) bool { + return verifyIat(c.IssuedAt, cmp, req) +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyIssuer(cmp string, req bool) bool { + return verifyIss(c.Issuer, cmp, req) +} + +// Compares the nbf claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (c *StandardClaims) VerifyNotBefore(cmp int64, req bool) bool { + return verifyNbf(c.NotBefore, cmp, req) +} + +// ----- helpers + +func verifyAud(aud string, cmp string, required bool) bool { + if aud == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(aud), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyExp(exp int64, now int64, required bool) bool { + if exp == 0 { + return !required + } + return now <= exp +} + +func verifyIat(iat int64, now int64, required bool) bool { + if iat == 0 { + return !required + } + return now >= iat +} + +func verifyIss(iss string, cmp string, required bool) bool { + if iss == "" { + return !required + } + if subtle.ConstantTimeCompare([]byte(iss), []byte(cmp)) != 0 { + return true + } else { + return false + } +} + +func verifyNbf(nbf int64, now int64, required bool) bool { + if nbf == 0 { + return !required + } + return now >= nbf +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/doc.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/doc.go new file mode 100644 index 00000000..a86dc1a3 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/doc.go @@ -0,0 +1,4 @@ +// Package jwt is a Go implementation of JSON Web Tokens: http://self-issued.info/docs/draft-jones-json-web-token.html +// +// See README.md for more info. 
+package jwt diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/ecdsa.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/ecdsa.go new file mode 100644 index 00000000..f9773812 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/ecdsa.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "crypto" + "crypto/ecdsa" + "crypto/rand" + "errors" + "math/big" +) + +var ( + // Sadly this is missing from crypto/ecdsa compared to crypto/rsa + ErrECDSAVerification = errors.New("crypto/ecdsa: verification error") +) + +// Implements the ECDSA family of signing methods signing methods +// Expects *ecdsa.PrivateKey for signing and *ecdsa.PublicKey for verification +type SigningMethodECDSA struct { + Name string + Hash crypto.Hash + KeySize int + CurveBits int +} + +// Specific instances for EC256 and company +var ( + SigningMethodES256 *SigningMethodECDSA + SigningMethodES384 *SigningMethodECDSA + SigningMethodES512 *SigningMethodECDSA +) + +func init() { + // ES256 + SigningMethodES256 = &SigningMethodECDSA{"ES256", crypto.SHA256, 32, 256} + RegisterSigningMethod(SigningMethodES256.Alg(), func() SigningMethod { + return SigningMethodES256 + }) + + // ES384 + SigningMethodES384 = &SigningMethodECDSA{"ES384", crypto.SHA384, 48, 384} + RegisterSigningMethod(SigningMethodES384.Alg(), func() SigningMethod { + return SigningMethodES384 + }) + + // ES512 + SigningMethodES512 = &SigningMethodECDSA{"ES512", crypto.SHA512, 66, 521} + RegisterSigningMethod(SigningMethodES512.Alg(), func() SigningMethod { + return SigningMethodES512 + }) +} + +func (m *SigningMethodECDSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an ecdsa.PublicKey struct +func (m *SigningMethodECDSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + // Get 
the key + var ecdsaKey *ecdsa.PublicKey + switch k := key.(type) { + case *ecdsa.PublicKey: + ecdsaKey = k + default: + return ErrInvalidKeyType + } + + if len(sig) != 2*m.KeySize { + return ErrECDSAVerification + } + + r := big.NewInt(0).SetBytes(sig[:m.KeySize]) + s := big.NewInt(0).SetBytes(sig[m.KeySize:]) + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + if verifystatus := ecdsa.Verify(ecdsaKey, hasher.Sum(nil), r, s); verifystatus == true { + return nil + } else { + return ErrECDSAVerification + } +} + +// Implements the Sign method from SigningMethod +// For this signing method, key must be an ecdsa.PrivateKey struct +func (m *SigningMethodECDSA) Sign(signingString string, key interface{}) (string, error) { + // Get the key + var ecdsaKey *ecdsa.PrivateKey + switch k := key.(type) { + case *ecdsa.PrivateKey: + ecdsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return r, s + if r, s, err := ecdsa.Sign(rand.Reader, ecdsaKey, hasher.Sum(nil)); err == nil { + curveBits := ecdsaKey.Curve.Params().BitSize + + if m.CurveBits != curveBits { + return "", ErrInvalidKey + } + + keyBytes := curveBits / 8 + if curveBits%8 > 0 { + keyBytes += 1 + } + + // We serialize the outpus (r and s) into big-endian byte arrays and pad + // them with zeros on the left to make sure the sizes work out. Both arrays + // must be keyBytes long, and the output must be 2*keyBytes long. + rBytes := r.Bytes() + rBytesPadded := make([]byte, keyBytes) + copy(rBytesPadded[keyBytes-len(rBytes):], rBytes) + + sBytes := s.Bytes() + sBytesPadded := make([]byte, keyBytes) + copy(sBytesPadded[keyBytes-len(sBytes):], sBytes) + + out := append(rBytesPadded, sBytesPadded...) 
+ + return EncodeSegment(out), nil + } else { + return "", err + } +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go new file mode 100644 index 00000000..d19624b7 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/ecdsa_utils.go @@ -0,0 +1,67 @@ +package jwt + +import ( + "crypto/ecdsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrNotECPublicKey = errors.New("Key is not a valid ECDSA public key") + ErrNotECPrivateKey = errors.New("Key is not a valid ECDSA private key") +) + +// Parse PEM encoded Elliptic Curve Private Key Structure +func ParseECPrivateKeyFromPEM(key []byte) (*ecdsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParseECPrivateKey(block.Bytes); err != nil { + return nil, err + } + + var pkey *ecdsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PrivateKey); !ok { + return nil, ErrNotECPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseECPublicKeyFromPEM(key []byte) (*ecdsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *ecdsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*ecdsa.PublicKey); !ok { + return nil, ErrNotECPublicKey + } + + return pkey, nil +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/errors.go 
b/terraform-server/vendor/github.com/dgrijalva/jwt-go/errors.go new file mode 100644 index 00000000..1c93024a --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/errors.go @@ -0,0 +1,59 @@ +package jwt + +import ( + "errors" +) + +// Error constants +var ( + ErrInvalidKey = errors.New("key is invalid") + ErrInvalidKeyType = errors.New("key is of invalid type") + ErrHashUnavailable = errors.New("the requested hash function is unavailable") +) + +// The errors that might occur when parsing and validating a token +const ( + ValidationErrorMalformed uint32 = 1 << iota // Token is malformed + ValidationErrorUnverifiable // Token could not be verified because of signing problems + ValidationErrorSignatureInvalid // Signature validation failed + + // Standard Claim validation errors + ValidationErrorAudience // AUD validation failed + ValidationErrorExpired // EXP validation failed + ValidationErrorIssuedAt // IAT validation failed + ValidationErrorIssuer // ISS validation failed + ValidationErrorNotValidYet // NBF validation failed + ValidationErrorId // JTI validation failed + ValidationErrorClaimsInvalid // Generic claims validation error +) + +// Helper for constructing a ValidationError with a string error message +func NewValidationError(errorText string, errorFlags uint32) *ValidationError { + return &ValidationError{ + text: errorText, + Errors: errorFlags, + } +} + +// The error from Parse if token is not valid +type ValidationError struct { + Inner error // stores the error returned by external dependencies, i.e.: KeyFunc + Errors uint32 // bitfield. see ValidationError... 
constants + text string // errors that do not have a valid error just have text +} + +// Validation error is an error type +func (e ValidationError) Error() string { + if e.Inner != nil { + return e.Inner.Error() + } else if e.text != "" { + return e.text + } else { + return "token is invalid" + } +} + +// No errors +func (e *ValidationError) valid() bool { + return e.Errors == 0 +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/hmac.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/hmac.go new file mode 100644 index 00000000..addbe5d4 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/hmac.go @@ -0,0 +1,95 @@ +package jwt + +import ( + "crypto" + "crypto/hmac" + "errors" +) + +// Implements the HMAC-SHA family of signing methods signing methods +// Expects key type of []byte for both signing and validation +type SigningMethodHMAC struct { + Name string + Hash crypto.Hash +} + +// Specific instances for HS256 and company +var ( + SigningMethodHS256 *SigningMethodHMAC + SigningMethodHS384 *SigningMethodHMAC + SigningMethodHS512 *SigningMethodHMAC + ErrSignatureInvalid = errors.New("signature is invalid") +) + +func init() { + // HS256 + SigningMethodHS256 = &SigningMethodHMAC{"HS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodHS256.Alg(), func() SigningMethod { + return SigningMethodHS256 + }) + + // HS384 + SigningMethodHS384 = &SigningMethodHMAC{"HS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodHS384.Alg(), func() SigningMethod { + return SigningMethodHS384 + }) + + // HS512 + SigningMethodHS512 = &SigningMethodHMAC{"HS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodHS512.Alg(), func() SigningMethod { + return SigningMethodHS512 + }) +} + +func (m *SigningMethodHMAC) Alg() string { + return m.Name +} + +// Verify the signature of HSXXX tokens. Returns nil if the signature is valid. 
+func (m *SigningMethodHMAC) Verify(signingString, signature string, key interface{}) error { + // Verify the key is the right type + keyBytes, ok := key.([]byte) + if !ok { + return ErrInvalidKeyType + } + + // Decode signature, for comparison + sig, err := DecodeSegment(signature) + if err != nil { + return err + } + + // Can we use the specified hashing method? + if !m.Hash.Available() { + return ErrHashUnavailable + } + + // This signing method is symmetric, so we validate the signature + // by reproducing the signature from the signing string and key, then + // comparing that against the provided signature. + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + if !hmac.Equal(sig, hasher.Sum(nil)) { + return ErrSignatureInvalid + } + + // No validation errors. Signature is good. + return nil +} + +// Implements the Sign method from SigningMethod for this signing method. +// Key must be []byte +func (m *SigningMethodHMAC) Sign(signingString string, key interface{}) (string, error) { + if keyBytes, ok := key.([]byte); ok { + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := hmac.New(m.Hash.New, keyBytes) + hasher.Write([]byte(signingString)) + + return EncodeSegment(hasher.Sum(nil)), nil + } + + return "", ErrInvalidKeyType +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/map_claims.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/map_claims.go new file mode 100644 index 00000000..291213c4 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/map_claims.go @@ -0,0 +1,94 @@ +package jwt + +import ( + "encoding/json" + "errors" + // "fmt" +) + +// Claims type that uses the map[string]interface{} for JSON decoding +// This is the default claims type if you don't supply one +type MapClaims map[string]interface{} + +// Compares the aud claim against cmp. 
+// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyAudience(cmp string, req bool) bool { + aud, _ := m["aud"].(string) + return verifyAud(aud, cmp, req) +} + +// Compares the exp claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyExpiresAt(cmp int64, req bool) bool { + switch exp := m["exp"].(type) { + case float64: + return verifyExp(int64(exp), cmp, req) + case json.Number: + v, _ := exp.Int64() + return verifyExp(v, cmp, req) + } + return req == false +} + +// Compares the iat claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuedAt(cmp int64, req bool) bool { + switch iat := m["iat"].(type) { + case float64: + return verifyIat(int64(iat), cmp, req) + case json.Number: + v, _ := iat.Int64() + return verifyIat(v, cmp, req) + } + return req == false +} + +// Compares the iss claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyIssuer(cmp string, req bool) bool { + iss, _ := m["iss"].(string) + return verifyIss(iss, cmp, req) +} + +// Compares the nbf claim against cmp. +// If required is false, this method will return true if the value matches or is unset +func (m MapClaims) VerifyNotBefore(cmp int64, req bool) bool { + switch nbf := m["nbf"].(type) { + case float64: + return verifyNbf(int64(nbf), cmp, req) + case json.Number: + v, _ := nbf.Int64() + return verifyNbf(v, cmp, req) + } + return req == false +} + +// Validates time based claims "exp, iat, nbf". +// There is no accounting for clock skew. +// As well, if any of the above claims are not in the token, it will still +// be considered a valid claim. 
+func (m MapClaims) Valid() error { + vErr := new(ValidationError) + now := TimeFunc().Unix() + + if m.VerifyExpiresAt(now, false) == false { + vErr.Inner = errors.New("Token is expired") + vErr.Errors |= ValidationErrorExpired + } + + if m.VerifyIssuedAt(now, false) == false { + vErr.Inner = errors.New("Token used before issued") + vErr.Errors |= ValidationErrorIssuedAt + } + + if m.VerifyNotBefore(now, false) == false { + vErr.Inner = errors.New("Token is not valid yet") + vErr.Errors |= ValidationErrorNotValidYet + } + + if vErr.valid() { + return nil + } + + return vErr +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/none.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/none.go new file mode 100644 index 00000000..f04d189d --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/none.go @@ -0,0 +1,52 @@ +package jwt + +// Implements the none signing method. This is required by the spec +// but you probably should never use it. +var SigningMethodNone *signingMethodNone + +const UnsafeAllowNoneSignatureType unsafeNoneMagicConstant = "none signing method allowed" + +var NoneSignatureTypeDisallowedError error + +type signingMethodNone struct{} +type unsafeNoneMagicConstant string + +func init() { + SigningMethodNone = &signingMethodNone{} + NoneSignatureTypeDisallowedError = NewValidationError("'none' signature type is not allowed", ValidationErrorSignatureInvalid) + + RegisterSigningMethod(SigningMethodNone.Alg(), func() SigningMethod { + return SigningMethodNone + }) +} + +func (m *signingMethodNone) Alg() string { + return "none" +} + +// Only allow 'none' alg type if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Verify(signingString, signature string, key interface{}) (err error) { + // Key must be UnsafeAllowNoneSignatureType to prevent accidentally + // accepting 'none' signing method + if _, ok := key.(unsafeNoneMagicConstant); !ok { + return NoneSignatureTypeDisallowedError + } + 
// If signing method is none, signature must be an empty string + if signature != "" { + return NewValidationError( + "'none' signing method with non-empty signature", + ValidationErrorSignatureInvalid, + ) + } + + // Accept 'none' signing method. + return nil +} + +// Only allow 'none' signing if UnsafeAllowNoneSignatureType is specified as the key +func (m *signingMethodNone) Sign(signingString string, key interface{}) (string, error) { + if _, ok := key.(unsafeNoneMagicConstant); ok { + return "", nil + } + return "", NoneSignatureTypeDisallowedError +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/parser.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/parser.go new file mode 100644 index 00000000..d6901d9a --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/parser.go @@ -0,0 +1,148 @@ +package jwt + +import ( + "bytes" + "encoding/json" + "fmt" + "strings" +) + +type Parser struct { + ValidMethods []string // If populated, only these methods will be considered valid + UseJSONNumber bool // Use JSON Number format in JSON decoder + SkipClaimsValidation bool // Skip claims validation during token parsing +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func (p *Parser) Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return p.ParseWithClaims(tokenString, MapClaims{}, keyFunc) +} + +func (p *Parser) ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + token, parts, err := p.ParseUnverified(tokenString, claims) + if err != nil { + return token, err + } + + // Verify signing method is in the required set + if p.ValidMethods != nil { + var signingMethodValid = false + var alg = token.Method.Alg() + for _, m := range p.ValidMethods { + if m == alg { + signingMethodValid = true + break + } + } + if !signingMethodValid { + // signing method is not in the listed set + return token, NewValidationError(fmt.Sprintf("signing method %v is invalid", alg), ValidationErrorSignatureInvalid) + } + } + + // Lookup key + var key interface{} + if keyFunc == nil { + // keyFunc was not provided. short circuiting validation + return token, NewValidationError("no Keyfunc was provided.", ValidationErrorUnverifiable) + } + if key, err = keyFunc(token); err != nil { + // keyFunc returned an error + if ve, ok := err.(*ValidationError); ok { + return token, ve + } + return token, &ValidationError{Inner: err, Errors: ValidationErrorUnverifiable} + } + + vErr := &ValidationError{} + + // Validate Claims + if !p.SkipClaimsValidation { + if err := token.Claims.Valid(); err != nil { + + // If the Claims Valid returned an error, check if it is a validation error, + // If it was another error type, create a ValidationError with a generic ClaimsInvalid flag set + if e, ok := err.(*ValidationError); !ok { + vErr = &ValidationError{Inner: err, Errors: ValidationErrorClaimsInvalid} + } else { + vErr = e + } + } + } + + // Perform validation + token.Signature = parts[2] + if err = token.Method.Verify(strings.Join(parts[0:2], "."), token.Signature, key); err != nil { + vErr.Inner = err + vErr.Errors |= ValidationErrorSignatureInvalid + } + + if vErr.valid() { + 
token.Valid = true + return token, nil + } + + return token, vErr +} + +// WARNING: Don't use this method unless you know what you're doing +// +// This method parses the token but doesn't validate the signature. It's only +// ever useful in cases where you know the signature is valid (because it has +// been checked previously in the stack) and you want to extract values from +// it. +func (p *Parser) ParseUnverified(tokenString string, claims Claims) (token *Token, parts []string, err error) { + parts = strings.Split(tokenString, ".") + if len(parts) != 3 { + return nil, parts, NewValidationError("token contains an invalid number of segments", ValidationErrorMalformed) + } + + token = &Token{Raw: tokenString} + + // parse Header + var headerBytes []byte + if headerBytes, err = DecodeSegment(parts[0]); err != nil { + if strings.HasPrefix(strings.ToLower(tokenString), "bearer ") { + return token, parts, NewValidationError("tokenstring should not contain 'bearer '", ValidationErrorMalformed) + } + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + if err = json.Unmarshal(headerBytes, &token.Header); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // parse Claims + var claimBytes []byte + token.Claims = claims + + if claimBytes, err = DecodeSegment(parts[1]); err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + dec := json.NewDecoder(bytes.NewBuffer(claimBytes)) + if p.UseJSONNumber { + dec.UseNumber() + } + // JSON Decode. 
Special case for map type to avoid weird pointer behavior + if c, ok := token.Claims.(MapClaims); ok { + err = dec.Decode(&c) + } else { + err = dec.Decode(&claims) + } + // Handle decode error + if err != nil { + return token, parts, &ValidationError{Inner: err, Errors: ValidationErrorMalformed} + } + + // Lookup signature method + if method, ok := token.Header["alg"].(string); ok { + if token.Method = GetSigningMethod(method); token.Method == nil { + return token, parts, NewValidationError("signing method (alg) is unavailable.", ValidationErrorUnverifiable) + } + } else { + return token, parts, NewValidationError("signing method (alg) is unspecified.", ValidationErrorUnverifiable) + } + + return token, parts, nil +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa.go new file mode 100644 index 00000000..e4caf1ca --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSA family of signing methods signing methods +// Expects *rsa.PrivateKey for signing and *rsa.PublicKey for validation +type SigningMethodRSA struct { + Name string + Hash crypto.Hash +} + +// Specific instances for RS256 and company +var ( + SigningMethodRS256 *SigningMethodRSA + SigningMethodRS384 *SigningMethodRSA + SigningMethodRS512 *SigningMethodRSA +) + +func init() { + // RS256 + SigningMethodRS256 = &SigningMethodRSA{"RS256", crypto.SHA256} + RegisterSigningMethod(SigningMethodRS256.Alg(), func() SigningMethod { + return SigningMethodRS256 + }) + + // RS384 + SigningMethodRS384 = &SigningMethodRSA{"RS384", crypto.SHA384} + RegisterSigningMethod(SigningMethodRS384.Alg(), func() SigningMethod { + return SigningMethodRS384 + }) + + // RS512 + SigningMethodRS512 = &SigningMethodRSA{"RS512", crypto.SHA512} + RegisterSigningMethod(SigningMethodRS512.Alg(), func() SigningMethod { + return 
SigningMethodRS512 + }) +} + +func (m *SigningMethodRSA) Alg() string { + return m.Name +} + +// Implements the Verify method from SigningMethod +// For this signing method, must be an *rsa.PublicKey structure. +func (m *SigningMethodRSA) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + var ok bool + + if rsaKey, ok = key.(*rsa.PublicKey); !ok { + return ErrInvalidKeyType + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Verify the signature + return rsa.VerifyPKCS1v15(rsaKey, m.Hash, hasher.Sum(nil), sig) +} + +// Implements the Sign method from SigningMethod +// For this signing method, must be an *rsa.PrivateKey structure. +func (m *SigningMethodRSA) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + var ok bool + + // Validate type of key + if rsaKey, ok = key.(*rsa.PrivateKey); !ok { + return "", ErrInvalidKey + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPKCS1v15(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil)); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go new file mode 100644 index 00000000..10ee9db8 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa_pss.go @@ -0,0 +1,126 @@ +// +build go1.4 + +package jwt + +import ( + "crypto" + "crypto/rand" + "crypto/rsa" +) + +// Implements the RSAPSS family of signing methods signing methods +type 
SigningMethodRSAPSS struct { + *SigningMethodRSA + Options *rsa.PSSOptions +} + +// Specific instances for RS/PS and company +var ( + SigningMethodPS256 *SigningMethodRSAPSS + SigningMethodPS384 *SigningMethodRSAPSS + SigningMethodPS512 *SigningMethodRSAPSS +) + +func init() { + // PS256 + SigningMethodPS256 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS256", + Hash: crypto.SHA256, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA256, + }, + } + RegisterSigningMethod(SigningMethodPS256.Alg(), func() SigningMethod { + return SigningMethodPS256 + }) + + // PS384 + SigningMethodPS384 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS384", + Hash: crypto.SHA384, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA384, + }, + } + RegisterSigningMethod(SigningMethodPS384.Alg(), func() SigningMethod { + return SigningMethodPS384 + }) + + // PS512 + SigningMethodPS512 = &SigningMethodRSAPSS{ + &SigningMethodRSA{ + Name: "PS512", + Hash: crypto.SHA512, + }, + &rsa.PSSOptions{ + SaltLength: rsa.PSSSaltLengthAuto, + Hash: crypto.SHA512, + }, + } + RegisterSigningMethod(SigningMethodPS512.Alg(), func() SigningMethod { + return SigningMethodPS512 + }) +} + +// Implements the Verify method from SigningMethod +// For this verify method, key must be an rsa.PublicKey struct +func (m *SigningMethodRSAPSS) Verify(signingString, signature string, key interface{}) error { + var err error + + // Decode the signature + var sig []byte + if sig, err = DecodeSegment(signature); err != nil { + return err + } + + var rsaKey *rsa.PublicKey + switch k := key.(type) { + case *rsa.PublicKey: + rsaKey = k + default: + return ErrInvalidKey + } + + // Create hasher + if !m.Hash.Available() { + return ErrHashUnavailable + } + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + return rsa.VerifyPSS(rsaKey, m.Hash, hasher.Sum(nil), sig, m.Options) +} + +// Implements the Sign method from SigningMethod +// For this 
signing method, key must be an rsa.PrivateKey struct +func (m *SigningMethodRSAPSS) Sign(signingString string, key interface{}) (string, error) { + var rsaKey *rsa.PrivateKey + + switch k := key.(type) { + case *rsa.PrivateKey: + rsaKey = k + default: + return "", ErrInvalidKeyType + } + + // Create the hasher + if !m.Hash.Available() { + return "", ErrHashUnavailable + } + + hasher := m.Hash.New() + hasher.Write([]byte(signingString)) + + // Sign the string and return the encoded bytes + if sigBytes, err := rsa.SignPSS(rand.Reader, rsaKey, m.Hash, hasher.Sum(nil), m.Options); err == nil { + return EncodeSegment(sigBytes), nil + } else { + return "", err + } +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go new file mode 100644 index 00000000..a5ababf9 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/rsa_utils.go @@ -0,0 +1,101 @@ +package jwt + +import ( + "crypto/rsa" + "crypto/x509" + "encoding/pem" + "errors" +) + +var ( + ErrKeyMustBePEMEncoded = errors.New("Invalid Key: Key must be PEM encoded PKCS1 or PKCS8 private key") + ErrNotRSAPrivateKey = errors.New("Key is not a valid RSA private key") + ErrNotRSAPublicKey = errors.New("Key is not a valid RSA public key") +) + +// Parse PEM encoded PKCS1 or PKCS8 private key +func ParseRSAPrivateKeyFromPEM(key []byte) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + if parsedKey, err = x509.ParsePKCS1PrivateKey(block.Bytes); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(block.Bytes); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 private key 
protected with password +func ParseRSAPrivateKeyFromPEMWithPassword(key []byte, password string) (*rsa.PrivateKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + var parsedKey interface{} + + var blockDecrypted []byte + if blockDecrypted, err = x509.DecryptPEMBlock(block, []byte(password)); err != nil { + return nil, err + } + + if parsedKey, err = x509.ParsePKCS1PrivateKey(blockDecrypted); err != nil { + if parsedKey, err = x509.ParsePKCS8PrivateKey(blockDecrypted); err != nil { + return nil, err + } + } + + var pkey *rsa.PrivateKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PrivateKey); !ok { + return nil, ErrNotRSAPrivateKey + } + + return pkey, nil +} + +// Parse PEM encoded PKCS1 or PKCS8 public key +func ParseRSAPublicKeyFromPEM(key []byte) (*rsa.PublicKey, error) { + var err error + + // Parse PEM block + var block *pem.Block + if block, _ = pem.Decode(key); block == nil { + return nil, ErrKeyMustBePEMEncoded + } + + // Parse the key + var parsedKey interface{} + if parsedKey, err = x509.ParsePKIXPublicKey(block.Bytes); err != nil { + if cert, err := x509.ParseCertificate(block.Bytes); err == nil { + parsedKey = cert.PublicKey + } else { + return nil, err + } + } + + var pkey *rsa.PublicKey + var ok bool + if pkey, ok = parsedKey.(*rsa.PublicKey); !ok { + return nil, ErrNotRSAPublicKey + } + + return pkey, nil +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/signing_method.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/signing_method.go new file mode 100644 index 00000000..ed1f212b --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/signing_method.go @@ -0,0 +1,35 @@ +package jwt + +import ( + "sync" +) + +var signingMethods = map[string]func() SigningMethod{} +var signingMethodLock = new(sync.RWMutex) + +// Implement SigningMethod to add new methods for signing or verifying tokens. 
+type SigningMethod interface { + Verify(signingString, signature string, key interface{}) error // Returns nil if signature is valid + Sign(signingString string, key interface{}) (string, error) // Returns encoded signature or error + Alg() string // returns the alg identifier for this method (example: 'HS256') +} + +// Register the "alg" name and a factory function for signing method. +// This is typically done during init() in the method's implementation +func RegisterSigningMethod(alg string, f func() SigningMethod) { + signingMethodLock.Lock() + defer signingMethodLock.Unlock() + + signingMethods[alg] = f +} + +// Get a signing method from an "alg" string +func GetSigningMethod(alg string) (method SigningMethod) { + signingMethodLock.RLock() + defer signingMethodLock.RUnlock() + + if methodF, ok := signingMethods[alg]; ok { + method = methodF() + } + return +} diff --git a/terraform-server/vendor/github.com/dgrijalva/jwt-go/token.go b/terraform-server/vendor/github.com/dgrijalva/jwt-go/token.go new file mode 100644 index 00000000..d637e086 --- /dev/null +++ b/terraform-server/vendor/github.com/dgrijalva/jwt-go/token.go @@ -0,0 +1,108 @@ +package jwt + +import ( + "encoding/base64" + "encoding/json" + "strings" + "time" +) + +// TimeFunc provides the current time when parsing token to validate "exp" claim (expiration time). +// You can override it to use another time value. This is useful for testing or if your +// server uses a different time zone than your tokens. +var TimeFunc = time.Now + +// Parse methods use this callback function to supply +// the key for verification. The function receives the parsed, +// but unverified Token. This allows you to use properties in the +// Header of the token (such as `kid`) to identify which key to use. +type Keyfunc func(*Token) (interface{}, error) + +// A JWT Token. Different fields will be used depending on whether you're +// creating or parsing/verifying a token. +type Token struct { + Raw string // The raw token. 
Populated when you Parse a token + Method SigningMethod // The signing method used or to be used + Header map[string]interface{} // The first segment of the token + Claims Claims // The second segment of the token + Signature string // The third segment of the token. Populated when you Parse a token + Valid bool // Is the token valid? Populated when you Parse/Verify a token +} + +// Create a new Token. Takes a signing method +func New(method SigningMethod) *Token { + return NewWithClaims(method, MapClaims{}) +} + +func NewWithClaims(method SigningMethod, claims Claims) *Token { + return &Token{ + Header: map[string]interface{}{ + "typ": "JWT", + "alg": method.Alg(), + }, + Claims: claims, + Method: method, + } +} + +// Get the complete, signed token +func (t *Token) SignedString(key interface{}) (string, error) { + var sig, sstr string + var err error + if sstr, err = t.SigningString(); err != nil { + return "", err + } + if sig, err = t.Method.Sign(sstr, key); err != nil { + return "", err + } + return strings.Join([]string{sstr, sig}, "."), nil +} + +// Generate the signing string. This is the +// most expensive part of the whole deal. Unless you +// need this for something special, just go straight for +// the SignedString. +func (t *Token) SigningString() (string, error) { + var err error + parts := make([]string, 2) + for i, _ := range parts { + var jsonValue []byte + if i == 0 { + if jsonValue, err = json.Marshal(t.Header); err != nil { + return "", err + } + } else { + if jsonValue, err = json.Marshal(t.Claims); err != nil { + return "", err + } + } + + parts[i] = EncodeSegment(jsonValue) + } + return strings.Join(parts, "."), nil +} + +// Parse, validate, and return a token. +// keyFunc will receive the parsed token and should return the key for validating. 
+// If everything is kosher, err will be nil +func Parse(tokenString string, keyFunc Keyfunc) (*Token, error) { + return new(Parser).Parse(tokenString, keyFunc) +} + +func ParseWithClaims(tokenString string, claims Claims, keyFunc Keyfunc) (*Token, error) { + return new(Parser).ParseWithClaims(tokenString, claims, keyFunc) +} + +// Encode JWT specific base64url encoding with padding stripped +func EncodeSegment(seg []byte) string { + return strings.TrimRight(base64.URLEncoding.EncodeToString(seg), "=") +} + +// Decode JWT specific base64url encoding with padding stripped +func DecodeSegment(seg string) ([]byte, error) { + if l := len(seg) % 4; l > 0 { + seg += strings.Repeat("=", 4-l) + } + + return base64.URLEncoding.DecodeString(seg) +} diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/.travis.yml b/terraform-server/vendor/github.com/gin-contrib/sse/.travis.yml new file mode 100644 index 00000000..d0e8fcf9 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/.travis.yml @@ -0,0 +1,26 @@ +language: go +sudo: false +go: + - 1.8.x + - 1.9.x + - 1.10.x + - 1.11.x + - 1.12.x + - master + +git: + depth: 10 + +matrix: + fast_finish: true + include: + - go: 1.11.x + env: GO111MODULE=on + - go: 1.12.x + env: GO111MODULE=on + +script: + - go test -v -covermode=count -coverprofile=coverage.out + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/LICENSE b/terraform-server/vendor/github.com/gin-contrib/sse/LICENSE new file mode 100644 index 00000000..1ff7f370 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Manuel Martínez-Almeida + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, 
modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/README.md b/terraform-server/vendor/github.com/gin-contrib/sse/README.md new file mode 100644 index 00000000..c9c49cf9 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/README.md @@ -0,0 +1,58 @@ +# Server-Sent Events + +[![GoDoc](https://godoc.org/github.com/gin-contrib/sse?status.svg)](https://godoc.org/github.com/gin-contrib/sse) +[![Build Status](https://travis-ci.org/gin-contrib/sse.svg)](https://travis-ci.org/gin-contrib/sse) +[![codecov](https://codecov.io/gh/gin-contrib/sse/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-contrib/sse) +[![Go Report Card](https://goreportcard.com/badge/github.com/gin-contrib/sse)](https://goreportcard.com/report/github.com/gin-contrib/sse) + +Server-sent events (SSE) is a technology where a browser receives automatic updates from a server via HTTP connection. The Server-Sent Events EventSource API is [standardized as part of HTML5[1] by the W3C](http://www.w3.org/TR/2009/WD-eventsource-20091029/). 
+ +- [Read this great SSE introduction by the HTML5Rocks guys](http://www.html5rocks.com/en/tutorials/eventsource/basics/) +- [Browser support](http://caniuse.com/#feat=eventsource) + +## Sample code + +```go +import "github.com/gin-contrib/sse" + +func httpHandler(w http.ResponseWriter, req *http.Request) { + // data can be a primitive like a string, an integer or a float + sse.Encode(w, sse.Event{ + Event: "message", + Data: "some data\nmore data", + }) + + // also a complex type, like a map, a struct or a slice + sse.Encode(w, sse.Event{ + Id: "124", + Event: "message", + Data: map[string]interface{}{ + "user": "manu", + "date": time.Now().Unix(), + "content": "hi!", + }, + }) +} +``` +``` +event: message +data: some data\\nmore data + +id: 124 +event: message +data: {"content":"hi!","date":1431540810,"user":"manu"} + +``` + +## Content-Type + +```go +fmt.Println(sse.ContentType) +``` +``` +text/event-stream +``` + +## Decoding support + +There is a client-side implementation of SSE coming soon. 
diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/go.mod b/terraform-server/vendor/github.com/gin-contrib/sse/go.mod new file mode 100644 index 00000000..b9c03f47 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/go.mod @@ -0,0 +1,5 @@ +module github.com/gin-contrib/sse + +go 1.12 + +require github.com/stretchr/testify v1.3.0 diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/go.sum b/terraform-server/vendor/github.com/gin-contrib/sse/go.sum new file mode 100644 index 00000000..4347755a --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/go.sum @@ -0,0 +1,7 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/sse-decoder.go b/terraform-server/vendor/github.com/gin-contrib/sse/sse-decoder.go new file mode 100644 index 00000000..fd49b9c3 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/sse-decoder.go @@ -0,0 +1,116 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package sse + +import ( + "bytes" + "io" + "io/ioutil" +) + +type decoder struct { + events []Event +} + +func Decode(r io.Reader) ([]Event, error) { + var dec decoder + return dec.decode(r) +} + +func (d *decoder) dispatchEvent(event Event, data string) { + dataLength := len(data) + if dataLength > 0 { + //If the data buffer's last character is a U+000A LINE FEED (LF) character, then remove the last character from the data buffer. + data = data[:dataLength-1] + dataLength-- + } + if dataLength == 0 && event.Event == "" { + return + } + if event.Event == "" { + event.Event = "message" + } + event.Data = data + d.events = append(d.events, event) +} + +func (d *decoder) decode(r io.Reader) ([]Event, error) { + buf, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + var currentEvent Event + var dataBuffer *bytes.Buffer = new(bytes.Buffer) + // TODO (and unit tests) + // Lines must be separated by either a U+000D CARRIAGE RETURN U+000A LINE FEED (CRLF) character pair, + // a single U+000A LINE FEED (LF) character, + // or a single U+000D CARRIAGE RETURN (CR) character. + lines := bytes.Split(buf, []byte{'\n'}) + for _, line := range lines { + if len(line) == 0 { + // If the line is empty (a blank line). Dispatch the event. + d.dispatchEvent(currentEvent, dataBuffer.String()) + + // reset current event and data buffer + currentEvent = Event{} + dataBuffer.Reset() + continue + } + if line[0] == byte(':') { + // If the line starts with a U+003A COLON character (:), ignore the line. + continue + } + + var field, value []byte + colonIndex := bytes.IndexRune(line, ':') + if colonIndex != -1 { + // If the line contains a U+003A COLON character character (:) + // Collect the characters on the line before the first U+003A COLON character (:), + // and let field be that string. + field = line[:colonIndex] + // Collect the characters on the line after the first U+003A COLON character (:), + // and let value be that string. 
+ value = line[colonIndex+1:] + // If value starts with a single U+0020 SPACE character, remove it from value. + if len(value) > 0 && value[0] == ' ' { + value = value[1:] + } + } else { + // Otherwise, the string is not empty but does not contain a U+003A COLON character character (:) + // Use the whole line as the field name, and the empty string as the field value. + field = line + value = []byte{} + } + // The steps to process the field given a field name and a field value depend on the field name, + // as given in the following list. Field names must be compared literally, + // with no case folding performed. + switch string(field) { + case "event": + // Set the event name buffer to field value. + currentEvent.Event = string(value) + case "id": + // Set the event stream's last event ID to the field value. + currentEvent.Id = string(value) + case "retry": + // If the field value consists of only characters in the range U+0030 DIGIT ZERO (0) to U+0039 DIGIT NINE (9), + // then interpret the field value as an integer in base ten, and set the event stream's reconnection time to that integer. + // Otherwise, ignore the field. + currentEvent.Id = string(value) + case "data": + // Append the field value to the data buffer, + dataBuffer.Write(value) + // then append a single U+000A LINE FEED (LF) character to the data buffer. + dataBuffer.WriteString("\n") + default: + //Otherwise. The field is ignored. + continue + } + } + // Once the end of the file is reached, the user agent must dispatch the event one final time. + d.dispatchEvent(currentEvent, dataBuffer.String()) + + return d.events, nil +} diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/sse-encoder.go b/terraform-server/vendor/github.com/gin-contrib/sse/sse-encoder.go new file mode 100644 index 00000000..f9c80875 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/sse-encoder.go @@ -0,0 +1,110 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package sse + +import ( + "encoding/json" + "fmt" + "io" + "net/http" + "reflect" + "strconv" + "strings" +) + +// Server-Sent Events +// W3C Working Draft 29 October 2009 +// http://www.w3.org/TR/2009/WD-eventsource-20091029/ + +const ContentType = "text/event-stream" + +var contentType = []string{ContentType} +var noCache = []string{"no-cache"} + +var fieldReplacer = strings.NewReplacer( + "\n", "\\n", + "\r", "\\r") + +var dataReplacer = strings.NewReplacer( + "\n", "\ndata:", + "\r", "\\r") + +type Event struct { + Event string + Id string + Retry uint + Data interface{} +} + +func Encode(writer io.Writer, event Event) error { + w := checkWriter(writer) + writeId(w, event.Id) + writeEvent(w, event.Event) + writeRetry(w, event.Retry) + return writeData(w, event.Data) +} + +func writeId(w stringWriter, id string) { + if len(id) > 0 { + w.WriteString("id:") + fieldReplacer.WriteString(w, id) + w.WriteString("\n") + } +} + +func writeEvent(w stringWriter, event string) { + if len(event) > 0 { + w.WriteString("event:") + fieldReplacer.WriteString(w, event) + w.WriteString("\n") + } +} + +func writeRetry(w stringWriter, retry uint) { + if retry > 0 { + w.WriteString("retry:") + w.WriteString(strconv.FormatUint(uint64(retry), 10)) + w.WriteString("\n") + } +} + +func writeData(w stringWriter, data interface{}) error { + w.WriteString("data:") + switch kindOfData(data) { + case reflect.Struct, reflect.Slice, reflect.Map: + err := json.NewEncoder(w).Encode(data) + if err != nil { + return err + } + w.WriteString("\n") + default: + dataReplacer.WriteString(w, fmt.Sprint(data)) + w.WriteString("\n\n") + } + return nil +} + +func (r Event) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + return Encode(w, r) +} + +func (r Event) WriteContentType(w http.ResponseWriter) { + header := w.Header() + header["Content-Type"] = contentType + + if _, exist := 
header["Cache-Control"]; !exist { + header["Cache-Control"] = noCache + } +} + +func kindOfData(data interface{}) reflect.Kind { + value := reflect.ValueOf(data) + valueType := value.Kind() + if valueType == reflect.Ptr { + valueType = value.Elem().Kind() + } + return valueType +} diff --git a/terraform-server/vendor/github.com/gin-contrib/sse/writer.go b/terraform-server/vendor/github.com/gin-contrib/sse/writer.go new file mode 100644 index 00000000..6f9806c5 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-contrib/sse/writer.go @@ -0,0 +1,24 @@ +package sse + +import "io" + +type stringWriter interface { + io.Writer + WriteString(string) (int, error) +} + +type stringWrapper struct { + io.Writer +} + +func (w stringWrapper) WriteString(str string) (int, error) { + return w.Writer.Write([]byte(str)) +} + +func checkWriter(writer io.Writer) stringWriter { + if w, ok := writer.(stringWriter); ok { + return w + } else { + return stringWrapper{writer} + } +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/.gitignore b/terraform-server/vendor/github.com/gin-gonic/gin/.gitignore new file mode 100644 index 00000000..bdd50c95 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/.gitignore @@ -0,0 +1,7 @@ +vendor/* +!vendor/vendor.json +coverage.out +count.out +test +profile.out +tmp.out diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/.travis.yml b/terraform-server/vendor/github.com/gin-gonic/gin/.travis.yml new file mode 100644 index 00000000..8ebae712 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/.travis.yml @@ -0,0 +1,50 @@ +language: go + +matrix: + fast_finish: true + include: + - go: 1.12.x + env: GO111MODULE=on + - go: 1.13.x + - go: 1.13.x + env: + - TESTTAGS=nomsgpack + - go: 1.14.x + - go: 1.14.x + env: + - TESTTAGS=nomsgpack + - go: 1.15.x + - go: 1.15.x + env: + - TESTTAGS=nomsgpack + - go: master + +git: + depth: 10 + +before_install: + - if [[ "${GO111MODULE}" = "on" ]]; then mkdir 
"${HOME}/go"; export GOPATH="${HOME}/go"; fi + +install: + - if [[ "${GO111MODULE}" = "on" ]]; then go mod download; fi + - if [[ "${GO111MODULE}" = "on" ]]; then export PATH="${GOPATH}/bin:${GOROOT}/bin:${PATH}"; fi + - if [[ "${GO111MODULE}" = "on" ]]; then make tools; fi + +go_import_path: github.com/gin-gonic/gin + +script: + - make vet + - make fmt-check + - make misspell-check + - make test + +after_success: + - bash <(curl -s https://codecov.io/bash) + +notifications: + webhooks: + urls: + - https://webhooks.gitter.im/e/7f95bf605c4d356372f4 + on_success: change # options: [always|never|change] default: always + on_failure: always # options: [always|never|change] default: always + on_start: false # default: false diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/AUTHORS.md b/terraform-server/vendor/github.com/gin-gonic/gin/AUTHORS.md new file mode 100644 index 00000000..c634e6be --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/AUTHORS.md @@ -0,0 +1,233 @@ +List of all the awesome people working to make Gin the best Web Framework in Go. + +## gin 1.x series authors + +**Gin Core Team:** Bo-Yi Wu (@appleboy), 田欧 (@thinkerou), Javier Provecho (@javierprovecho) + +## gin 0.x series authors + +**Maintainers:** Manu Martinez-Almeida (@manucorporat), Javier Provecho (@javierprovecho) + +People and companies, who have contributed, in alphabetical order. 
+ +**@858806258 (杰哥)** +- Fix typo in example + + +**@achedeuzot (Klemen Sever)** +- Fix newline debug printing + + +**@adammck (Adam Mckaig)** +- Add MIT license + + +**@AlexanderChen1989 (Alexander)** +- Typos in README + + +**@alexanderdidenko (Aleksandr Didenko)** +- Add support multipart/form-data + + +**@alexandernyquist (Alexander Nyquist)** +- Using template.Must to fix multiple return issue +- ★ Added support for OPTIONS verb +- ★ Setting response headers before calling WriteHeader +- Improved documentation for model binding +- ★ Added Content.Redirect() +- ★ Added tons of Unit tests + + +**@austinheap (Austin Heap)** +- Added travis CI integration + + +**@andredublin (Andre Dublin)** +- Fix typo in comment + + +**@bredov (Ludwig Valda Vasquez)** +- Fix html templating in debug mode + + +**@bluele (Jun Kimura)** +- Fixes code examples in README + + +**@chad-russell** +- ★ Support for serializing gin.H into XML + + +**@dickeyxxx (Jeff Dickey)** +- Typos in README +- Add example about serving static files + + +**@donileo (Adonis)** +- Add NoMethod handler + + +**@dutchcoders (DutchCoders)** +- ★ Fix security bug that allows client to spoof ip +- Fix typo. r.HTMLTemplates -> SetHTMLTemplate + + +**@el3ctro- (Joshua Loper)** +- Fix typo in example + + +**@ethankan (Ethan Kan)** +- Unsigned integers in binding + + +**(Evgeny Persienko)** +- Validate sub structures + + +**@frankbille (Frank Bille)** +- Add support for HTTP Realm Auth + + +**@fmd (Fareed Dudhia)** +- Fix typo. 
SetHTTPTemplate -> SetHTMLTemplate + + +**@ironiridis (Christopher Harrington)** +- Remove old reference + + +**@jammie-stackhouse (Jamie Stackhouse)** +- Add more shortcuts for router methods + + +**@jasonrhansen** +- Fix spelling and grammar errors in documentation + + +**@JasonSoft (Jason Lee)** +- Fix typo in comment + + +**@joiggama (Ignacio Galindo)** +- Add utf-8 charset header on renders + + +**@julienschmidt (Julien Schmidt)** +- gofmt the code examples + + +**@kelcecil (Kel Cecil)** +- Fix readme typo + + +**@kyledinh (Kyle Dinh)** +- Adds RunTLS() + + +**@LinusU (Linus Unnebäck)** +- Small fixes in README + + +**@loongmxbt (Saint Asky)** +- Fix typo in example + + +**@lucas-clemente (Lucas Clemente)** +- ★ work around path.Join removing trailing slashes from routes + + +**@mattn (Yasuhiro Matsumoto)** +- Improve color logger + + +**@mdigger (Dmitry Sedykh)** +- Fixes Form binding when content-type is x-www-form-urlencoded +- No repeat call c.Writer.Status() in gin.Logger +- Fixes Content-Type for json render + + +**@mirzac (Mirza Ceric)** +- Fix debug printing + + +**@mopemope (Yutaka Matsubara)** +- ★ Adds Godep support (Dependencies Manager) +- Fix variadic parameter in the flexible render API +- Fix Corrupted plain render +- Add Pluggable View Renderer Example + + +**@msemenistyi (Mykyta Semenistyi)** +- update Readme.md. Add code to String method + + +**@msoedov (Sasha Myasoedov)** +- ★ Adds tons of unit tests. + + +**@ngerakines (Nick Gerakines)** +- ★ Improves API, c.GET() doesn't panic +- Adds MustGet() method + + +**@r8k (Rajiv Kilaparti)** +- Fix Port usage in README. 
+ + +**@rayrod2030 (Ray Rodriguez)** +- Fix typo in example + + +**@rns** +- Fix typo in example + + +**@RobAWilkinson (Robert Wilkinson)** +- Add example of forms and params + + +**@rogierlommers (Rogier Lommers)** +- Add updated static serve example + +**@rw-access (Ross Wolf)** +- Added support to mix exact and param routes + +**@se77en (Damon Zhao)** +- Improve color logging + + +**@silasb (Silas Baronda)** +- Fixing quotes in README + + +**@SkuliOskarsson (Skuli Oskarsson)** +- Fixes some texts in README II + + +**@slimmy (Jimmy Pettersson)** +- Added messages for required bindings + + +**@smira (Andrey Smirnov)** +- Add support for ignored/unexported fields in binding + + +**@superalsrk (SRK.Lyu)** +- Update httprouter godeps + + +**@tebeka (Miki Tebeka)** +- Use net/http constants instead of numeric values + + +**@techjanitor** +- Update context.go reserved IPs + + +**@yosssi (Keiji Yoshida)** +- Fix link in README + + +**@yuyabee** +- Fixed README diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/BENCHMARKS.md b/terraform-server/vendor/github.com/gin-gonic/gin/BENCHMARKS.md new file mode 100644 index 00000000..c11ee99a --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/BENCHMARKS.md @@ -0,0 +1,666 @@ + +# Benchmark System + +**VM HOST:** Travis +**Machine:** Ubuntu 16.04.6 LTS x64 +**Date:** May 04th, 2020 +**Version:** Gin v1.6.3 +**Go Version:** 1.14.2 linux/amd64 +**Source:** [Go HTTP Router Benchmark](https://github.com/gin-gonic/go-http-routing-benchmark) +**Result:** [See the gist](https://gist.github.com/appleboy/b5f2ecfaf50824ae9c64dcfb9165ae5e) or [Travis result](https://travis-ci.org/github/gin-gonic/go-http-routing-benchmark/jobs/682947061) + +## Static Routes: 157 + +```sh +Gin: 34936 Bytes + +HttpServeMux: 14512 Bytes +Ace: 30680 Bytes +Aero: 34536 Bytes +Bear: 30456 Bytes +Beego: 98456 Bytes +Bone: 40224 Bytes +Chi: 83608 Bytes +Denco: 10216 Bytes +Echo: 80328 Bytes +GocraftWeb: 55288 Bytes +Goji: 29744 Bytes 
+Gojiv2: 105840 Bytes +GoJsonRest: 137496 Bytes +GoRestful: 816936 Bytes +GorillaMux: 585632 Bytes +GowwwRouter: 24968 Bytes +HttpRouter: 21712 Bytes +HttpTreeMux: 73448 Bytes +Kocha: 115472 Bytes +LARS: 30640 Bytes +Macaron: 38592 Bytes +Martini: 310864 Bytes +Pat: 19696 Bytes +Possum: 89920 Bytes +R2router: 23712 Bytes +Rivet: 24608 Bytes +Tango: 28264 Bytes +TigerTonic: 78768 Bytes +Traffic: 538976 Bytes +Vulcan: 369960 Bytes +``` + +## GithubAPI Routes: 203 + +```sh +Gin: 58512 Bytes + +Ace: 48688 Bytes +Aero: 318568 Bytes +Bear: 84248 Bytes +Beego: 150936 Bytes +Bone: 100976 Bytes +Chi: 95112 Bytes +Denco: 36736 Bytes +Echo: 100296 Bytes +GocraftWeb: 95432 Bytes +Goji: 49680 Bytes +Gojiv2: 104704 Bytes +GoJsonRest: 141976 Bytes +GoRestful: 1241656 Bytes +GorillaMux: 1322784 Bytes +GowwwRouter: 80008 Bytes +HttpRouter: 37144 Bytes +HttpTreeMux: 78800 Bytes +Kocha: 785120 Bytes +LARS: 48600 Bytes +Macaron: 92784 Bytes +Martini: 485264 Bytes +Pat: 21200 Bytes +Possum: 85312 Bytes +R2router: 47104 Bytes +Rivet: 42840 Bytes +Tango: 54840 Bytes +TigerTonic: 95264 Bytes +Traffic: 921744 Bytes +Vulcan: 425992 Bytes +``` + +## GPlusAPI Routes: 13 + +```sh +Gin: 4384 Bytes + +Ace: 3712 Bytes +Aero: 26056 Bytes +Bear: 7112 Bytes +Beego: 10272 Bytes +Bone: 6688 Bytes +Chi: 8024 Bytes +Denco: 3264 Bytes +Echo: 9688 Bytes +GocraftWeb: 7496 Bytes +Goji: 3152 Bytes +Gojiv2: 7376 Bytes +GoJsonRest: 11400 Bytes +GoRestful: 74328 Bytes +GorillaMux: 66208 Bytes +GowwwRouter: 5744 Bytes +HttpRouter: 2808 Bytes +HttpTreeMux: 7440 Bytes +Kocha: 128880 Bytes +LARS: 3656 Bytes +Macaron: 8656 Bytes +Martini: 23920 Bytes +Pat: 1856 Bytes +Possum: 7248 Bytes +R2router: 3928 Bytes +Rivet: 3064 Bytes +Tango: 5168 Bytes +TigerTonic: 9408 Bytes +Traffic: 46400 Bytes +Vulcan: 25544 Bytes +``` + +## ParseAPI Routes: 26 + +```sh +Gin: 7776 Bytes + +Ace: 6704 Bytes +Aero: 28488 Bytes +Bear: 12320 Bytes +Beego: 19280 Bytes +Bone: 11440 Bytes +Chi: 9744 Bytes +Denco: 4192 Bytes +Echo: 11664 Bytes 
+GocraftWeb: 12800 Bytes +Goji: 5680 Bytes +Gojiv2: 14464 Bytes +GoJsonRest: 14072 Bytes +GoRestful: 116264 Bytes +GorillaMux: 105880 Bytes +GowwwRouter: 9344 Bytes +HttpRouter: 5072 Bytes +HttpTreeMux: 7848 Bytes +Kocha: 181712 Bytes +LARS: 6632 Bytes +Macaron: 13648 Bytes +Martini: 45888 Bytes +Pat: 2560 Bytes +Possum: 9200 Bytes +R2router: 7056 Bytes +Rivet: 5680 Bytes +Tango: 8920 Bytes +TigerTonic: 9840 Bytes +Traffic: 79096 Bytes +Vulcan: 44504 Bytes +``` + +## Static Routes + +```sh +BenchmarkGin_StaticAll 62169 19319 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_StaticAll 65428 18313 ns/op 0 B/op 0 allocs/op +BenchmarkAero_StaticAll 121132 9632 ns/op 0 B/op 0 allocs/op +BenchmarkHttpServeMux_StaticAll 52626 22758 ns/op 0 B/op 0 allocs/op +BenchmarkBeego_StaticAll 9962 179058 ns/op 55264 B/op 471 allocs/op +BenchmarkBear_StaticAll 14894 80966 ns/op 20272 B/op 469 allocs/op +BenchmarkBone_StaticAll 18718 64065 ns/op 0 B/op 0 allocs/op +BenchmarkChi_StaticAll 10000 149827 ns/op 67824 B/op 471 allocs/op +BenchmarkDenco_StaticAll 211393 5680 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_StaticAll 49341 24343 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_StaticAll 10000 126209 ns/op 46312 B/op 785 allocs/op +BenchmarkGoji_StaticAll 27956 43174 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_StaticAll 3430 370718 ns/op 205984 B/op 1570 allocs/op +BenchmarkGoJsonRest_StaticAll 9134 188888 ns/op 51653 B/op 1727 allocs/op +BenchmarkGoRestful_StaticAll 706 1703330 ns/op 613280 B/op 2053 allocs/op +BenchmarkGorillaMux_StaticAll 1268 924083 ns/op 153233 B/op 1413 allocs/op +BenchmarkGowwwRouter_StaticAll 63374 18935 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_StaticAll 109938 10902 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_StaticAll 109166 10861 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_StaticAll 92258 12992 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_StaticAll 65200 18387 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_StaticAll 5671 291501 ns/op 115553 B/op 1256 allocs/op 
+BenchmarkMartini_StaticAll 807 1460498 ns/op 125444 B/op 1717 allocs/op +BenchmarkPat_StaticAll 513 2342396 ns/op 602832 B/op 12559 allocs/op +BenchmarkPossum_StaticAll 10000 128270 ns/op 65312 B/op 471 allocs/op +BenchmarkR2router_StaticAll 16726 71760 ns/op 22608 B/op 628 allocs/op +BenchmarkRivet_StaticAll 41722 28723 ns/op 0 B/op 0 allocs/op +BenchmarkTango_StaticAll 7606 205082 ns/op 39209 B/op 1256 allocs/op +BenchmarkTigerTonic_StaticAll 26247 45806 ns/op 7376 B/op 157 allocs/op +BenchmarkTraffic_StaticAll 550 2284518 ns/op 754864 B/op 14601 allocs/op +BenchmarkVulcan_StaticAll 10000 131343 ns/op 15386 B/op 471 allocs/op +``` + +## Micro Benchmarks + +```sh +BenchmarkGin_Param 18785022 63.9 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_Param 14689765 81.5 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Param 23094770 51.2 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Param 1417045 845 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_Param 1000000 1080 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Param 1000000 1463 ns/op 816 B/op 6 allocs/op +BenchmarkChi_Param 1378756 885 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Param 8557899 143 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_Param 16433347 75.5 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param 1000000 1218 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_Param 1921248 617 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param 561848 2156 ns/op 1328 B/op 11 allocs/op +BenchmarkGoJsonRest_Param 1000000 1358 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_Param 224857 5307 ns/op 4192 B/op 14 allocs/op +BenchmarkGorillaMux_Param 498313 2459 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_Param 1864354 654 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Param 26269074 47.7 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Param 2109829 557 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_Param 5050216 243 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_Param 19811712 59.9 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param 662746 2329 ns/op 1072 B/op 10 allocs/op 
+BenchmarkMartini_Param 279902 4260 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_Param 1000000 1382 ns/op 536 B/op 11 allocs/op +BenchmarkPossum_Param 1000000 1014 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Param 1712559 707 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param 6648086 182 ns/op 48 B/op 1 allocs/op +BenchmarkTango_Param 1221504 994 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_Param 891661 2261 ns/op 776 B/op 16 allocs/op +BenchmarkTraffic_Param 350059 3598 ns/op 1856 B/op 21 allocs/op +BenchmarkVulcan_Param 2517823 472 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param5 9214365 130 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Param5 15369013 77.9 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Param5 1000000 1113 ns/op 501 B/op 5 allocs/op +BenchmarkBeego_Param5 1000000 1269 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Param5 986820 1873 ns/op 864 B/op 6 allocs/op +BenchmarkChi_Param5 1000000 1156 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Param5 3036331 400 ns/op 160 B/op 1 allocs/op +BenchmarkEcho_Param5 6447133 186 ns/op 0 B/op 0 allocs/op +BenchmarkGin_Param5 10786068 110 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param5 844820 1944 ns/op 920 B/op 11 allocs/op +BenchmarkGoji_Param5 1474965 827 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_Param5 442820 2516 ns/op 1392 B/op 11 allocs/op +BenchmarkGoJsonRest_Param5 507555 2711 ns/op 1097 B/op 16 allocs/op +BenchmarkGoRestful_Param5 216481 6093 ns/op 4288 B/op 14 allocs/op +BenchmarkGorillaMux_Param5 314402 3628 ns/op 1344 B/op 10 allocs/op +BenchmarkGowwwRouter_Param5 1624660 733 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Param5 13167324 92.0 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Param5 1000000 1295 ns/op 576 B/op 6 allocs/op +BenchmarkKocha_Param5 1000000 1138 ns/op 440 B/op 10 allocs/op +BenchmarkLARS_Param5 11580613 105 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param5 473596 2755 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_Param5 230756 5111 ns/op 1232 B/op 11 allocs/op +BenchmarkPat_Param5 
469190 3370 ns/op 888 B/op 29 allocs/op +BenchmarkPossum_Param5 1000000 1002 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Param5 1422129 844 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Param5 2263789 539 ns/op 240 B/op 1 allocs/op +BenchmarkTango_Param5 1000000 1256 ns/op 360 B/op 8 allocs/op +BenchmarkTigerTonic_Param5 175500 7492 ns/op 2279 B/op 39 allocs/op +BenchmarkTraffic_Param5 233631 5816 ns/op 2208 B/op 27 allocs/op +BenchmarkVulcan_Param5 1923416 629 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Param20 4321266 281 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Param20 31501641 35.2 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Param20 335204 3489 ns/op 1665 B/op 5 allocs/op +BenchmarkBeego_Param20 503674 2860 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Param20 298922 4741 ns/op 2031 B/op 6 allocs/op +BenchmarkChi_Param20 878181 1957 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Param20 1000000 1360 ns/op 640 B/op 1 allocs/op +BenchmarkEcho_Param20 2104946 580 ns/op 0 B/op 0 allocs/op +BenchmarkGin_Param20 4167204 290 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Param20 173064 7514 ns/op 3796 B/op 15 allocs/op +BenchmarkGoji_Param20 458778 2651 ns/op 1247 B/op 2 allocs/op +BenchmarkGojiv2_Param20 364862 3178 ns/op 1632 B/op 11 allocs/op +BenchmarkGoJsonRest_Param20 125514 9760 ns/op 4485 B/op 20 allocs/op +BenchmarkGoRestful_Param20 101217 11964 ns/op 6715 B/op 18 allocs/op +BenchmarkGorillaMux_Param20 147654 8132 ns/op 3452 B/op 12 allocs/op +BenchmarkGowwwRouter_Param20 1000000 1225 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Param20 4920895 247 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Param20 173202 6605 ns/op 3196 B/op 10 allocs/op +BenchmarkKocha_Param20 345988 3620 ns/op 1808 B/op 27 allocs/op +BenchmarkLARS_Param20 4592326 262 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Param20 166492 7286 ns/op 2924 B/op 12 allocs/op +BenchmarkMartini_Param20 122162 10653 ns/op 3595 B/op 13 allocs/op +BenchmarkPat_Param20 78630 15239 ns/op 4424 B/op 93 allocs/op 
+BenchmarkPossum_Param20 1000000 1008 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Param20 294981 4587 ns/op 2284 B/op 7 allocs/op +BenchmarkRivet_Param20 691798 2090 ns/op 1024 B/op 1 allocs/op +BenchmarkTango_Param20 842440 2505 ns/op 856 B/op 8 allocs/op +BenchmarkTigerTonic_Param20 38614 31509 ns/op 9870 B/op 119 allocs/op +BenchmarkTraffic_Param20 57633 21107 ns/op 7853 B/op 47 allocs/op +BenchmarkVulcan_Param20 1000000 1178 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParamWrite 7330743 180 ns/op 8 B/op 1 allocs/op +BenchmarkAero_ParamWrite 13833598 86.7 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParamWrite 1363321 867 ns/op 456 B/op 5 allocs/op +BenchmarkBeego_ParamWrite 1000000 1104 ns/op 360 B/op 4 allocs/op +BenchmarkBone_ParamWrite 1000000 1475 ns/op 816 B/op 6 allocs/op +BenchmarkChi_ParamWrite 1320590 892 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_ParamWrite 7093605 172 ns/op 32 B/op 1 allocs/op +BenchmarkEcho_ParamWrite 8434424 161 ns/op 8 B/op 1 allocs/op +BenchmarkGin_ParamWrite 10377034 118 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParamWrite 1000000 1266 ns/op 656 B/op 9 allocs/op +BenchmarkGoji_ParamWrite 1874168 654 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParamWrite 459032 2352 ns/op 1360 B/op 13 allocs/op +BenchmarkGoJsonRest_ParamWrite 499434 2145 ns/op 1128 B/op 18 allocs/op +BenchmarkGoRestful_ParamWrite 241087 5470 ns/op 4200 B/op 15 allocs/op +BenchmarkGorillaMux_ParamWrite 425686 2522 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_ParamWrite 922172 1778 ns/op 976 B/op 8 allocs/op +BenchmarkHttpRouter_ParamWrite 15392049 77.7 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParamWrite 1973385 597 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParamWrite 4262500 281 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParamWrite 10764410 113 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParamWrite 486769 2726 ns/op 1176 B/op 14 allocs/op +BenchmarkMartini_ParamWrite 264804 4842 ns/op 1176 B/op 14 allocs/op +BenchmarkPat_ParamWrite 735116 2047 ns/op 960 
B/op 15 allocs/op +BenchmarkPossum_ParamWrite 1000000 1004 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_ParamWrite 1592136 768 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParamWrite 3582051 339 ns/op 112 B/op 2 allocs/op +BenchmarkTango_ParamWrite 2237337 534 ns/op 136 B/op 4 allocs/op +BenchmarkTigerTonic_ParamWrite 439608 3136 ns/op 1216 B/op 21 allocs/op +BenchmarkTraffic_ParamWrite 306979 4328 ns/op 2280 B/op 25 allocs/op +BenchmarkVulcan_ParamWrite 2529973 472 ns/op 98 B/op 3 allocs/op +``` + +## GitHub + +```sh +BenchmarkGin_GithubStatic 15629472 76.7 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GithubStatic 15542612 75.9 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GithubStatic 24777151 48.5 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubStatic 2788894 435 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_GithubStatic 1000000 1064 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GithubStatic 93507 12838 ns/op 2880 B/op 60 allocs/op +BenchmarkChi_GithubStatic 1387743 860 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GithubStatic 39384996 30.4 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GithubStatic 12076382 99.1 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubStatic 1596495 756 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_GithubStatic 6364876 189 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GithubStatic 550202 2098 ns/op 1312 B/op 10 allocs/op +BenchmarkGoRestful_GithubStatic 102183 12552 ns/op 4256 B/op 13 allocs/op +BenchmarkGoJsonRest_GithubStatic 1000000 1029 ns/op 329 B/op 11 allocs/op +BenchmarkGorillaMux_GithubStatic 255552 5190 ns/op 976 B/op 9 allocs/op +BenchmarkGowwwRouter_GithubStatic 15531916 77.1 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_GithubStatic 27920724 43.1 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubStatic 21448953 55.8 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GithubStatic 21405310 56.0 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GithubStatic 13625156 89.0 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubStatic 1000000 1747 ns/op 736 B/op 8 allocs/op 
+BenchmarkMartini_GithubStatic 187186 7326 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GithubStatic 109143 11563 ns/op 3648 B/op 76 allocs/op +BenchmarkPossum_GithubStatic 1575898 770 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GithubStatic 3046231 404 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GithubStatic 11484826 105 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GithubStatic 1000000 1153 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_GithubStatic 4929780 249 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_GithubStatic 106351 11819 ns/op 4664 B/op 90 allocs/op +BenchmarkVulcan_GithubStatic 1613271 722 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubParam 8386032 143 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GithubParam 11816200 102 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubParam 1000000 1012 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_GithubParam 1000000 1157 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GithubParam 184653 6912 ns/op 1888 B/op 19 allocs/op +BenchmarkChi_GithubParam 1000000 1102 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GithubParam 3484798 352 ns/op 128 B/op 1 allocs/op +BenchmarkEcho_GithubParam 6337380 189 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GithubParam 9132032 131 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubParam 1000000 1446 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GithubParam 1248640 977 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GithubParam 383233 2784 ns/op 1408 B/op 13 allocs/op +BenchmarkGoJsonRest_GithubParam 1000000 1991 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GithubParam 76414 16015 ns/op 4352 B/op 16 allocs/op +BenchmarkGorillaMux_GithubParam 150026 7663 ns/op 1296 B/op 10 allocs/op +BenchmarkGowwwRouter_GithubParam 1592044 751 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_GithubParam 10420628 115 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubParam 1403755 835 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GithubParam 2286170 533 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GithubParam 9540374 129 ns/op 0 B/op 0 allocs/op 
+BenchmarkMacaron_GithubParam 533154 2742 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_GithubParam 119397 9638 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_GithubParam 150675 8858 ns/op 2408 B/op 48 allocs/op +BenchmarkPossum_GithubParam 1000000 1001 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_GithubParam 1602886 761 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GithubParam 2986579 409 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GithubParam 1000000 1356 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GithubParam 388899 3429 ns/op 1176 B/op 22 allocs/op +BenchmarkTraffic_GithubParam 123160 9734 ns/op 2816 B/op 40 allocs/op +BenchmarkVulcan_GithubParam 1000000 1138 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GithubAll 40543 29670 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GithubAll 57632 20648 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GithubAll 9234 216179 ns/op 86448 B/op 943 allocs/op +BenchmarkBeego_GithubAll 7407 243496 ns/op 71456 B/op 609 allocs/op +BenchmarkBone_GithubAll 420 2922835 ns/op 720160 B/op 8620 allocs/op +BenchmarkChi_GithubAll 7620 238331 ns/op 87696 B/op 609 allocs/op +BenchmarkDenco_GithubAll 18355 64494 ns/op 20224 B/op 167 allocs/op +BenchmarkEcho_GithubAll 31251 38479 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GithubAll 43550 27364 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GithubAll 4117 300062 ns/op 131656 B/op 1686 allocs/op +BenchmarkGoji_GithubAll 3274 416158 ns/op 56112 B/op 334 allocs/op +BenchmarkGojiv2_GithubAll 1402 870518 ns/op 352720 B/op 4321 allocs/op +BenchmarkGoJsonRest_GithubAll 2976 401507 ns/op 134371 B/op 2737 allocs/op +BenchmarkGoRestful_GithubAll 410 2913158 ns/op 910144 B/op 2938 allocs/op +BenchmarkGorillaMux_GithubAll 346 3384987 ns/op 251650 B/op 1994 allocs/op +BenchmarkGowwwRouter_GithubAll 10000 143025 ns/op 72144 B/op 501 allocs/op +BenchmarkHttpRouter_GithubAll 55938 21360 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GithubAll 10000 153944 ns/op 65856 B/op 671 allocs/op +BenchmarkKocha_GithubAll 10000 106315 ns/op 23304 
B/op 843 allocs/op +BenchmarkLARS_GithubAll 47779 25084 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GithubAll 3266 371907 ns/op 149409 B/op 1624 allocs/op +BenchmarkMartini_GithubAll 331 3444706 ns/op 226551 B/op 2325 allocs/op +BenchmarkPat_GithubAll 273 4381818 ns/op 1483152 B/op 26963 allocs/op +BenchmarkPossum_GithubAll 10000 164367 ns/op 84448 B/op 609 allocs/op +BenchmarkR2router_GithubAll 10000 160220 ns/op 77328 B/op 979 allocs/op +BenchmarkRivet_GithubAll 14625 82453 ns/op 16272 B/op 167 allocs/op +BenchmarkTango_GithubAll 6255 279611 ns/op 63826 B/op 1618 allocs/op +BenchmarkTigerTonic_GithubAll 2008 687874 ns/op 193856 B/op 4474 allocs/op +BenchmarkTraffic_GithubAll 355 3478508 ns/op 820744 B/op 14114 allocs/op +BenchmarkVulcan_GithubAll 6885 193333 ns/op 19894 B/op 609 allocs/op +``` + +## Google+ + +```sh +BenchmarkGin_GPlusStatic 19247326 62.2 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_GPlusStatic 20235060 59.2 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlusStatic 31978935 37.6 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusStatic 3516523 341 ns/op 104 B/op 3 allocs/op +BenchmarkBeego_GPlusStatic 1212036 991 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GPlusStatic 6736242 183 ns/op 32 B/op 1 allocs/op +BenchmarkChi_GPlusStatic 1490640 814 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GPlusStatic 55006856 21.8 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_GPlusStatic 17688258 67.9 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusStatic 1829181 666 ns/op 280 B/op 5 allocs/op +BenchmarkGoji_GPlusStatic 9147451 130 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_GPlusStatic 594015 2063 ns/op 1312 B/op 10 allocs/op +BenchmarkGoJsonRest_GPlusStatic 1264906 950 ns/op 329 B/op 11 allocs/op +BenchmarkGoRestful_GPlusStatic 231558 5341 ns/op 3872 B/op 13 allocs/op +BenchmarkGorillaMux_GPlusStatic 908418 1809 ns/op 976 B/op 9 allocs/op +BenchmarkGowwwRouter_GPlusStatic 40684604 29.5 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_GPlusStatic 46742804 25.7 ns/op 0 B/op 0 allocs/op 
+BenchmarkHttpTreeMux_GPlusStatic 32567161 36.9 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_GPlusStatic 33800060 35.3 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_GPlusStatic 20431858 60.0 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusStatic 1000000 1745 ns/op 736 B/op 8 allocs/op +BenchmarkMartini_GPlusStatic 442248 3619 ns/op 768 B/op 9 allocs/op +BenchmarkPat_GPlusStatic 4328004 292 ns/op 96 B/op 2 allocs/op +BenchmarkPossum_GPlusStatic 1570753 763 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_GPlusStatic 3339474 355 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_GPlusStatic 18570961 64.7 ns/op 0 B/op 0 allocs/op +BenchmarkTango_GPlusStatic 1388702 860 ns/op 200 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusStatic 7803543 159 ns/op 32 B/op 1 allocs/op +BenchmarkTraffic_GPlusStatic 878605 2171 ns/op 1112 B/op 16 allocs/op +BenchmarkVulcan_GPlusStatic 2742446 437 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusParam 11626975 105 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlusParam 16914322 71.6 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusParam 1405173 832 ns/op 480 B/op 5 allocs/op +BenchmarkBeego_GPlusParam 1000000 1075 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GPlusParam 1000000 1557 ns/op 816 B/op 6 allocs/op +BenchmarkChi_GPlusParam 1347926 894 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GPlusParam 5513000 212 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlusParam 11884383 101 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GPlusParam 12898952 93.1 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusParam 1000000 1194 ns/op 648 B/op 8 allocs/op +BenchmarkGoji_GPlusParam 1857229 645 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlusParam 520939 2322 ns/op 1328 B/op 11 allocs/op +BenchmarkGoJsonRest_GPlusParam 1000000 1536 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_GPlusParam 205449 5800 ns/op 4192 B/op 14 allocs/op +BenchmarkGorillaMux_GPlusParam 395310 3188 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_GPlusParam 1851798 667 ns/op 432 B/op 3 allocs/op 
+BenchmarkHttpRouter_GPlusParam 18420789 65.2 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlusParam 1878463 629 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_GPlusParam 4495610 273 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_GPlusParam 14615976 83.2 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusParam 584145 2549 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_GPlusParam 250501 4583 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_GPlusParam 1000000 1645 ns/op 576 B/op 11 allocs/op +BenchmarkPossum_GPlusParam 1000000 1008 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_GPlusParam 1708191 688 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlusParam 5795014 211 ns/op 48 B/op 1 allocs/op +BenchmarkTango_GPlusParam 1000000 1091 ns/op 264 B/op 8 allocs/op +BenchmarkTigerTonic_GPlusParam 760221 2489 ns/op 856 B/op 16 allocs/op +BenchmarkTraffic_GPlusParam 309774 4039 ns/op 1872 B/op 21 allocs/op +BenchmarkVulcan_GPlusParam 1935730 623 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlus2Params 9158314 134 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlus2Params 11300517 107 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlus2Params 1239238 961 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_GPlus2Params 1000000 1202 ns/op 352 B/op 3 allocs/op +BenchmarkBone_GPlus2Params 335576 3725 ns/op 1168 B/op 10 allocs/op +BenchmarkChi_GPlus2Params 1000000 1014 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_GPlus2Params 4394598 280 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_GPlus2Params 7851861 154 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GPlus2Params 9958588 120 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlus2Params 1000000 1433 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_GPlus2Params 1325134 909 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_GPlus2Params 405955 2870 ns/op 1408 B/op 14 allocs/op +BenchmarkGoJsonRest_GPlus2Params 977038 1987 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_GPlus2Params 205018 6142 ns/op 4384 B/op 16 allocs/op +BenchmarkGorillaMux_GPlus2Params 205641 6015 ns/op 1296 B/op 10 allocs/op 
+BenchmarkGowwwRouter_GPlus2Params 1748542 684 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_GPlus2Params 14047102 87.7 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlus2Params 1418673 828 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_GPlus2Params 2334562 520 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_GPlus2Params 11954094 101 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlus2Params 491552 2890 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_GPlus2Params 120532 9545 ns/op 1200 B/op 13 allocs/op +BenchmarkPat_GPlus2Params 194739 6766 ns/op 2168 B/op 33 allocs/op +BenchmarkPossum_GPlus2Params 1201224 1009 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_GPlus2Params 1575535 756 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_GPlus2Params 3698930 325 ns/op 96 B/op 1 allocs/op +BenchmarkTango_GPlus2Params 1000000 1212 ns/op 344 B/op 8 allocs/op +BenchmarkTigerTonic_GPlus2Params 349350 3660 ns/op 1200 B/op 22 allocs/op +BenchmarkTraffic_GPlus2Params 169714 7862 ns/op 2248 B/op 28 allocs/op +BenchmarkVulcan_GPlus2Params 1222288 974 ns/op 98 B/op 3 allocs/op +BenchmarkAce_GPlusAll 845606 1398 ns/op 0 B/op 0 allocs/op +BenchmarkAero_GPlusAll 1000000 1009 ns/op 0 B/op 0 allocs/op +BenchmarkBear_GPlusAll 103830 11386 ns/op 5488 B/op 61 allocs/op +BenchmarkBeego_GPlusAll 82653 14784 ns/op 4576 B/op 39 allocs/op +BenchmarkBone_GPlusAll 36601 33123 ns/op 11744 B/op 109 allocs/op +BenchmarkChi_GPlusAll 95264 12831 ns/op 5616 B/op 39 allocs/op +BenchmarkDenco_GPlusAll 567681 2950 ns/op 672 B/op 11 allocs/op +BenchmarkEcho_GPlusAll 720366 1665 ns/op 0 B/op 0 allocs/op +BenchmarkGin_GPlusAll 1000000 1185 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_GPlusAll 71575 16365 ns/op 8040 B/op 103 allocs/op +BenchmarkGoji_GPlusAll 136352 9191 ns/op 3696 B/op 22 allocs/op +BenchmarkGojiv2_GPlusAll 38006 31802 ns/op 17616 B/op 154 allocs/op +BenchmarkGoJsonRest_GPlusAll 57238 21561 ns/op 8117 B/op 170 allocs/op +BenchmarkGoRestful_GPlusAll 15147 79276 ns/op 55520 B/op 192 allocs/op 
+BenchmarkGorillaMux_GPlusAll 24446 48410 ns/op 16112 B/op 128 allocs/op +BenchmarkGowwwRouter_GPlusAll 150112 7770 ns/op 4752 B/op 33 allocs/op +BenchmarkHttpRouter_GPlusAll 1367820 878 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_GPlusAll 166628 8004 ns/op 4032 B/op 38 allocs/op +BenchmarkKocha_GPlusAll 265694 4570 ns/op 976 B/op 43 allocs/op +BenchmarkLARS_GPlusAll 1000000 1068 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_GPlusAll 54564 23305 ns/op 9568 B/op 104 allocs/op +BenchmarkMartini_GPlusAll 16274 73845 ns/op 14016 B/op 145 allocs/op +BenchmarkPat_GPlusAll 27181 44478 ns/op 15264 B/op 271 allocs/op +BenchmarkPossum_GPlusAll 122587 10277 ns/op 5408 B/op 39 allocs/op +BenchmarkR2router_GPlusAll 130137 9297 ns/op 5040 B/op 63 allocs/op +BenchmarkRivet_GPlusAll 532438 3323 ns/op 768 B/op 11 allocs/op +BenchmarkTango_GPlusAll 86054 14531 ns/op 3656 B/op 104 allocs/op +BenchmarkTigerTonic_GPlusAll 33936 35356 ns/op 11600 B/op 242 allocs/op +BenchmarkTraffic_GPlusAll 17833 68181 ns/op 26248 B/op 341 allocs/op +BenchmarkVulcan_GPlusAll 120109 9861 ns/op 1274 B/op 39 allocs/op +``` + +## Parse.com + +```sh +BenchmarkGin_ParseStatic 18877833 63.5 ns/op 0 B/op 0 allocs/op + +BenchmarkAce_ParseStatic 19663731 60.8 ns/op 0 B/op 0 allocs/op +BenchmarkAero_ParseStatic 28967341 41.5 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseStatic 3006984 402 ns/op 120 B/op 3 allocs/op +BenchmarkBeego_ParseStatic 1000000 1031 ns/op 352 B/op 3 allocs/op +BenchmarkBone_ParseStatic 1782482 675 ns/op 144 B/op 3 allocs/op +BenchmarkChi_ParseStatic 1453261 819 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_ParseStatic 45023595 26.5 ns/op 0 B/op 0 allocs/op +BenchmarkEcho_ParseStatic 17330470 69.3 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseStatic 1644006 731 ns/op 296 B/op 5 allocs/op +BenchmarkGoji_ParseStatic 7026930 170 ns/op 0 B/op 0 allocs/op +BenchmarkGojiv2_ParseStatic 517618 2037 ns/op 1312 B/op 10 allocs/op +BenchmarkGoJsonRest_ParseStatic 1227080 975 ns/op 329 B/op 11 allocs/op 
+BenchmarkGoRestful_ParseStatic 192458 6659 ns/op 4256 B/op 13 allocs/op +BenchmarkGorillaMux_ParseStatic 744062 2109 ns/op 976 B/op 9 allocs/op +BenchmarkGowwwRouter_ParseStatic 37781062 31.8 ns/op 0 B/op 0 allocs/op +BenchmarkHttpRouter_ParseStatic 45311223 26.5 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseStatic 21383475 56.1 ns/op 0 B/op 0 allocs/op +BenchmarkKocha_ParseStatic 29953290 40.1 ns/op 0 B/op 0 allocs/op +BenchmarkLARS_ParseStatic 20036196 62.7 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseStatic 1000000 1740 ns/op 736 B/op 8 allocs/op +BenchmarkMartini_ParseStatic 404156 3801 ns/op 768 B/op 9 allocs/op +BenchmarkPat_ParseStatic 1547180 772 ns/op 240 B/op 5 allocs/op +BenchmarkPossum_ParseStatic 1608991 757 ns/op 416 B/op 3 allocs/op +BenchmarkR2router_ParseStatic 3177936 385 ns/op 144 B/op 4 allocs/op +BenchmarkRivet_ParseStatic 17783205 67.4 ns/op 0 B/op 0 allocs/op +BenchmarkTango_ParseStatic 1210777 990 ns/op 248 B/op 8 allocs/op +BenchmarkTigerTonic_ParseStatic 5316440 231 ns/op 48 B/op 1 allocs/op +BenchmarkTraffic_ParseStatic 496050 2539 ns/op 1256 B/op 19 allocs/op +BenchmarkVulcan_ParseStatic 2462798 488 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseParam 13393669 89.6 ns/op 0 B/op 0 allocs/op +BenchmarkAero_ParseParam 19836619 60.4 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseParam 1405954 864 ns/op 467 B/op 5 allocs/op +BenchmarkBeego_ParseParam 1000000 1065 ns/op 352 B/op 3 allocs/op +BenchmarkBone_ParseParam 1000000 1698 ns/op 896 B/op 7 allocs/op +BenchmarkChi_ParseParam 1356037 873 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_ParseParam 6241392 204 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_ParseParam 14088100 85.1 ns/op 0 B/op 0 allocs/op +BenchmarkGin_ParseParam 17426064 68.9 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseParam 1000000 1254 ns/op 664 B/op 8 allocs/op +BenchmarkGoji_ParseParam 1682574 713 ns/op 336 B/op 2 allocs/op +BenchmarkGojiv2_ParseParam 502224 2333 ns/op 1360 B/op 12 allocs/op 
+BenchmarkGoJsonRest_ParseParam 1000000 1401 ns/op 649 B/op 13 allocs/op +BenchmarkGoRestful_ParseParam 182623 7097 ns/op 4576 B/op 14 allocs/op +BenchmarkGorillaMux_ParseParam 482332 2477 ns/op 1280 B/op 10 allocs/op +BenchmarkGowwwRouter_ParseParam 1834873 657 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_ParseParam 23593393 51.0 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseParam 2100160 574 ns/op 352 B/op 3 allocs/op +BenchmarkKocha_ParseParam 4837220 252 ns/op 56 B/op 3 allocs/op +BenchmarkLARS_ParseParam 18411192 66.2 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseParam 571870 2398 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_ParseParam 286262 4268 ns/op 1072 B/op 10 allocs/op +BenchmarkPat_ParseParam 692906 2157 ns/op 992 B/op 15 allocs/op +BenchmarkPossum_ParseParam 1000000 1011 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_ParseParam 1722735 697 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_ParseParam 6058054 203 ns/op 48 B/op 1 allocs/op +BenchmarkTango_ParseParam 1000000 1061 ns/op 280 B/op 8 allocs/op +BenchmarkTigerTonic_ParseParam 890275 2277 ns/op 784 B/op 15 allocs/op +BenchmarkTraffic_ParseParam 351322 3543 ns/op 1896 B/op 21 allocs/op +BenchmarkVulcan_ParseParam 2076544 572 ns/op 98 B/op 3 allocs/op +BenchmarkAce_Parse2Params 11718074 101 ns/op 0 B/op 0 allocs/op +BenchmarkAero_Parse2Params 16264988 73.4 ns/op 0 B/op 0 allocs/op +BenchmarkBear_Parse2Params 1238322 973 ns/op 496 B/op 5 allocs/op +BenchmarkBeego_Parse2Params 1000000 1120 ns/op 352 B/op 3 allocs/op +BenchmarkBone_Parse2Params 1000000 1632 ns/op 848 B/op 6 allocs/op +BenchmarkChi_Parse2Params 1239477 955 ns/op 432 B/op 3 allocs/op +BenchmarkDenco_Parse2Params 4944133 245 ns/op 64 B/op 1 allocs/op +BenchmarkEcho_Parse2Params 10518286 114 ns/op 0 B/op 0 allocs/op +BenchmarkGin_Parse2Params 14505195 82.7 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_Parse2Params 1000000 1437 ns/op 712 B/op 9 allocs/op +BenchmarkGoji_Parse2Params 1689883 707 ns/op 336 B/op 2 allocs/op 
+BenchmarkGojiv2_Parse2Params 502334 2308 ns/op 1344 B/op 11 allocs/op +BenchmarkGoJsonRest_Parse2Params 1000000 1771 ns/op 713 B/op 14 allocs/op +BenchmarkGoRestful_Parse2Params 159092 7583 ns/op 4928 B/op 14 allocs/op +BenchmarkGorillaMux_Parse2Params 417548 2980 ns/op 1296 B/op 10 allocs/op +BenchmarkGowwwRouter_Parse2Params 1751737 686 ns/op 432 B/op 3 allocs/op +BenchmarkHttpRouter_Parse2Params 18089204 66.3 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_Parse2Params 1556986 777 ns/op 384 B/op 4 allocs/op +BenchmarkKocha_Parse2Params 2493082 485 ns/op 128 B/op 5 allocs/op +BenchmarkLARS_Parse2Params 15350108 78.5 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_Parse2Params 530974 2605 ns/op 1072 B/op 10 allocs/op +BenchmarkMartini_Parse2Params 247069 4673 ns/op 1152 B/op 11 allocs/op +BenchmarkPat_Parse2Params 816295 2126 ns/op 752 B/op 16 allocs/op +BenchmarkPossum_Parse2Params 1000000 1002 ns/op 496 B/op 5 allocs/op +BenchmarkR2router_Parse2Params 1569771 733 ns/op 432 B/op 5 allocs/op +BenchmarkRivet_Parse2Params 4080546 295 ns/op 96 B/op 1 allocs/op +BenchmarkTango_Parse2Params 1000000 1121 ns/op 312 B/op 8 allocs/op +BenchmarkTigerTonic_Parse2Params 399556 3470 ns/op 1168 B/op 22 allocs/op +BenchmarkTraffic_Parse2Params 314194 4159 ns/op 1944 B/op 22 allocs/op +BenchmarkVulcan_Parse2Params 1827559 664 ns/op 98 B/op 3 allocs/op +BenchmarkAce_ParseAll 478395 2503 ns/op 0 B/op 0 allocs/op +BenchmarkAero_ParseAll 715392 1658 ns/op 0 B/op 0 allocs/op +BenchmarkBear_ParseAll 59191 20124 ns/op 8928 B/op 110 allocs/op +BenchmarkBeego_ParseAll 45507 27266 ns/op 9152 B/op 78 allocs/op +BenchmarkBone_ParseAll 29328 41459 ns/op 16208 B/op 147 allocs/op +BenchmarkChi_ParseAll 48531 25053 ns/op 11232 B/op 78 allocs/op +BenchmarkDenco_ParseAll 325532 4284 ns/op 928 B/op 16 allocs/op +BenchmarkEcho_ParseAll 433771 2759 ns/op 0 B/op 0 allocs/op +BenchmarkGin_ParseAll 576316 2082 ns/op 0 B/op 0 allocs/op +BenchmarkGocraftWeb_ParseAll 41500 29692 ns/op 13728 B/op 181 allocs/op 
+BenchmarkGoji_ParseAll 80833 15563 ns/op 5376 B/op 32 allocs/op +BenchmarkGojiv2_ParseAll 19836 60335 ns/op 34448 B/op 277 allocs/op +BenchmarkGoJsonRest_ParseAll 32210 38027 ns/op 13866 B/op 321 allocs/op +BenchmarkGoRestful_ParseAll 6644 190842 ns/op 117600 B/op 354 allocs/op +BenchmarkGorillaMux_ParseAll 12634 95894 ns/op 30288 B/op 250 allocs/op +BenchmarkGowwwRouter_ParseAll 98152 12159 ns/op 6912 B/op 48 allocs/op +BenchmarkHttpRouter_ParseAll 933208 1273 ns/op 0 B/op 0 allocs/op +BenchmarkHttpTreeMux_ParseAll 107191 11554 ns/op 5728 B/op 51 allocs/op +BenchmarkKocha_ParseAll 184862 6225 ns/op 1112 B/op 54 allocs/op +BenchmarkLARS_ParseAll 644546 1858 ns/op 0 B/op 0 allocs/op +BenchmarkMacaron_ParseAll 26145 46484 ns/op 19136 B/op 208 allocs/op +BenchmarkMartini_ParseAll 10000 121838 ns/op 25072 B/op 253 allocs/op +BenchmarkPat_ParseAll 25417 47196 ns/op 15216 B/op 308 allocs/op +BenchmarkPossum_ParseAll 58550 20735 ns/op 10816 B/op 78 allocs/op +BenchmarkR2router_ParseAll 72732 16584 ns/op 8352 B/op 120 allocs/op +BenchmarkRivet_ParseAll 281365 4968 ns/op 912 B/op 16 allocs/op +BenchmarkTango_ParseAll 42831 28668 ns/op 7168 B/op 208 allocs/op +BenchmarkTigerTonic_ParseAll 23774 49972 ns/op 16048 B/op 332 allocs/op +BenchmarkTraffic_ParseAll 10000 104679 ns/op 45520 B/op 605 allocs/op +BenchmarkVulcan_ParseAll 64810 18108 ns/op 2548 B/op 78 allocs/op +``` diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/CHANGELOG.md b/terraform-server/vendor/github.com/gin-gonic/gin/CHANGELOG.md new file mode 100644 index 00000000..dc2c2f55 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/CHANGELOG.md @@ -0,0 +1,412 @@ +# Gin ChangeLog + +## Gin v1.7.1 + +### BUGFIXES + +* fix: data race with trustedCIDRs from [#2674](https://github.com/gin-gonic/gin/issues/2674)([#2675](https://github.com/gin-gonic/gin/pull/2675)) + +## Gin v1.7.0 + +### BUGFIXES + +* fix compile error from [#2572](https://github.com/gin-gonic/gin/pull/2572) 
([#2600](https://github.com/gin-gonic/gin/pull/2600)) +* fix: print headers without Authorization header on broken pipe ([#2528](https://github.com/gin-gonic/gin/pull/2528)) +* fix(tree): reassign fullpath when register new node ([#2366](https://github.com/gin-gonic/gin/pull/2366)) + +### ENHANCEMENTS + +* Support params and exact routes without creating conflicts ([#2663](https://github.com/gin-gonic/gin/pull/2663)) +* chore: improve render string performance ([#2365](https://github.com/gin-gonic/gin/pull/2365)) +* Sync route tree to httprouter latest code ([#2368](https://github.com/gin-gonic/gin/pull/2368)) +* chore: rename getQueryCache/getFormCache to initQueryCache/initFormCa ([#2375](https://github.com/gin-gonic/gin/pull/2375)) +* chore(performance): improve countParams ([#2378](https://github.com/gin-gonic/gin/pull/2378)) +* Remove some functions that have the same effect as the bytes package ([#2387](https://github.com/gin-gonic/gin/pull/2387)) +* update:SetMode function ([#2321](https://github.com/gin-gonic/gin/pull/2321)) +* remove a unused type SecureJSONPrefix ([#2391](https://github.com/gin-gonic/gin/pull/2391)) +* Add a redirect sample for POST method ([#2389](https://github.com/gin-gonic/gin/pull/2389)) +* Add CustomRecovery builtin middleware ([#2322](https://github.com/gin-gonic/gin/pull/2322)) +* binding: avoid 2038 problem on 32-bit architectures ([#2450](https://github.com/gin-gonic/gin/pull/2450)) +* Prevent panic in Context.GetQuery() when there is no Request ([#2412](https://github.com/gin-gonic/gin/pull/2412)) +* Add GetUint and GetUint64 method on gin.context ([#2487](https://github.com/gin-gonic/gin/pull/2487)) +* update content-disposition header to MIME-style ([#2512](https://github.com/gin-gonic/gin/pull/2512)) +* reduce allocs and improve the render `WriteString` ([#2508](https://github.com/gin-gonic/gin/pull/2508)) +* implement ".Unwrap() error" on Error type ([#2525](https://github.com/gin-gonic/gin/pull/2525)) 
([#2526](https://github.com/gin-gonic/gin/pull/2526)) +* Allow bind with a map[string]string ([#2484](https://github.com/gin-gonic/gin/pull/2484)) +* chore: update tree ([#2371](https://github.com/gin-gonic/gin/pull/2371)) +* Support binding for slice/array obj [Rewrite] ([#2302](https://github.com/gin-gonic/gin/pull/2302)) +* basic auth: fix timing oracle ([#2609](https://github.com/gin-gonic/gin/pull/2609)) +* Add mixed param and non-param paths (port of httprouter[#329](https://github.com/gin-gonic/gin/pull/329)) ([#2663](https://github.com/gin-gonic/gin/pull/2663)) +* feat(engine): add trustedproxies and remoteIP ([#2632](https://github.com/gin-gonic/gin/pull/2632)) + +## Gin v1.6.3 + +### ENHANCEMENTS + + * Improve performance: Change `*sync.RWMutex` to `sync.RWMutex` in context. [#2351](https://github.com/gin-gonic/gin/pull/2351) + +## Gin v1.6.2 + +### BUGFIXES + * fix missing initial sync.RWMutex [#2305](https://github.com/gin-gonic/gin/pull/2305) +### ENHANCEMENTS + * Add set samesite in cookie. [#2306](https://github.com/gin-gonic/gin/pull/2306) + +## Gin v1.6.1 + +### BUGFIXES + * Revert "fix accept incoming network connections" [#2294](https://github.com/gin-gonic/gin/pull/2294) + +## Gin v1.6.0 + +### BREAKING + * chore(performance): Improve performance for adding RemoveExtraSlash flag [#2159](https://github.com/gin-gonic/gin/pull/2159) + * drop support govendor [#2148](https://github.com/gin-gonic/gin/pull/2148) + * Added support for SameSite cookie flag [#1615](https://github.com/gin-gonic/gin/pull/1615) +### FEATURES + * add yaml negotiation [#2220](https://github.com/gin-gonic/gin/pull/2220) + * FileFromFS [#2112](https://github.com/gin-gonic/gin/pull/2112) +### BUGFIXES + * Unix Socket Handling [#2280](https://github.com/gin-gonic/gin/pull/2280) + * Use json marshall in context json to fix breaking new line issue. 
Fixes #2209 [#2228](https://github.com/gin-gonic/gin/pull/2228) + * fix accept incoming network connections [#2216](https://github.com/gin-gonic/gin/pull/2216) + * Fixed a bug in the calculation of the maximum number of parameters [#2166](https://github.com/gin-gonic/gin/pull/2166) + * [FIX] allow empty headers on DataFromReader [#2121](https://github.com/gin-gonic/gin/pull/2121) + * Add mutex for protect Context.Keys map [#1391](https://github.com/gin-gonic/gin/pull/1391) +### ENHANCEMENTS + * Add mitigation for log injection [#2277](https://github.com/gin-gonic/gin/pull/2277) + * tree: range over nodes values [#2229](https://github.com/gin-gonic/gin/pull/2229) + * tree: remove duplicate assignment [#2222](https://github.com/gin-gonic/gin/pull/2222) + * chore: upgrade go-isatty and json-iterator/go [#2215](https://github.com/gin-gonic/gin/pull/2215) + * path: sync code with httprouter [#2212](https://github.com/gin-gonic/gin/pull/2212) + * Use zero-copy approach to convert types between string and byte slice [#2206](https://github.com/gin-gonic/gin/pull/2206) + * Reuse bytes when cleaning the URL paths [#2179](https://github.com/gin-gonic/gin/pull/2179) + * tree: remove one else statement [#2177](https://github.com/gin-gonic/gin/pull/2177) + * tree: sync httprouter update (#2173) (#2172) [#2171](https://github.com/gin-gonic/gin/pull/2171) + * tree: sync part httprouter codes and reduce if/else [#2163](https://github.com/gin-gonic/gin/pull/2163) + * use http method constant [#2155](https://github.com/gin-gonic/gin/pull/2155) + * upgrade go-validator to v10 [#2149](https://github.com/gin-gonic/gin/pull/2149) + * Refactor redirect request in gin.go [#1970](https://github.com/gin-gonic/gin/pull/1970) + * Add build tag nomsgpack [#1852](https://github.com/gin-gonic/gin/pull/1852) +### DOCS + * docs(path): improve comments [#2223](https://github.com/gin-gonic/gin/pull/2223) + * Renew README to fit the modification of SetCookie method 
[#2217](https://github.com/gin-gonic/gin/pull/2217) + * Fix spelling [#2202](https://github.com/gin-gonic/gin/pull/2202) + * Remove broken link from README. [#2198](https://github.com/gin-gonic/gin/pull/2198) + * Update docs on Context.Done(), Context.Deadline() and Context.Err() [#2196](https://github.com/gin-gonic/gin/pull/2196) + * Update validator to v10 [#2190](https://github.com/gin-gonic/gin/pull/2190) + * upgrade go-validator to v10 for README [#2189](https://github.com/gin-gonic/gin/pull/2189) + * Update to currently output [#2188](https://github.com/gin-gonic/gin/pull/2188) + * Fix "Custom Validators" example [#2186](https://github.com/gin-gonic/gin/pull/2186) + * Add project to README [#2165](https://github.com/gin-gonic/gin/pull/2165) + * docs(benchmarks): for gin v1.5 [#2153](https://github.com/gin-gonic/gin/pull/2153) + * Changed wording for clarity in README.md [#2122](https://github.com/gin-gonic/gin/pull/2122) +### MISC + * ci support go1.14 [#2262](https://github.com/gin-gonic/gin/pull/2262) + * chore: upgrade depend version [#2231](https://github.com/gin-gonic/gin/pull/2231) + * Drop support go1.10 [#2147](https://github.com/gin-gonic/gin/pull/2147) + * fix comment in `mode.go` [#2129](https://github.com/gin-gonic/gin/pull/2129) + +## Gin v1.5.0 + +- [FIX] Use DefaultWriter and DefaultErrorWriter for debug messages [#1891](https://github.com/gin-gonic/gin/pull/1891) +- [NEW] Now you can parse the inline lowercase start structure [#1893](https://github.com/gin-gonic/gin/pull/1893) +- [FIX] Some code improvements [#1909](https://github.com/gin-gonic/gin/pull/1909) +- [FIX] Use encode replace json marshal increase json encoder speed [#1546](https://github.com/gin-gonic/gin/pull/1546) +- [NEW] Hold matched route full path in the Context [#1826](https://github.com/gin-gonic/gin/pull/1826) +- [FIX] Fix context.Params race condition on Copy() [#1841](https://github.com/gin-gonic/gin/pull/1841) +- [NEW] Add context param query cache 
[#1450](https://github.com/gin-gonic/gin/pull/1450) +- [FIX] Improve GetQueryMap performance [#1918](https://github.com/gin-gonic/gin/pull/1918) +- [FIX] Improve get post data [#1920](https://github.com/gin-gonic/gin/pull/1920) +- [FIX] Use context instead of x/net/context [#1922](https://github.com/gin-gonic/gin/pull/1922) +- [FIX] Attempt to fix PostForm cache bug [#1931](https://github.com/gin-gonic/gin/pull/1931) +- [NEW] Add support of multipart multi files [#1949](https://github.com/gin-gonic/gin/pull/1949) +- [NEW] Support bind http header param [#1957](https://github.com/gin-gonic/gin/pull/1957) +- [FIX] Drop support for go1.8 and go1.9 [#1933](https://github.com/gin-gonic/gin/pull/1933) +- [FIX] Bugfix for the FullPath feature [#1919](https://github.com/gin-gonic/gin/pull/1919) +- [FIX] Gin1.5 bytes.Buffer to strings.Builder [#1939](https://github.com/gin-gonic/gin/pull/1939) +- [FIX] Upgrade github.com/ugorji/go/codec [#1969](https://github.com/gin-gonic/gin/pull/1969) +- [NEW] Support bind unix time [#1980](https://github.com/gin-gonic/gin/pull/1980) +- [FIX] Simplify code [#2004](https://github.com/gin-gonic/gin/pull/2004) +- [NEW] Support negative Content-Length in DataFromReader [#1981](https://github.com/gin-gonic/gin/pull/1981) +- [FIX] Identify terminal on a RISC-V architecture for auto-colored logs [#2019](https://github.com/gin-gonic/gin/pull/2019) +- [BREAKING] `Context.JSONP()` now expects a semicolon (`;`) at the end [#2007](https://github.com/gin-gonic/gin/pull/2007) +- [BREAKING] Upgrade default `binding.Validator` to v9 (see [its changelog](https://github.com/go-playground/validator/releases/tag/v9.0.0)) [#1015](https://github.com/gin-gonic/gin/pull/1015) +- [NEW] Add `DisallowUnknownFields()` in `Context.BindJSON()` [#2028](https://github.com/gin-gonic/gin/pull/2028) +- [NEW] Use specific `net.Listener` with `Engine.RunListener()` [#2023](https://github.com/gin-gonic/gin/pull/2023) +- [FIX] Fix some typo 
[#2079](https://github.com/gin-gonic/gin/pull/2079) [#2080](https://github.com/gin-gonic/gin/pull/2080) +- [FIX] Relocate binding body tests [#2086](https://github.com/gin-gonic/gin/pull/2086) +- [FIX] Use Writer in Context.Status [#1606](https://github.com/gin-gonic/gin/pull/1606) +- [FIX] `Engine.RunUnix()` now returns the error if it can't change the file mode [#2093](https://github.com/gin-gonic/gin/pull/2093) +- [FIX] `RouterGroup.StaticFS()` leaked files. Now it closes them. [#2118](https://github.com/gin-gonic/gin/pull/2118) +- [FIX] `Context.Request.FormFile` leaked file. Now it closes it. [#2114](https://github.com/gin-gonic/gin/pull/2114) +- [FIX] Ignore walking on `form:"-"` mapping [#1943](https://github.com/gin-gonic/gin/pull/1943) + +### Gin v1.4.0 + +- [NEW] Support for [Go Modules](https://github.com/golang/go/wiki/Modules) [#1569](https://github.com/gin-gonic/gin/pull/1569) +- [NEW] Refactor of form mapping multipart request [#1829](https://github.com/gin-gonic/gin/pull/1829) +- [FIX] Truncate Latency precision in long running request [#1830](https://github.com/gin-gonic/gin/pull/1830) +- [FIX] IsTerm flag should not be affected by DisableConsoleColor method. [#1802](https://github.com/gin-gonic/gin/pull/1802) +- [NEW] Supporting file binding [#1264](https://github.com/gin-gonic/gin/pull/1264) +- [NEW] Add support for mapping arrays [#1797](https://github.com/gin-gonic/gin/pull/1797) +- [FIX] Readme updates [#1793](https://github.com/gin-gonic/gin/pull/1793) [#1788](https://github.com/gin-gonic/gin/pull/1788) [1789](https://github.com/gin-gonic/gin/pull/1789) +- [FIX] StaticFS: Fixed Logging two log lines on 404. 
[#1805](https://github.com/gin-gonic/gin/pull/1805), [#1804](https://github.com/gin-gonic/gin/pull/1804) +- [NEW] Make context.Keys available as LogFormatterParams [#1779](https://github.com/gin-gonic/gin/pull/1779) +- [NEW] Use internal/json for Marshal/Unmarshal [#1791](https://github.com/gin-gonic/gin/pull/1791) +- [NEW] Support mapping time.Duration [#1794](https://github.com/gin-gonic/gin/pull/1794) +- [NEW] Refactor form mappings [#1749](https://github.com/gin-gonic/gin/pull/1749) +- [NEW] Added flag to context.Stream indicates if client disconnected in middle of stream [#1252](https://github.com/gin-gonic/gin/pull/1252) +- [FIX] Moved [examples](https://github.com/gin-gonic/examples) to stand alone Repo [#1775](https://github.com/gin-gonic/gin/pull/1775) +- [NEW] Extend context.File to allow for the content-disposition attachments via a new method context.Attachment [#1260](https://github.com/gin-gonic/gin/pull/1260) +- [FIX] Support HTTP content negotiation wildcards [#1112](https://github.com/gin-gonic/gin/pull/1112) +- [NEW] Add prefix from X-Forwarded-Prefix in redirectTrailingSlash [#1238](https://github.com/gin-gonic/gin/pull/1238) +- [FIX] context.Copy() race condition [#1020](https://github.com/gin-gonic/gin/pull/1020) +- [NEW] Add context.HandlerNames() [#1729](https://github.com/gin-gonic/gin/pull/1729) +- [FIX] Change color methods to public in the defaultLogger. [#1771](https://github.com/gin-gonic/gin/pull/1771) +- [FIX] Update writeHeaders method to use http.Header.Set [#1722](https://github.com/gin-gonic/gin/pull/1722) +- [NEW] Add response size to LogFormatterParams [#1752](https://github.com/gin-gonic/gin/pull/1752) +- [NEW] Allow ignoring field on form mapping [#1733](https://github.com/gin-gonic/gin/pull/1733) +- [NEW] Add a function to force color in console output. [#1724](https://github.com/gin-gonic/gin/pull/1724) +- [FIX] Context.Next() - recheck len of handlers on every iteration. 
[#1745](https://github.com/gin-gonic/gin/pull/1745) +- [FIX] Fix all errcheck warnings [#1739](https://github.com/gin-gonic/gin/pull/1739) [#1653](https://github.com/gin-gonic/gin/pull/1653) +- [NEW] context: inherits context cancellation and deadline from http.Request context for Go>=1.7 [#1690](https://github.com/gin-gonic/gin/pull/1690) +- [NEW] Binding for URL Params [#1694](https://github.com/gin-gonic/gin/pull/1694) +- [NEW] Add LoggerWithFormatter method [#1677](https://github.com/gin-gonic/gin/pull/1677) +- [FIX] CI testing updates [#1671](https://github.com/gin-gonic/gin/pull/1671) [#1670](https://github.com/gin-gonic/gin/pull/1670) [#1682](https://github.com/gin-gonic/gin/pull/1682) [#1669](https://github.com/gin-gonic/gin/pull/1669) +- [FIX] StaticFS(): Send 404 when path does not exist [#1663](https://github.com/gin-gonic/gin/pull/1663) +- [FIX] Handle nil body for JSON binding [#1638](https://github.com/gin-gonic/gin/pull/1638) +- [FIX] Support bind uri param [#1612](https://github.com/gin-gonic/gin/pull/1612) +- [FIX] recovery: fix issue with syscall import on google app engine [#1640](https://github.com/gin-gonic/gin/pull/1640) +- [FIX] Make sure the debug log contains line breaks [#1650](https://github.com/gin-gonic/gin/pull/1650) +- [FIX] Panic stack trace being printed during recovery of broken pipe [#1089](https://github.com/gin-gonic/gin/pull/1089) [#1259](https://github.com/gin-gonic/gin/pull/1259) +- [NEW] RunFd method to run http.Server through a file descriptor [#1609](https://github.com/gin-gonic/gin/pull/1609) +- [NEW] Yaml binding support [#1618](https://github.com/gin-gonic/gin/pull/1618) +- [FIX] Pass MaxMultipartMemory when FormFile is called [#1600](https://github.com/gin-gonic/gin/pull/1600) +- [FIX] LoadHTML* tests [#1559](https://github.com/gin-gonic/gin/pull/1559) +- [FIX] Removed use of sync.pool from HandleContext [#1565](https://github.com/gin-gonic/gin/pull/1565) +- [FIX] Format output log to os.Stderr 
[#1571](https://github.com/gin-gonic/gin/pull/1571) +- [FIX] Make logger use a yellow background and a darkgray text for legibility [#1570](https://github.com/gin-gonic/gin/pull/1570) +- [FIX] Remove sensitive request information from panic log. [#1370](https://github.com/gin-gonic/gin/pull/1370) +- [FIX] log.Println() does not print timestamp [#829](https://github.com/gin-gonic/gin/pull/829) [#1560](https://github.com/gin-gonic/gin/pull/1560) +- [NEW] Add PureJSON renderer [#694](https://github.com/gin-gonic/gin/pull/694) +- [FIX] Add missing copyright and update if/else [#1497](https://github.com/gin-gonic/gin/pull/1497) +- [FIX] Update msgpack usage [#1498](https://github.com/gin-gonic/gin/pull/1498) +- [FIX] Use protobuf on render [#1496](https://github.com/gin-gonic/gin/pull/1496) +- [FIX] Add support for Protobuf format response [#1479](https://github.com/gin-gonic/gin/pull/1479) +- [NEW] Set default time format in form binding [#1487](https://github.com/gin-gonic/gin/pull/1487) +- [FIX] Add BindXML and ShouldBindXML [#1485](https://github.com/gin-gonic/gin/pull/1485) +- [NEW] Upgrade dependency libraries [#1491](https://github.com/gin-gonic/gin/pull/1491) + + +## Gin v1.3.0 + +- [NEW] Add [`func (*Context) QueryMap`](https://godoc.org/github.com/gin-gonic/gin#Context.QueryMap), [`func (*Context) GetQueryMap`](https://godoc.org/github.com/gin-gonic/gin#Context.GetQueryMap), [`func (*Context) PostFormMap`](https://godoc.org/github.com/gin-gonic/gin#Context.PostFormMap) and [`func (*Context) GetPostFormMap`](https://godoc.org/github.com/gin-gonic/gin#Context.GetPostFormMap) to support `type map[string]string` as query string or form parameters, see [#1383](https://github.com/gin-gonic/gin/pull/1383) +- [NEW] Add [`func (*Context) AsciiJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.AsciiJSON), see [#1358](https://github.com/gin-gonic/gin/pull/1358) +- [NEW] Add `Pusher()` in [`type 
ResponseWriter`](https://godoc.org/github.com/gin-gonic/gin#ResponseWriter) for supporting http2 push, see [#1273](https://github.com/gin-gonic/gin/pull/1273) +- [NEW] Add [`func (*Context) DataFromReader`](https://godoc.org/github.com/gin-gonic/gin#Context.DataFromReader) for serving dynamic data, see [#1304](https://github.com/gin-gonic/gin/pull/1304) +- [NEW] Add [`func (*Context) ShouldBindBodyWith`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindBodyWith) allowing to call binding multiple times, see [#1341](https://github.com/gin-gonic/gin/pull/1341) +- [NEW] Support pointers in form binding, see [#1336](https://github.com/gin-gonic/gin/pull/1336) +- [NEW] Add [`func (*Context) JSONP`](https://godoc.org/github.com/gin-gonic/gin#Context.JSONP), see [#1333](https://github.com/gin-gonic/gin/pull/1333) +- [NEW] Support default value in form binding, see [#1138](https://github.com/gin-gonic/gin/pull/1138) +- [NEW] Expose validator engine in [`type StructValidator`](https://godoc.org/github.com/gin-gonic/gin/binding#StructValidator), see [#1277](https://github.com/gin-gonic/gin/pull/1277) +- [NEW] Add [`func (*Context) ShouldBind`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBind), [`func (*Context) ShouldBindQuery`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindQuery) and [`func (*Context) ShouldBindJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.ShouldBindJSON), see [#1047](https://github.com/gin-gonic/gin/pull/1047) +- [NEW] Add support for `time.Time` location in form binding, see [#1117](https://github.com/gin-gonic/gin/pull/1117) +- [NEW] Add [`func (*Context) BindQuery`](https://godoc.org/github.com/gin-gonic/gin#Context.BindQuery), see [#1029](https://github.com/gin-gonic/gin/pull/1029) +- [NEW] Make [jsonite](https://github.com/json-iterator/go) optional with build tags, see [#1026](https://github.com/gin-gonic/gin/pull/1026) +- [NEW] Show query string in logger, see 
[#999](https://github.com/gin-gonic/gin/pull/999) +- [NEW] Add [`func (*Context) SecureJSON`](https://godoc.org/github.com/gin-gonic/gin#Context.SecureJSON), see [#987](https://github.com/gin-gonic/gin/pull/987) and [#993](https://github.com/gin-gonic/gin/pull/993) +- [DEPRECATE] `func (*Context) GetCookie` for [`func (*Context) Cookie`](https://godoc.org/github.com/gin-gonic/gin#Context.Cookie) +- [FIX] Don't display color tags if [`func DisableConsoleColor`](https://godoc.org/github.com/gin-gonic/gin#DisableConsoleColor) called, see [#1072](https://github.com/gin-gonic/gin/pull/1072) +- [FIX] Gin Mode `""` when calling [`func Mode`](https://godoc.org/github.com/gin-gonic/gin#Mode) now returns `const DebugMode`, see [#1250](https://github.com/gin-gonic/gin/pull/1250) +- [FIX] `Flush()` now doesn't overwrite `responseWriter` status code, see [#1460](https://github.com/gin-gonic/gin/pull/1460) + +## Gin 1.2.0 + +- [NEW] Switch from godeps to govendor +- [NEW] Add support for Let's Encrypt via gin-gonic/autotls +- [NEW] Improve README examples and add extra at examples folder +- [NEW] Improved support with App Engine +- [NEW] Add custom template delimiters, see #860 +- [NEW] Add Template Func Maps, see #962 +- [NEW] Add \*context.Handler(), see #928 +- [NEW] Add \*context.GetRawData() +- [NEW] Add \*context.GetHeader() (request) +- [NEW] Add \*context.AbortWithStatusJSON() (JSON content type) +- [NEW] Add \*context.Keys type cast helpers +- [NEW] Add \*context.ShouldBindWith() +- [NEW] Add \*context.MustBindWith() +- [NEW] Add \*engine.SetFuncMap() +- [DEPRECATE] On next release: \*context.BindWith(), see #855 +- [FIX] Refactor render +- [FIX] Reworked tests +- [FIX] logger now supports cygwin +- [FIX] Use X-Forwarded-For before X-Real-Ip +- [FIX] time.Time binding (#904) + +## Gin 1.1.4 + +- [NEW] Support google appengine for IsTerminal func + +## Gin 1.1.3 + +- [FIX] Reverted Logger: skip ANSI color commands + +## Gin 1.1 + +- [NEW] Implement QueryArray and 
PostArray methods +- [NEW] Refactor GetQuery and GetPostForm +- [NEW] Add contribution guide +- [FIX] Corrected typos in README +- [FIX] Removed additional Iota +- [FIX] Changed imports to gopkg instead of github in README (#733) +- [FIX] Logger: skip ANSI color commands if output is not a tty + +## Gin 1.0rc2 (...) + +- [PERFORMANCE] Fast path for writing Content-Type. +- [PERFORMANCE] Much faster 404 routing +- [PERFORMANCE] Allocation optimizations +- [PERFORMANCE] Faster root tree lookup +- [PERFORMANCE] Zero overhead, String() and JSON() rendering. +- [PERFORMANCE] Faster ClientIP parsing +- [PERFORMANCE] Much faster SSE implementation +- [NEW] Benchmarks suite +- [NEW] Bind validation can be disabled and replaced with custom validators. +- [NEW] More flexible HTML render +- [NEW] Multipart and PostForm bindings +- [NEW] Adds method to return all the registered routes +- [NEW] Context.HandlerName() returns the main handler's name +- [NEW] Adds Error.IsType() helper +- [FIX] Binding multipart form +- [FIX] Integration tests +- [FIX] Crash when binding non struct object in Context. +- [FIX] RunTLS() implementation +- [FIX] Logger() unit tests +- [FIX] Adds SetHTMLTemplate() warning +- [FIX] Context.IsAborted() +- [FIX] More unit tests +- [FIX] JSON, XML, HTML renders accept custom content-types +- [FIX] gin.AbortIndex is unexported +- [FIX] Better approach to avoid directory listing in StaticFS() +- [FIX] Context.ClientIP() always returns the IP with trimmed spaces. +- [FIX] Better warning when running in debug mode. +- [FIX] Google App Engine integration. debugPrint does not use os.Stdout +- [FIX] Fixes integer overflow in error type +- [FIX] Error implements the json.Marshaller interface +- [FIX] MIT license in every file + + +## Gin 1.0rc1 (May 22, 2015) + +- [PERFORMANCE] Zero allocation router +- [PERFORMANCE] Faster JSON, XML and text rendering +- [PERFORMANCE] Custom hand optimized HttpRouter for Gin +- [PERFORMANCE] Misc code optimizations. 
Inlining, tail call optimizations +- [NEW] Built-in support for golang.org/x/net/context +- [NEW] Any(path, handler). Create a route that matches any path +- [NEW] Refactored rendering pipeline (faster and static typed) +- [NEW] Refactored errors API +- [NEW] IndentedJSON() prints pretty JSON +- [NEW] Added gin.DefaultWriter +- [NEW] UNIX socket support +- [NEW] RouterGroup.BasePath is exposed +- [NEW] JSON validation using go-validate-yourself (very powerful options) +- [NEW] Completed suite of unit tests +- [NEW] HTTP streaming with c.Stream() +- [NEW] StaticFile() creates a router for serving just one file. +- [NEW] StaticFS() has an option to disable directory listing. +- [NEW] StaticFS() for serving static files through virtual filesystems +- [NEW] Server-Sent Events native support +- [NEW] WrapF() and WrapH() helpers for wrapping http.HandlerFunc and http.Handler +- [NEW] Added LoggerWithWriter() middleware +- [NEW] Added RecoveryWithWriter() middleware +- [NEW] Added DefaultPostFormValue() +- [NEW] Added DefaultFormValue() +- [NEW] Added DefaultParamValue() +- [FIX] BasicAuth() when using custom realm +- [FIX] Bug when serving static files in nested routing group +- [FIX] Redirect using built-in http.Redirect() +- [FIX] Logger when printing the requested path +- [FIX] Documentation typos +- [FIX] Context.Engine renamed to Context.engine +- [FIX] Better debugging messages +- [FIX] ErrorLogger +- [FIX] Debug HTTP render +- [FIX] Refactored binding and render modules +- [FIX] Refactored Context initialization +- [FIX] Refactored BasicAuth() +- [FIX] NoMethod/NoRoute handlers +- [FIX] Hijacking http +- [FIX] Better support for Google App Engine (using log instead of fmt) + + +## Gin 0.6 (Mar 9, 2015) + +- [NEW] Support multipart/form-data +- [NEW] NoMethod handler +- [NEW] Validate sub structures +- [NEW] Support for HTTP Realm Auth +- [FIX] Unsigned integers in binding +- [FIX] Improve color logger + + +## Gin 0.5 (Feb 7, 2015) + +- [NEW] Content Negotiation +- 
[FIX] Solved security bug that allowed a client to spoof ip +- [FIX] Fix unexported/ignored fields in binding + + +## Gin 0.4 (Aug 21, 2014) + +- [NEW] Development mode +- [NEW] Unit tests +- [NEW] Add Content.Redirect() +- [FIX] Deferring WriteHeader() +- [FIX] Improved documentation for model binding + + +## Gin 0.3 (Jul 18, 2014) + +- [PERFORMANCE] Normal log and error log are printed in the same call. +- [PERFORMANCE] Improve performance of NoRouter() +- [PERFORMANCE] Improve context's memory locality, reduce CPU cache faults. +- [NEW] Flexible rendering API +- [NEW] Add Context.File() +- [NEW] Add shortcut RunTLS() for http.ListenAndServeTLS +- [FIX] Rename NotFound404() to NoRoute() +- [FIX] Errors in context are purged +- [FIX] Adds HEAD method in Static file serving +- [FIX] Refactors Static() file serving +- [FIX] Using keyed initialization to fix app-engine integration +- [FIX] Can't unmarshal JSON array, #63 +- [FIX] Renaming Context.Req to Context.Request +- [FIX] Check application/x-www-form-urlencoded when parsing form + + +## Gin 0.2b (Jul 08, 2014) +- [PERFORMANCE] Using sync.Pool to reduce allocation/GC overhead +- [NEW] Travis CI integration +- [NEW] Completely new logger +- [NEW] New API for serving static files. gin.Static() +- [NEW] gin.H() can be serialized into XML +- [NEW] Typed errors. Errors can be typed. Internal/external/custom. +- [NEW] Support for Godeps +- [NEW] Travis/Godocs badges in README +- [NEW] New Bind() and BindWith() methods for parsing request body. +- [NEW] Add Content.Copy() +- [NEW] Add context.LastError() +- [NEW] Add shortcut for OPTIONS HTTP method +- [FIX] Tons of README fixes +- [FIX] Header is written before body +- [FIX] BasicAuth() and changes API a little bit +- [FIX] Recovery() middleware only prints panics +- [FIX] Context.Get() does not panic anymore. Use MustGet() instead. 
+- [FIX] Multiple http.WriteHeader() in NotFound handlers +- [FIX] Engine.Run() panics if http server can't be set up +- [FIX] Crash when route path doesn't start with '/' +- [FIX] Do not update header when status code is negative +- [FIX] Setting response headers before calling WriteHeader in context.String() +- [FIX] Add MIT license +- [FIX] Changes behaviour of ErrorLogger() and Logger() diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md b/terraform-server/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md new file mode 100644 index 00000000..4ea14f39 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/CODE_OF_CONDUCT.md @@ -0,0 +1,46 @@ +# Contributor Covenant Code of Conduct + +## Our Pledge + +In the interest of fostering an open and welcoming environment, we as contributors and maintainers pledge to making participation in our project and our community a harassment-free experience for everyone, regardless of age, body size, disability, ethnicity, gender identity and expression, level of experience, nationality, personal appearance, race, religion, or sexual identity and orientation. 
+ +## Our Standards + +Examples of behavior that contributes to creating a positive environment include: + +* Using welcoming and inclusive language +* Being respectful of differing viewpoints and experiences +* Gracefully accepting constructive criticism +* Focusing on what is best for the community +* Showing empathy towards other community members + +Examples of unacceptable behavior by participants include: + +* The use of sexualized language or imagery and unwelcome sexual attention or advances +* Trolling, insulting/derogatory comments, and personal or political attacks +* Public or private harassment +* Publishing others' private information, such as a physical or electronic address, without explicit permission +* Other conduct which could reasonably be considered inappropriate in a professional setting + +## Our Responsibilities + +Project maintainers are responsible for clarifying the standards of acceptable behavior and are expected to take appropriate and fair corrective action in response to any instances of unacceptable behavior. + +Project maintainers have the right and responsibility to remove, edit, or reject comments, commits, code, wiki edits, issues, and other contributions that are not aligned to this Code of Conduct, or to ban temporarily or permanently any contributor for other behaviors that they deem inappropriate, threatening, offensive, or harmful. + +## Scope + +This Code of Conduct applies both within project spaces and in public spaces when an individual is representing the project or its community. Examples of representing a project or community include using an official project e-mail address, posting via an official social media account, or acting as an appointed representative at an online or offline event. Representation of a project may be further defined and clarified by project maintainers. 
+ +## Enforcement + +Instances of abusive, harassing, or otherwise unacceptable behavior may be reported by contacting the project team at teamgingonic@gmail.com. The project team will review and investigate all complaints, and will respond in a way that it deems appropriate to the circumstances. The project team is obligated to maintain confidentiality with regard to the reporter of an incident. Further details of specific enforcement policies may be posted separately. + +Project maintainers who do not follow or enforce the Code of Conduct in good faith may face temporary or permanent repercussions as determined by other members of the project's leadership. + +## Attribution + +This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4, available at [http://contributor-covenant.org/version/1/4][version] + +[homepage]: http://contributor-covenant.org +[version]: http://contributor-covenant.org/version/1/4/ diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md b/terraform-server/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md new file mode 100644 index 00000000..97daa808 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/CONTRIBUTING.md @@ -0,0 +1,13 @@ +## Contributing + +- With issues: + - Use the search tool before opening a new issue. + - Please provide source code and commit sha if you found a bug. + - Review existing issues and provide feedback or react to them. + +- With pull requests: + - Open your pull request against `master` + - Your pull request should have no more than two commits, if not you should squash them. + - It should pass all tests in the available continuous integration systems such as TravisCI. + - You should add/modify tests to cover your proposed code changes. + - If your pull request contains a new feature, please document it on the README. 
diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/LICENSE b/terraform-server/vendor/github.com/gin-gonic/gin/LICENSE new file mode 100644 index 00000000..1ff7f370 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2014 Manuel Martínez-Almeida + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in +all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN +THE SOFTWARE. diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/Makefile b/terraform-server/vendor/github.com/gin-gonic/gin/Makefile new file mode 100644 index 00000000..1a991939 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/Makefile @@ -0,0 +1,71 @@ +GO ?= go +GOFMT ?= gofmt "-s" +PACKAGES ?= $(shell $(GO) list ./...) +VETPACKAGES ?= $(shell $(GO) list ./... | grep -v /examples/) +GOFILES := $(shell find . -name "*.go") +TESTFOLDER := $(shell $(GO) list ./... 
| grep -E 'gin$$|binding$$|render$$' | grep -v examples) +TESTTAGS ?= "" + +.PHONY: test +test: + echo "mode: count" > coverage.out + for d in $(TESTFOLDER); do \ + $(GO) test -tags $(TESTTAGS) -v -covermode=count -coverprofile=profile.out $$d > tmp.out; \ + cat tmp.out; \ + if grep -q "^--- FAIL" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "build failed" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + elif grep -q "setup failed" tmp.out; then \ + rm tmp.out; \ + exit 1; \ + fi; \ + if [ -f profile.out ]; then \ + cat profile.out | grep -v "mode:" >> coverage.out; \ + rm profile.out; \ + fi; \ + done + +.PHONY: fmt +fmt: + $(GOFMT) -w $(GOFILES) + +.PHONY: fmt-check +fmt-check: + @diff=$$($(GOFMT) -d $(GOFILES)); \ + if [ -n "$$diff" ]; then \ + echo "Please run 'make fmt' and commit the result:"; \ + echo "$${diff}"; \ + exit 1; \ + fi; + +vet: + $(GO) vet $(VETPACKAGES) + +.PHONY: lint +lint: + @hash golint > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u golang.org/x/lint/golint; \ + fi + for PKG in $(PACKAGES); do golint -set_exit_status $$PKG || exit 1; done; + +.PHONY: misspell-check +misspell-check: + @hash misspell > /dev/null 2>&1; if [ $$? -ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -error $(GOFILES) + +.PHONY: misspell +misspell: + @hash misspell > /dev/null 2>&1; if [ $$? 
-ne 0 ]; then \ + $(GO) get -u github.com/client9/misspell/cmd/misspell; \ + fi + misspell -w $(GOFILES) + +.PHONY: tools +tools: + go install golang.org/x/lint/golint; \ + go install github.com/client9/misspell/cmd/misspell; diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/README.md b/terraform-server/vendor/github.com/gin-gonic/gin/README.md new file mode 100644 index 00000000..d4772d76 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/README.md @@ -0,0 +1,2217 @@ +# Gin Web Framework + + + +[![Build Status](https://travis-ci.org/gin-gonic/gin.svg)](https://travis-ci.org/gin-gonic/gin) +[![codecov](https://codecov.io/gh/gin-gonic/gin/branch/master/graph/badge.svg)](https://codecov.io/gh/gin-gonic/gin) +[![Go Report Card](https://goreportcard.com/badge/github.com/gin-gonic/gin)](https://goreportcard.com/report/github.com/gin-gonic/gin) +[![GoDoc](https://pkg.go.dev/badge/github.com/gin-gonic/gin?status.svg)](https://pkg.go.dev/github.com/gin-gonic/gin?tab=doc) +[![Join the chat at https://gitter.im/gin-gonic/gin](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/gin-gonic/gin?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +[![Sourcegraph](https://sourcegraph.com/github.com/gin-gonic/gin/-/badge.svg)](https://sourcegraph.com/github.com/gin-gonic/gin?badge) +[![Open Source Helpers](https://www.codetriage.com/gin-gonic/gin/badges/users.svg)](https://www.codetriage.com/gin-gonic/gin) +[![Release](https://img.shields.io/github/release/gin-gonic/gin.svg?style=flat-square)](https://github.com/gin-gonic/gin/releases) +[![TODOs](https://badgen.net/https/api.tickgit.com/badgen/github.com/gin-gonic/gin)](https://www.tickgit.com/browse?repo=github.com/gin-gonic/gin) + +Gin is a web framework written in Go (Golang). It features a martini-like API with performance that is up to 40 times faster thanks to [httprouter](https://github.com/julienschmidt/httprouter). 
If you need performance and good productivity, you will love Gin. + + +## Contents + +- [Gin Web Framework](#gin-web-framework) + - [Contents](#contents) + - [Installation](#installation) + - [Quick start](#quick-start) + - [Benchmarks](#benchmarks) + - [Gin v1. stable](#gin-v1-stable) + - [Build with jsoniter](#build-with-jsoniter) + - [API Examples](#api-examples) + - [Using GET, POST, PUT, PATCH, DELETE and OPTIONS](#using-get-post-put-patch-delete-and-options) + - [Parameters in path](#parameters-in-path) + - [Querystring parameters](#querystring-parameters) + - [Multipart/Urlencoded Form](#multiparturlencoded-form) + - [Another example: query + post form](#another-example-query--post-form) + - [Map as querystring or postform parameters](#map-as-querystring-or-postform-parameters) + - [Upload files](#upload-files) + - [Single file](#single-file) + - [Multiple files](#multiple-files) + - [Grouping routes](#grouping-routes) + - [Blank Gin without middleware by default](#blank-gin-without-middleware-by-default) + - [Using middleware](#using-middleware) + - [How to write log file](#how-to-write-log-file) + - [Custom Log Format](#custom-log-format) + - [Controlling Log output coloring](#controlling-log-output-coloring) + - [Model binding and validation](#model-binding-and-validation) + - [Custom Validators](#custom-validators) + - [Only Bind Query String](#only-bind-query-string) + - [Bind Query String or Post Data](#bind-query-string-or-post-data) + - [Bind Uri](#bind-uri) + - [Bind Header](#bind-header) + - [Bind HTML checkboxes](#bind-html-checkboxes) + - [Multipart/Urlencoded binding](#multiparturlencoded-binding) + - [XML, JSON, YAML and ProtoBuf rendering](#xml-json-yaml-and-protobuf-rendering) + - [SecureJSON](#securejson) + - [JSONP](#jsonp) + - [AsciiJSON](#asciijson) + - [PureJSON](#purejson) + - [Serving static files](#serving-static-files) + - [Serving data from file](#serving-data-from-file) + - [Serving data from reader](#serving-data-from-reader) + - 
[HTML rendering](#html-rendering) + - [Custom Template renderer](#custom-template-renderer) + - [Custom Delimiters](#custom-delimiters) + - [Custom Template Funcs](#custom-template-funcs) + - [Multitemplate](#multitemplate) + - [Redirects](#redirects) + - [Custom Middleware](#custom-middleware) + - [Using BasicAuth() middleware](#using-basicauth-middleware) + - [Goroutines inside a middleware](#goroutines-inside-a-middleware) + - [Custom HTTP configuration](#custom-http-configuration) + - [Support Let's Encrypt](#support-lets-encrypt) + - [Run multiple service using Gin](#run-multiple-service-using-gin) + - [Graceful shutdown or restart](#graceful-shutdown-or-restart) + - [Third-party packages](#third-party-packages) + - [Manually](#manually) + - [Build a single binary with templates](#build-a-single-binary-with-templates) + - [Bind form-data request with custom struct](#bind-form-data-request-with-custom-struct) + - [Try to bind body into different structs](#try-to-bind-body-into-different-structs) + - [http2 server push](#http2-server-push) + - [Define format for the log of routes](#define-format-for-the-log-of-routes) + - [Set and get a cookie](#set-and-get-a-cookie) + - [Testing](#testing) + - [Users](#users) + +## Installation + +To install Gin package, you need to install Go and set your Go workspace first. + +1. The first need [Go](https://golang.org/) installed (**version 1.12+ is required**), then you can use the below Go command to install Gin. + +```sh +$ go get -u github.com/gin-gonic/gin +``` + +2. Import it in your code: + +```go +import "github.com/gin-gonic/gin" +``` + +3. (Optional) Import `net/http`. This is required for example if using constants such as `http.StatusOK`. 
+ +```go +import "net/http" +``` + +## Quick start + +```sh +# assume the following codes in example.go file +$ cat example.go +``` + +```go +package main + +import "github.com/gin-gonic/gin" + +func main() { + r := gin.Default() + r.GET("/ping", func(c *gin.Context) { + c.JSON(200, gin.H{ + "message": "pong", + }) + }) + r.Run() // listen and serve on 0.0.0.0:8080 (for windows "localhost:8080") +} +``` + +``` +# run example.go and visit 0.0.0.0:8080/ping (for windows "localhost:8080/ping") on browser +$ go run example.go +``` + +## Benchmarks + +Gin uses a custom version of [HttpRouter](https://github.com/julienschmidt/httprouter) + +[See all benchmarks](/BENCHMARKS.md) + +| Benchmark name | (1) | (2) | (3) | (4) | +| ------------------------------ | ---------:| ---------------:| ------------:| ---------------:| +| BenchmarkGin_GithubAll | **43550** | **27364 ns/op** | **0 B/op** | **0 allocs/op** | +| BenchmarkAce_GithubAll | 40543 | 29670 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkAero_GithubAll | 57632 | 20648 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkBear_GithubAll | 9234 | 216179 ns/op | 86448 B/op | 943 allocs/op | +| BenchmarkBeego_GithubAll | 7407 | 243496 ns/op | 71456 B/op | 609 allocs/op | +| BenchmarkBone_GithubAll | 420 | 2922835 ns/op | 720160 B/op | 8620 allocs/op | +| BenchmarkChi_GithubAll | 7620 | 238331 ns/op | 87696 B/op | 609 allocs/op | +| BenchmarkDenco_GithubAll | 18355 | 64494 ns/op | 20224 B/op | 167 allocs/op | +| BenchmarkEcho_GithubAll | 31251 | 38479 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkGocraftWeb_GithubAll | 4117 | 300062 ns/op | 131656 B/op | 1686 allocs/op | +| BenchmarkGoji_GithubAll | 3274 | 416158 ns/op | 56112 B/op | 334 allocs/op | +| BenchmarkGojiv2_GithubAll | 1402 | 870518 ns/op | 352720 B/op | 4321 allocs/op | +| BenchmarkGoJsonRest_GithubAll | 2976 | 401507 ns/op | 134371 B/op | 2737 allocs/op | +| BenchmarkGoRestful_GithubAll | 410 | 2913158 ns/op | 910144 B/op | 2938 allocs/op | +| BenchmarkGorillaMux_GithubAll | 
346 | 3384987 ns/op | 251650 B/op | 1994 allocs/op | +| BenchmarkGowwwRouter_GithubAll | 10000 | 143025 ns/op | 72144 B/op | 501 allocs/op | +| BenchmarkHttpRouter_GithubAll | 55938 | 21360 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkHttpTreeMux_GithubAll | 10000 | 153944 ns/op | 65856 B/op | 671 allocs/op | +| BenchmarkKocha_GithubAll | 10000 | 106315 ns/op | 23304 B/op | 843 allocs/op | +| BenchmarkLARS_GithubAll | 47779 | 25084 ns/op | 0 B/op | 0 allocs/op | +| BenchmarkMacaron_GithubAll | 3266 | 371907 ns/op | 149409 B/op | 1624 allocs/op | +| BenchmarkMartini_GithubAll | 331 | 3444706 ns/op | 226551 B/op | 2325 allocs/op | +| BenchmarkPat_GithubAll | 273 | 4381818 ns/op | 1483152 B/op | 26963 allocs/op | +| BenchmarkPossum_GithubAll | 10000 | 164367 ns/op | 84448 B/op | 609 allocs/op | +| BenchmarkR2router_GithubAll | 10000 | 160220 ns/op | 77328 B/op | 979 allocs/op | +| BenchmarkRivet_GithubAll | 14625 | 82453 ns/op | 16272 B/op | 167 allocs/op | +| BenchmarkTango_GithubAll | 6255 | 279611 ns/op | 63826 B/op | 1618 allocs/op | +| BenchmarkTigerTonic_GithubAll | 2008 | 687874 ns/op | 193856 B/op | 4474 allocs/op | +| BenchmarkTraffic_GithubAll | 355 | 3478508 ns/op | 820744 B/op | 14114 allocs/op | +| BenchmarkVulcan_GithubAll | 6885 | 193333 ns/op | 19894 B/op | 609 allocs/op | + +- (1): Total Repetitions achieved in constant time, higher means more confident result +- (2): Single Repetition Duration (ns/op), lower is better +- (3): Heap Memory (B/op), lower is better +- (4): Average Allocations per Repetition (allocs/op), lower is better + +## Gin v1. stable + +- [x] Zero allocation router. +- [x] Still the fastest http router and framework. From routing to writing. +- [x] Complete suite of unit tests. +- [x] Battle tested. +- [x] API frozen, new releases will not break your code. 
+ +## Build with [jsoniter](https://github.com/json-iterator/go) + +Gin uses `encoding/json` as default json package but you can change to [jsoniter](https://github.com/json-iterator/go) by build from other tags. + +```sh +$ go build -tags=jsoniter . +``` + +## API Examples + +You can find a number of ready-to-run examples at [Gin examples repository](https://github.com/gin-gonic/examples). + +### Using GET, POST, PUT, PATCH, DELETE and OPTIONS + +```go +func main() { + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/someGet", getting) + router.POST("/somePost", posting) + router.PUT("/somePut", putting) + router.DELETE("/someDelete", deleting) + router.PATCH("/somePatch", patching) + router.HEAD("/someHead", head) + router.OPTIONS("/someOptions", options) + + // By default it serves on :8080 unless a + // PORT environment variable was defined. + router.Run() + // router.Run(":3000") for a hard coded port +} +``` + +### Parameters in path + +```go +func main() { + router := gin.Default() + + // This handler will match /user/john but will not match /user/ or /user + router.GET("/user/:name", func(c *gin.Context) { + name := c.Param("name") + c.String(http.StatusOK, "Hello %s", name) + }) + + // However, this one will match /user/john/ and also /user/john/send + // If no other routers match /user/john, it will redirect to /user/john/ + router.GET("/user/:name/*action", func(c *gin.Context) { + name := c.Param("name") + action := c.Param("action") + message := name + " is " + action + c.String(http.StatusOK, message) + }) + + // For each matched request Context will hold the route definition + router.POST("/user/:name/*action", func(c *gin.Context) { + c.FullPath() == "/user/:name/*action" // true + }) + + // This handler will add a new router for /user/groups. + // Exact routes are resolved before param routes, regardless of the order they were defined. 
+ // Routes starting with /user/groups are never interpreted as /user/:name/... routes + router.GET("/user/groups", func(c *gin.Context) { + c.String(http.StatusOK, "The available groups are [...]", name) + }) + + router.Run(":8080") +} +``` + +### Querystring parameters + +```go +func main() { + router := gin.Default() + + // Query string parameters are parsed using the existing underlying request object. + // The request responds to a url matching: /welcome?firstname=Jane&lastname=Doe + router.GET("/welcome", func(c *gin.Context) { + firstname := c.DefaultQuery("firstname", "Guest") + lastname := c.Query("lastname") // shortcut for c.Request.URL.Query().Get("lastname") + + c.String(http.StatusOK, "Hello %s %s", firstname, lastname) + }) + router.Run(":8080") +} +``` + +### Multipart/Urlencoded Form + +```go +func main() { + router := gin.Default() + + router.POST("/form_post", func(c *gin.Context) { + message := c.PostForm("message") + nick := c.DefaultPostForm("nick", "anonymous") + + c.JSON(200, gin.H{ + "status": "posted", + "message": message, + "nick": nick, + }) + }) + router.Run(":8080") +} +``` + +### Another example: query + post form + +``` +POST /post?id=1234&page=1 HTTP/1.1 +Content-Type: application/x-www-form-urlencoded + +name=manu&message=this_is_great +``` + +```go +func main() { + router := gin.Default() + + router.POST("/post", func(c *gin.Context) { + + id := c.Query("id") + page := c.DefaultQuery("page", "0") + name := c.PostForm("name") + message := c.PostForm("message") + + fmt.Printf("id: %s; page: %s; name: %s; message: %s", id, page, name, message) + }) + router.Run(":8080") +} +``` + +``` +id: 1234; page: 1; name: manu; message: this_is_great +``` + +### Map as querystring or postform parameters + +``` +POST /post?ids[a]=1234&ids[b]=hello HTTP/1.1 +Content-Type: application/x-www-form-urlencoded + +names[first]=thinkerou&names[second]=tianou +``` + +```go +func main() { + router := gin.Default() + + router.POST("/post", func(c 
*gin.Context) { + + ids := c.QueryMap("ids") + names := c.PostFormMap("names") + + fmt.Printf("ids: %v; names: %v", ids, names) + }) + router.Run(":8080") +} +``` + +``` +ids: map[b:hello a:1234]; names: map[second:tianou first:thinkerou] +``` + +### Upload files + +#### Single file + +References issue [#774](https://github.com/gin-gonic/gin/issues/774) and detail [example code](https://github.com/gin-gonic/examples/tree/master/upload-file/single). + +`file.Filename` **SHOULD NOT** be trusted. See [`Content-Disposition` on MDN](https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/Content-Disposition#Directives) and [#1693](https://github.com/gin-gonic/gin/issues/1693) + +> The filename is always optional and must not be used blindly by the application: path information should be stripped, and conversion to the server file system rules should be done. + +```go +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.POST("/upload", func(c *gin.Context) { + // single file + file, _ := c.FormFile("file") + log.Println(file.Filename) + + // Upload the file to specific dst. + c.SaveUploadedFile(file, dst) + + c.String(http.StatusOK, fmt.Sprintf("'%s' uploaded!", file.Filename)) + }) + router.Run(":8080") +} +``` + +How to `curl`: + +```bash +curl -X POST http://localhost:8080/upload \ + -F "file=@/Users/appleboy/test.zip" \ + -H "Content-Type: multipart/form-data" +``` + +#### Multiple files + +See the detail [example code](https://github.com/gin-gonic/examples/tree/master/upload-file/multiple). 
+ +```go +func main() { + router := gin.Default() + // Set a lower memory limit for multipart forms (default is 32 MiB) + router.MaxMultipartMemory = 8 << 20 // 8 MiB + router.POST("/upload", func(c *gin.Context) { + // Multipart form + form, _ := c.MultipartForm() + files := form.File["upload[]"] + + for _, file := range files { + log.Println(file.Filename) + + // Upload the file to specific dst. + c.SaveUploadedFile(file, dst) + } + c.String(http.StatusOK, fmt.Sprintf("%d files uploaded!", len(files))) + }) + router.Run(":8080") +} +``` + +How to `curl`: + +```bash +curl -X POST http://localhost:8080/upload \ + -F "upload[]=@/Users/appleboy/test1.zip" \ + -F "upload[]=@/Users/appleboy/test2.zip" \ + -H "Content-Type: multipart/form-data" +``` + +### Grouping routes + +```go +func main() { + router := gin.Default() + + // Simple group: v1 + v1 := router.Group("/v1") + { + v1.POST("/login", loginEndpoint) + v1.POST("/submit", submitEndpoint) + v1.POST("/read", readEndpoint) + } + + // Simple group: v2 + v2 := router.Group("/v2") + { + v2.POST("/login", loginEndpoint) + v2.POST("/submit", submitEndpoint) + v2.POST("/read", readEndpoint) + } + + router.Run(":8080") +} +``` + +### Blank Gin without middleware by default + +Use + +```go +r := gin.New() +``` + +instead of + +```go +// Default With the Logger and Recovery middleware already attached +r := gin.Default() +``` + + +### Using middleware +```go +func main() { + // Creates a router without any middleware by default + r := gin.New() + + // Global middleware + // Logger middleware will write the logs to gin.DefaultWriter even if you set with GIN_MODE=release. + // By default gin.DefaultWriter = os.Stdout + r.Use(gin.Logger()) + + // Recovery middleware recovers from any panics and writes a 500 if there was one. + r.Use(gin.Recovery()) + + // Per route middleware, you can add as many as you desire. 
+ r.GET("/benchmark", MyBenchLogger(), benchEndpoint) + + // Authorization group + // authorized := r.Group("/", AuthRequired()) + // exactly the same as: + authorized := r.Group("/") + // per group middleware! in this case we use the custom created + // AuthRequired() middleware just in the "authorized" group. + authorized.Use(AuthRequired()) + { + authorized.POST("/login", loginEndpoint) + authorized.POST("/submit", submitEndpoint) + authorized.POST("/read", readEndpoint) + + // nested group + testing := authorized.Group("testing") + testing.GET("/analytics", analyticsEndpoint) + } + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Custom Recovery behavior +```go +func main() { + // Creates a router without any middleware by default + r := gin.New() + + // Global middleware + // Logger middleware will write the logs to gin.DefaultWriter even if you set with GIN_MODE=release. + // By default gin.DefaultWriter = os.Stdout + r.Use(gin.Logger()) + + // Recovery middleware recovers from any panics and writes a 500 if there was one. + r.Use(gin.CustomRecovery(func(c *gin.Context, recovered interface{}) { + if err, ok := recovered.(string); ok { + c.String(http.StatusInternalServerError, fmt.Sprintf("error: %s", err)) + } + c.AbortWithStatus(http.StatusInternalServerError) + })) + + r.GET("/panic", func(c *gin.Context) { + // panic with a string -- the custom middleware could save this to a database or report it to the user + panic("foo") + }) + + r.GET("/", func(c *gin.Context) { + c.String(http.StatusOK, "ohai") + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### How to write log file +```go +func main() { + // Disable Console Color, you don't need console color when writing the logs to file. + gin.DisableConsoleColor() + + // Logging to a file. + f, _ := os.Create("gin.log") + gin.DefaultWriter = io.MultiWriter(f) + + // Use the following code if you need to write the logs to file and console at the same time. 
+ // gin.DefaultWriter = io.MultiWriter(f, os.Stdout) + + router := gin.Default() + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + +    router.Run(":8080") +} +``` + +### Custom Log Format +```go +func main() { + router := gin.New() + + // LoggerWithFormatter middleware will write the logs to gin.DefaultWriter + // By default gin.DefaultWriter = os.Stdout + router.Use(gin.LoggerWithFormatter(func(param gin.LogFormatterParams) string { + + // your custom format + return fmt.Sprintf("%s - [%s] \"%s %s %s %d %s \"%s\" %s\"\n", + param.ClientIP, + param.TimeStamp.Format(time.RFC1123), + param.Method, + param.Path, + param.Request.Proto, + param.StatusCode, + param.Latency, + param.Request.UserAgent(), + param.ErrorMessage, + ) + })) + router.Use(gin.Recovery()) + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +**Sample Output** +``` +::1 - [Fri, 07 Dec 2018 17:04:38 JST] "GET /ping HTTP/1.1 200 122.767µs "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_11_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/71.0.3578.80 Safari/537.36" " +``` + +### Controlling Log output coloring + +By default, logs output on console should be colorized depending on the detected TTY. 
+ +Never colorize logs: + +```go +func main() { + // Disable log's color + gin.DisableConsoleColor() + + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +Always colorize logs: + +```go +func main() { + // Force log's color + gin.ForceConsoleColor() + + // Creates a gin router with default middleware: + // logger and recovery (crash-free) middleware + router := gin.Default() + + router.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + router.Run(":8080") +} +``` + +### Model binding and validation + +To bind a request body into a type, use model binding. We currently support binding of JSON, XML, YAML and standard form values (foo=bar&boo=baz). + +Gin uses [**go-playground/validator/v10**](https://github.com/go-playground/validator) for validation. Check the full docs on tags usage [here](https://godoc.org/github.com/go-playground/validator#hdr-Baked_In_Validators_and_Tags). + +Note that you need to set the corresponding binding tag on all fields you want to bind. For example, when binding from JSON, set `json:"fieldname"`. + +Also, Gin provides two sets of methods for binding: +- **Type** - Must bind + - **Methods** - `Bind`, `BindJSON`, `BindXML`, `BindQuery`, `BindYAML`, `BindHeader` + - **Behavior** - These methods use `MustBindWith` under the hood. If there is a binding error, the request is aborted with `c.AbortWithError(400, err).SetType(ErrorTypeBind)`. This sets the response status code to 400 and the `Content-Type` header is set to `text/plain; charset=utf-8`. Note that if you try to set the response code after this, it will result in a warning `[GIN-debug] [WARNING] Headers were already written. Wanted to override status code 400 with 422`. If you wish to have greater control over the behavior, consider using the `ShouldBind` equivalent method. 
+- **Type** - Should bind + - **Methods** - `ShouldBind`, `ShouldBindJSON`, `ShouldBindXML`, `ShouldBindQuery`, `ShouldBindYAML`, `ShouldBindHeader` + - **Behavior** - These methods use `ShouldBindWith` under the hood. If there is a binding error, the error is returned and it is the developer's responsibility to handle the request and error appropriately. + +When using the Bind-method, Gin tries to infer the binder depending on the Content-Type header. If you are sure what you are binding, you can use `MustBindWith` or `ShouldBindWith`. + +You can also specify that specific fields are required. If a field is decorated with `binding:"required"` and has a empty value when binding, an error will be returned. + +```go +// Binding from JSON +type Login struct { + User string `form:"user" json:"user" xml:"user" binding:"required"` + Password string `form:"password" json:"password" xml:"password" binding:"required"` +} + +func main() { + router := gin.Default() + + // Example for binding JSON ({"user": "manu", "password": "123"}) + router.POST("/loginJSON", func(c *gin.Context) { + var json Login + if err := c.ShouldBindJSON(&json); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if json.User != "manu" || json.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + }) + + // Example for binding XML ( + // + // + // user + // 123 + // ) + router.POST("/loginXML", func(c *gin.Context) { + var xml Login + if err := c.ShouldBindXML(&xml); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if xml.User != "manu" || xml.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + }) + + // Example for binding a HTML form (user=manu&password=123) + router.POST("/loginForm", func(c 
*gin.Context) { + var form Login + // This will infer what binder to use depending on the content-type header. + if err := c.ShouldBind(&form); err != nil { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + return + } + + if form.User != "manu" || form.Password != "123" { + c.JSON(http.StatusUnauthorized, gin.H{"status": "unauthorized"}) + return + } + + c.JSON(http.StatusOK, gin.H{"status": "you are logged in"}) + }) + + // Listen and serve on 0.0.0.0:8080 + router.Run(":8080") +} +``` + +**Sample request** +```shell +$ curl -v -X POST \ + http://localhost:8080/loginJSON \ + -H 'content-type: application/json' \ + -d '{ "user": "manu" }' +> POST /loginJSON HTTP/1.1 +> Host: localhost:8080 +> User-Agent: curl/7.51.0 +> Accept: */* +> content-type: application/json +> Content-Length: 18 +> +* upload completely sent off: 18 out of 18 bytes +< HTTP/1.1 400 Bad Request +< Content-Type: application/json; charset=utf-8 +< Date: Fri, 04 Aug 2017 03:51:31 GMT +< Content-Length: 100 +< +{"error":"Key: 'Login.Password' Error:Field validation for 'Password' failed on the 'required' tag"} +``` + +**Skip validate** + +When running the above example using the above the `curl` command, it returns error. Because the example use `binding:"required"` for `Password`. If use `binding:"-"` for `Password`, then it will not return error when running the above example again. + +### Custom Validators + +It is also possible to register custom validators. See the [example code](https://github.com/gin-gonic/examples/tree/master/custom-validation/server.go). + +```go +package main + +import ( + "net/http" + "time" + + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" + "github.com/go-playground/validator/v10" +) + +// Booking contains binded and validated data. 
+type Booking struct { + CheckIn time.Time `form:"check_in" binding:"required,bookabledate" time_format:"2006-01-02"` + CheckOut time.Time `form:"check_out" binding:"required,gtfield=CheckIn" time_format:"2006-01-02"` +} + +var bookableDate validator.Func = func(fl validator.FieldLevel) bool { + date, ok := fl.Field().Interface().(time.Time) + if ok { + today := time.Now() + if today.After(date) { + return false + } + } + return true +} + +func main() { + route := gin.Default() + + if v, ok := binding.Validator.Engine().(*validator.Validate); ok { + v.RegisterValidation("bookabledate", bookableDate) + } + + route.GET("/bookable", getBookable) + route.Run(":8085") +} + +func getBookable(c *gin.Context) { + var b Booking + if err := c.ShouldBindWith(&b, binding.Query); err == nil { + c.JSON(http.StatusOK, gin.H{"message": "Booking dates are valid!"}) + } else { + c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()}) + } +} +``` + +```console +$ curl "localhost:8085/bookable?check_in=2030-04-16&check_out=2030-04-17" +{"message":"Booking dates are valid!"} + +$ curl "localhost:8085/bookable?check_in=2030-03-10&check_out=2030-03-09" +{"error":"Key: 'Booking.CheckOut' Error:Field validation for 'CheckOut' failed on the 'gtfield' tag"} + +$ curl "localhost:8085/bookable?check_in=2000-03-09&check_out=2000-03-10" +{"error":"Key: 'Booking.CheckIn' Error:Field validation for 'CheckIn' failed on the 'bookabledate' tag"}% +``` + +[Struct level validations](https://github.com/go-playground/validator/releases/tag/v8.7) can also be registered this way. +See the [struct-lvl-validation example](https://github.com/gin-gonic/examples/tree/master/struct-lvl-validations) to learn more. + +### Only Bind Query String + +`ShouldBindQuery` function only binds the query params and not the post data. See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-315953017). 
+ +```go +package main + +import ( + "log" + + "github.com/gin-gonic/gin" +) + +type Person struct { + Name string `form:"name"` + Address string `form:"address"` +} + +func main() { + route := gin.Default() + route.Any("/testing", startPage) + route.Run(":8085") +} + +func startPage(c *gin.Context) { + var person Person + if c.ShouldBindQuery(&person) == nil { + log.Println("====== Only Bind By Query String ======") + log.Println(person.Name) + log.Println(person.Address) + } + c.String(200, "Success") +} + +``` + +### Bind Query String or Post Data + +See the [detail information](https://github.com/gin-gonic/gin/issues/742#issuecomment-264681292). + +```go +package main + +import ( + "log" + "time" + + "github.com/gin-gonic/gin" +) + +type Person struct { + Name string `form:"name"` + Address string `form:"address"` + Birthday time.Time `form:"birthday" time_format:"2006-01-02" time_utc:"1"` + CreateTime time.Time `form:"createTime" time_format:"unixNano"` + UnixTime time.Time `form:"unixTime" time_format:"unix"` +} + +func main() { + route := gin.Default() + route.GET("/testing", startPage) + route.Run(":8085") +} + +func startPage(c *gin.Context) { + var person Person + // If `GET`, only `Form` binding engine (`query`) used. + // If `POST`, first checks the `content-type` for `JSON` or `XML`, then uses `Form` (`form-data`). + // See more at https://github.com/gin-gonic/gin/blob/master/binding/binding.go#L48 + if c.ShouldBind(&person) == nil { + log.Println(person.Name) + log.Println(person.Address) + log.Println(person.Birthday) + log.Println(person.CreateTime) + log.Println(person.UnixTime) + } + + c.String(200, "Success") +} +``` + +Test it with: +```sh +$ curl -X GET "localhost:8085/testing?name=appleboy&address=xyz&birthday=1992-03-15&createTime=1562400033000000123&unixTime=1562400033" +``` + +### Bind Uri + +See the [detail information](https://github.com/gin-gonic/gin/issues/846). 
+ +```go +package main + +import "github.com/gin-gonic/gin" + +type Person struct { + ID string `uri:"id" binding:"required,uuid"` + Name string `uri:"name" binding:"required"` +} + +func main() { + route := gin.Default() + route.GET("/:name/:id", func(c *gin.Context) { + var person Person + if err := c.ShouldBindUri(&person); err != nil { + c.JSON(400, gin.H{"msg": err}) + return + } + c.JSON(200, gin.H{"name": person.Name, "uuid": person.ID}) + }) + route.Run(":8088") +} +``` + +Test it with: +```sh +$ curl -v localhost:8088/thinkerou/987fbc97-4bed-5078-9f07-9141ba07c9f3 +$ curl -v localhost:8088/thinkerou/not-uuid +``` + +### Bind Header + +```go +package main + +import ( + "fmt" + "github.com/gin-gonic/gin" +) + +type testHeader struct { + Rate int `header:"Rate"` + Domain string `header:"Domain"` +} + +func main() { + r := gin.Default() + r.GET("/", func(c *gin.Context) { + h := testHeader{} + + if err := c.ShouldBindHeader(&h); err != nil { + c.JSON(200, err) + } + + fmt.Printf("%#v\n", h) + c.JSON(200, gin.H{"Rate": h.Rate, "Domain": h.Domain}) + }) + + r.Run() + +// client +// curl -H "rate:300" -H "domain:music" 127.0.0.1:8080/ +// output +// {"Domain":"music","Rate":300} +} +``` + +### Bind HTML checkboxes + +See the [detail information](https://github.com/gin-gonic/gin/issues/129#issuecomment-124260092) + +main.go + +```go +... + +type myForm struct { + Colors []string `form:"colors[]"` +} + +... + +func formHandler(c *gin.Context) { + var fakeForm myForm + c.ShouldBind(&fakeForm) + c.JSON(200, gin.H{"color": fakeForm.Colors}) +} + +... + +``` + +form.html + +```html +
+

Check some colors

+ + + + + + + +
+``` + +result: + +``` +{"color":["red","green","blue"]} +``` + +### Multipart/Urlencoded binding + +```go +type ProfileForm struct { + Name string `form:"name" binding:"required"` + Avatar *multipart.FileHeader `form:"avatar" binding:"required"` + + // or for multiple files + // Avatars []*multipart.FileHeader `form:"avatar" binding:"required"` +} + +func main() { + router := gin.Default() + router.POST("/profile", func(c *gin.Context) { + // you can bind multipart form with explicit binding declaration: + // c.ShouldBindWith(&form, binding.Form) + // or you can simply use autobinding with ShouldBind method: + var form ProfileForm + // in this case proper binding will be automatically selected + if err := c.ShouldBind(&form); err != nil { + c.String(http.StatusBadRequest, "bad request") + return + } + + err := c.SaveUploadedFile(form.Avatar, form.Avatar.Filename) + if err != nil { + c.String(http.StatusInternalServerError, "unknown error") + return + } + + // db.Save(&form) + + c.String(http.StatusOK, "ok") + }) + router.Run(":8080") +} +``` + +Test it with: +```sh +$ curl -X POST -v --form name=user --form "avatar=@./avatar.png" http://localhost:8080/profile +``` + +### XML, JSON, YAML and ProtoBuf rendering + +```go +func main() { + r := gin.Default() + + // gin.H is a shortcut for map[string]interface{} + r.GET("/someJSON", func(c *gin.Context) { + c.JSON(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/moreJSON", func(c *gin.Context) { + // You also can use a struct + var msg struct { + Name string `json:"user"` + Message string + Number int + } + msg.Name = "Lena" + msg.Message = "hey" + msg.Number = 123 + // Note that msg.Name becomes "user" in the JSON + // Will output : {"user": "Lena", "Message": "hey", "Number": 123} + c.JSON(http.StatusOK, msg) + }) + + r.GET("/someXML", func(c *gin.Context) { + c.XML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/someYAML", func(c *gin.Context) { + 
c.YAML(http.StatusOK, gin.H{"message": "hey", "status": http.StatusOK}) + }) + + r.GET("/someProtoBuf", func(c *gin.Context) { + reps := []int64{int64(1), int64(2)} + label := "test" + // The specific definition of protobuf is written in the testdata/protoexample file. + data := &protoexample.Test{ + Label: &label, + Reps: reps, + } + // Note that data becomes binary data in the response + // Will output protoexample.Test protobuf serialized data + c.ProtoBuf(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### SecureJSON + +Using SecureJSON to prevent json hijacking. Default prepends `"while(1),"` to response body if the given struct is array values. + +```go +func main() { + r := gin.Default() + + // You can also use your own secure json prefix + // r.SecureJsonPrefix(")]}',\n") + + r.GET("/someJSON", func(c *gin.Context) { + names := []string{"lena", "austin", "foo"} + + // Will output : while(1);["lena","austin","foo"] + c.SecureJSON(http.StatusOK, names) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` +#### JSONP + +Using JSONP to request data from a server in a different domain. Add callback to response body if the query parameter callback exists. + +```go +func main() { + r := gin.Default() + + r.GET("/JSONP", func(c *gin.Context) { + data := gin.H{ + "foo": "bar", + } + + //callback is x + // Will output : x({\"foo\":\"bar\"}) + c.JSONP(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") + + // client + // curl http://127.0.0.1:8080/JSONP?callback=x +} +``` + +#### AsciiJSON + +Using AsciiJSON to Generates ASCII-only JSON with escaped non-ASCII characters. + +```go +func main() { + r := gin.Default() + + r.GET("/someJSON", func(c *gin.Context) { + data := gin.H{ + "lang": "GO语言", + "tag": "
", + } + + // will output : {"lang":"GO\u8bed\u8a00","tag":"\u003cbr\u003e"} + c.AsciiJSON(http.StatusOK, data) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +#### PureJSON + +Normally, JSON replaces special HTML characters with their unicode entities, e.g. `<` becomes `\u003c`. If you want to encode such characters literally, you can use PureJSON instead. +This feature is unavailable in Go 1.6 and lower. + +```go +func main() { + r := gin.Default() + + // Serves unicode entities + r.GET("/json", func(c *gin.Context) { + c.JSON(200, gin.H{ + "html": "Hello, world!", + }) + }) + + // Serves literal characters + r.GET("/purejson", func(c *gin.Context) { + c.PureJSON(200, gin.H{ + "html": "Hello, world!", + }) + }) + + // listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Serving static files + +```go +func main() { + router := gin.Default() + router.Static("/assets", "./assets") + router.StaticFS("/more_static", http.Dir("my_file_system")) + router.StaticFile("/favicon.ico", "./resources/favicon.ico") + + // Listen and serve on 0.0.0.0:8080 + router.Run(":8080") +} +``` + +### Serving data from file + +```go +func main() { + router := gin.Default() + + router.GET("/local/file", func(c *gin.Context) { + c.File("local/file.go") + }) + + var fs http.FileSystem = // ... 
+ router.GET("/fs/file", func(c *gin.Context) { + c.FileFromFS("fs/file.go", fs) + }) +} + +``` + +### Serving data from reader + +```go +func main() { + router := gin.Default() + router.GET("/someDataFromReader", func(c *gin.Context) { + response, err := http.Get("https://raw.githubusercontent.com/gin-gonic/logo/master/color.png") + if err != nil || response.StatusCode != http.StatusOK { + c.Status(http.StatusServiceUnavailable) + return + } + + reader := response.Body + defer reader.Close() + contentLength := response.ContentLength + contentType := response.Header.Get("Content-Type") + + extraHeaders := map[string]string{ + "Content-Disposition": `attachment; filename="gopher.png"`, + } + + c.DataFromReader(http.StatusOK, contentLength, contentType, reader, extraHeaders) + }) + router.Run(":8080") +} +``` + +### HTML rendering + +Using LoadHTMLGlob() or LoadHTMLFiles() + +```go +func main() { + router := gin.Default() + router.LoadHTMLGlob("templates/*") + //router.LoadHTMLFiles("templates/template1.html", "templates/template2.html") + router.GET("/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "index.tmpl", gin.H{ + "title": "Main website", + }) + }) + router.Run(":8080") +} +``` + +templates/index.tmpl + +```html + +

+ {{ .title }} +

+ +``` + +Using templates with same name in different directories + +```go +func main() { + router := gin.Default() + router.LoadHTMLGlob("templates/**/*") + router.GET("/posts/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "posts/index.tmpl", gin.H{ + "title": "Posts", + }) + }) + router.GET("/users/index", func(c *gin.Context) { + c.HTML(http.StatusOK, "users/index.tmpl", gin.H{ + "title": "Users", + }) + }) + router.Run(":8080") +} +``` + +templates/posts/index.tmpl + +```html +{{ define "posts/index.tmpl" }} +

+ {{ .title }} +

+

Using posts/index.tmpl

+ +{{ end }} +``` + +templates/users/index.tmpl + +```html +{{ define "users/index.tmpl" }} +

+ {{ .title }} +

+

Using users/index.tmpl

+ +{{ end }} +``` + +#### Custom Template renderer + +You can also use your own html template render + +```go +import "html/template" + +func main() { + router := gin.Default() + html := template.Must(template.ParseFiles("file1", "file2")) + router.SetHTMLTemplate(html) + router.Run(":8080") +} +``` + +#### Custom Delimiters + +You may use custom delims + +```go + r := gin.Default() + r.Delims("{[{", "}]}") + r.LoadHTMLGlob("/path/to/templates") +``` + +#### Custom Template Funcs + +See the detail [example code](https://github.com/gin-gonic/examples/tree/master/template). + +main.go + +```go +import ( + "fmt" + "html/template" + "net/http" + "time" + + "github.com/gin-gonic/gin" +) + +func formatAsDate(t time.Time) string { + year, month, day := t.Date() + return fmt.Sprintf("%d%02d/%02d", year, month, day) +} + +func main() { + router := gin.Default() + router.Delims("{[{", "}]}") + router.SetFuncMap(template.FuncMap{ + "formatAsDate": formatAsDate, + }) + router.LoadHTMLFiles("./testdata/template/raw.tmpl") + + router.GET("/raw", func(c *gin.Context) { + c.HTML(http.StatusOK, "raw.tmpl", gin.H{ + "now": time.Date(2017, 07, 01, 0, 0, 0, 0, time.UTC), + }) + }) + + router.Run(":8080") +} + +``` + +raw.tmpl + +```html +Date: {[{.now | formatAsDate}]} +``` + +Result: +``` +Date: 2017/07/01 +``` + +### Multitemplate + +Gin allow by default use only one html.Template. Check [a multitemplate render](https://github.com/gin-contrib/multitemplate) for using features like go 1.6 `block template`. + +### Redirects + +Issuing a HTTP redirect is easy. Both internal and external locations are supported. + +```go +r.GET("/test", func(c *gin.Context) { + c.Redirect(http.StatusMovedPermanently, "http://www.google.com/") +}) +``` + +Issuing a HTTP redirect from POST. 
Refer to issue: [#444](https://github.com/gin-gonic/gin/issues/444) +```go +r.POST("/test", func(c *gin.Context) { + c.Redirect(http.StatusFound, "/foo") +}) +``` + +Issuing a Router redirect, use `HandleContext` like below. + +``` go +r.GET("/test", func(c *gin.Context) { + c.Request.URL.Path = "/test2" + r.HandleContext(c) +}) +r.GET("/test2", func(c *gin.Context) { + c.JSON(200, gin.H{"hello": "world"}) +}) +``` + + +### Custom Middleware + +```go +func Logger() gin.HandlerFunc { + return func(c *gin.Context) { + t := time.Now() + + // Set example variable + c.Set("example", "12345") + + // before request + + c.Next() + + // after request + latency := time.Since(t) + log.Print(latency) + + // access the status we are sending + status := c.Writer.Status() + log.Println(status) + } +} + +func main() { + r := gin.New() + r.Use(Logger()) + + r.GET("/test", func(c *gin.Context) { + example := c.MustGet("example").(string) + + // it would print: "12345" + log.Println(example) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Using BasicAuth() middleware + +```go +// simulate some private data +var secrets = gin.H{ + "foo": gin.H{"email": "foo@bar.com", "phone": "123433"}, + "austin": gin.H{"email": "austin@example.com", "phone": "666"}, + "lena": gin.H{"email": "lena@guapa.com", "phone": "523443"}, +} + +func main() { + r := gin.Default() + + // Group using gin.BasicAuth() middleware + // gin.Accounts is a shortcut for map[string]string + authorized := r.Group("/admin", gin.BasicAuth(gin.Accounts{ + "foo": "bar", + "austin": "1234", + "lena": "hello2", + "manu": "4321", + })) + + // /admin/secrets endpoint + // hit "localhost:8080/admin/secrets + authorized.GET("/secrets", func(c *gin.Context) { + // get user, it was set by the BasicAuth middleware + user := c.MustGet(gin.AuthUserKey).(string) + if secret, ok := secrets[user]; ok { + c.JSON(http.StatusOK, gin.H{"user": user, "secret": secret}) + } else { + c.JSON(http.StatusOK, gin.H{"user": 
user, "secret": "NO SECRET :("}) + } + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Goroutines inside a middleware + +When starting new Goroutines inside a middleware or handler, you **SHOULD NOT** use the original context inside it, you have to use a read-only copy. + +```go +func main() { + r := gin.Default() + + r.GET("/long_async", func(c *gin.Context) { + // create copy to be used inside the goroutine + cCp := c.Copy() + go func() { + // simulate a long task with time.Sleep(). 5 seconds + time.Sleep(5 * time.Second) + + // note that you are using the copied context "cCp", IMPORTANT + log.Println("Done! in path " + cCp.Request.URL.Path) + }() + }) + + r.GET("/long_sync", func(c *gin.Context) { + // simulate a long task with time.Sleep(). 5 seconds + time.Sleep(5 * time.Second) + + // since we are NOT using a goroutine, we do not have to copy the context + log.Println("Done! in path " + c.Request.URL.Path) + }) + + // Listen and serve on 0.0.0.0:8080 + r.Run(":8080") +} +``` + +### Custom HTTP configuration + +Use `http.ListenAndServe()` directly, like this: + +```go +func main() { + router := gin.Default() + http.ListenAndServe(":8080", router) +} +``` +or + +```go +func main() { + router := gin.Default() + + s := &http.Server{ + Addr: ":8080", + Handler: router, + ReadTimeout: 10 * time.Second, + WriteTimeout: 10 * time.Second, + MaxHeaderBytes: 1 << 20, + } + s.ListenAndServe() +} +``` + +### Support Let's Encrypt + +example for 1-line LetsEncrypt HTTPS servers. + +```go +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + log.Fatal(autotls.Run(r, "example1.com", "example2.com")) +} +``` + +example for custom autocert manager. 
+ +```go +package main + +import ( + "log" + + "github.com/gin-gonic/autotls" + "github.com/gin-gonic/gin" + "golang.org/x/crypto/acme/autocert" +) + +func main() { + r := gin.Default() + + // Ping handler + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + + m := autocert.Manager{ + Prompt: autocert.AcceptTOS, + HostPolicy: autocert.HostWhitelist("example1.com", "example2.com"), + Cache: autocert.DirCache("/var/www/.cache"), + } + + log.Fatal(autotls.RunWithManager(r, &m)) +} +``` + +### Run multiple service using Gin + +See the [question](https://github.com/gin-gonic/gin/issues/346) and try the following example: + +```go +package main + +import ( + "log" + "net/http" + "time" + + "github.com/gin-gonic/gin" + "golang.org/x/sync/errgroup" +) + +var ( + g errgroup.Group +) + +func router01() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 01", + }, + ) + }) + + return e +} + +func router02() http.Handler { + e := gin.New() + e.Use(gin.Recovery()) + e.GET("/", func(c *gin.Context) { + c.JSON( + http.StatusOK, + gin.H{ + "code": http.StatusOK, + "error": "Welcome server 02", + }, + ) + }) + + return e +} + +func main() { + server01 := &http.Server{ + Addr: ":8080", + Handler: router01(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + server02 := &http.Server{ + Addr: ":8081", + Handler: router02(), + ReadTimeout: 5 * time.Second, + WriteTimeout: 10 * time.Second, + } + + g.Go(func() error { + err := server01.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + log.Fatal(err) + } + return err + }) + + g.Go(func() error { + err := server02.ListenAndServe() + if err != nil && err != http.ErrServerClosed { + log.Fatal(err) + } + return err + }) + + if err := g.Wait(); err != nil { + log.Fatal(err) + } +} +``` + +### Graceful shutdown or restart + +There are a few approaches you can 
use to perform a graceful shutdown or restart. You can make use of third-party packages specifically built for that, or you can manually do the same with the functions and methods from the built-in packages. + +#### Third-party packages + +We can use [fvbock/endless](https://github.com/fvbock/endless) to replace the default `ListenAndServe`. Refer to issue [#296](https://github.com/gin-gonic/gin/issues/296) for more details. + +```go +router := gin.Default() +router.GET("/", handler) +// [...] +endless.ListenAndServe(":4242", router) +``` + +Alternatives: + +* [manners](https://github.com/braintree/manners): A polite Go HTTP server that shuts down gracefully. +* [graceful](https://github.com/tylerb/graceful): Graceful is a Go package enabling graceful shutdown of an http.Handler server. +* [grace](https://github.com/facebookgo/grace): Graceful restart & zero downtime deploy for Go servers. + +#### Manually + +In case you are using Go 1.8 or a later version, you may not need to use those libraries. Consider using `http.Server`'s built-in [Shutdown()](https://golang.org/pkg/net/http/#Server.Shutdown) method for graceful shutdowns. The example below describes its usage, and we've got more examples using gin [here](https://github.com/gin-gonic/examples/tree/master/graceful-shutdown). 
+ +```go +// +build go1.8 + +package main + +import ( + "context" + "log" + "net/http" + "os" + "os/signal" + "syscall" + "time" + + "github.com/gin-gonic/gin" +) + +func main() { + router := gin.Default() + router.GET("/", func(c *gin.Context) { + time.Sleep(5 * time.Second) + c.String(http.StatusOK, "Welcome Gin Server") + }) + + srv := &http.Server{ + Addr: ":8080", + Handler: router, + } + + // Initializing the server in a goroutine so that + // it won't block the graceful shutdown handling below + go func() { + if err := srv.ListenAndServe(); err != nil && errors.Is(err, http.ErrServerClosed) { + log.Printf("listen: %s\n", err) + } + }() + + // Wait for interrupt signal to gracefully shutdown the server with + // a timeout of 5 seconds. + quit := make(chan os.Signal) + // kill (no param) default send syscall.SIGTERM + // kill -2 is syscall.SIGINT + // kill -9 is syscall.SIGKILL but can't be catch, so don't need add it + signal.Notify(quit, syscall.SIGINT, syscall.SIGTERM) + <-quit + log.Println("Shutting down server...") + + // The context is used to inform the server it has 5 seconds to finish + // the request it is currently handling + ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second) + defer cancel() + + if err := srv.Shutdown(ctx); err != nil { + log.Fatal("Server forced to shutdown:", err) + } + + log.Println("Server exiting") +} +``` + +### Build a single binary with templates + +You can build a server into a single binary containing templates by using [go-assets][]. 
+ +[go-assets]: https://github.com/jessevdk/go-assets + +```go +func main() { + r := gin.New() + + t, err := loadTemplate() + if err != nil { + panic(err) + } + r.SetHTMLTemplate(t) + + r.GET("/", func(c *gin.Context) { + c.HTML(http.StatusOK, "/html/index.tmpl",nil) + }) + r.Run(":8080") +} + +// loadTemplate loads templates embedded by go-assets-builder +func loadTemplate() (*template.Template, error) { + t := template.New("") + for name, file := range Assets.Files { + defer file.Close() + if file.IsDir() || !strings.HasSuffix(name, ".tmpl") { + continue + } + h, err := ioutil.ReadAll(file) + if err != nil { + return nil, err + } + t, err = t.New(name).Parse(string(h)) + if err != nil { + return nil, err + } + } + return t, nil +} +``` + +See a complete example in the `https://github.com/gin-gonic/examples/tree/master/assets-in-binary` directory. + +### Bind form-data request with custom struct + +The follow example using custom struct: + +```go +type StructA struct { + FieldA string `form:"field_a"` +} + +type StructB struct { + NestedStruct StructA + FieldB string `form:"field_b"` +} + +type StructC struct { + NestedStructPointer *StructA + FieldC string `form:"field_c"` +} + +type StructD struct { + NestedAnonyStruct struct { + FieldX string `form:"field_x"` + } + FieldD string `form:"field_d"` +} + +func GetDataB(c *gin.Context) { + var b StructB + c.Bind(&b) + c.JSON(200, gin.H{ + "a": b.NestedStruct, + "b": b.FieldB, + }) +} + +func GetDataC(c *gin.Context) { + var b StructC + c.Bind(&b) + c.JSON(200, gin.H{ + "a": b.NestedStructPointer, + "c": b.FieldC, + }) +} + +func GetDataD(c *gin.Context) { + var b StructD + c.Bind(&b) + c.JSON(200, gin.H{ + "x": b.NestedAnonyStruct, + "d": b.FieldD, + }) +} + +func main() { + r := gin.Default() + r.GET("/getb", GetDataB) + r.GET("/getc", GetDataC) + r.GET("/getd", GetDataD) + + r.Run() +} +``` + +Using the command `curl` command result: + +``` +$ curl "http://localhost:8080/getb?field_a=hello&field_b=world" 
+{"a":{"FieldA":"hello"},"b":"world"} +$ curl "http://localhost:8080/getc?field_a=hello&field_c=world" +{"a":{"FieldA":"hello"},"c":"world"} +$ curl "http://localhost:8080/getd?field_x=hello&field_d=world" +{"d":"world","x":{"FieldX":"hello"}} +``` + +### Try to bind body into different structs + +The normal methods for binding request body consumes `c.Request.Body` and they +cannot be called multiple times. + +```go +type formA struct { + Foo string `json:"foo" xml:"foo" binding:"required"` +} + +type formB struct { + Bar string `json:"bar" xml:"bar" binding:"required"` +} + +func SomeHandler(c *gin.Context) { + objA := formA{} + objB := formB{} + // This c.ShouldBind consumes c.Request.Body and it cannot be reused. + if errA := c.ShouldBind(&objA); errA == nil { + c.String(http.StatusOK, `the body should be formA`) + // Always an error is occurred by this because c.Request.Body is EOF now. + } else if errB := c.ShouldBind(&objB); errB == nil { + c.String(http.StatusOK, `the body should be formB`) + } else { + ... + } +} +``` + +For this, you can use `c.ShouldBindBodyWith`. + +```go +func SomeHandler(c *gin.Context) { + objA := formA{} + objB := formB{} + // This reads c.Request.Body and stores the result into the context. + if errA := c.ShouldBindBodyWith(&objA, binding.JSON); errA == nil { + c.String(http.StatusOK, `the body should be formA`) + // At this time, it reuses body stored in the context. + } else if errB := c.ShouldBindBodyWith(&objB, binding.JSON); errB == nil { + c.String(http.StatusOK, `the body should be formB JSON`) + // And it can accepts other formats + } else if errB2 := c.ShouldBindBodyWith(&objB, binding.XML); errB2 == nil { + c.String(http.StatusOK, `the body should be formB XML`) + } else { + ... + } +} +``` + +* `c.ShouldBindBodyWith` stores body into the context before binding. This has +a slight impact to performance, so you should not use this method if you are +enough to call binding at once. 
+* This feature is only needed for some formats -- `JSON`, `XML`, `MsgPack`, +`ProtoBuf`. For other formats, `Query`, `Form`, `FormPost`, `FormMultipart`, +can be called by `c.ShouldBind()` multiple times without any damage to +performance (See [#1341](https://github.com/gin-gonic/gin/pull/1341)). + +### http2 server push + +http.Pusher is supported only **go1.8+**. See the [golang blog](https://blog.golang.org/h2push) for detail information. + +```go +package main + +import ( + "html/template" + "log" + + "github.com/gin-gonic/gin" +) + +var html = template.Must(template.New("https").Parse(` + + + Https Test + + + +

Welcome, Ginner!

+ + +`)) + +func main() { + r := gin.Default() + r.Static("/assets", "./assets") + r.SetHTMLTemplate(html) + + r.GET("/", func(c *gin.Context) { + if pusher := c.Writer.Pusher(); pusher != nil { + // use pusher.Push() to do server push + if err := pusher.Push("/assets/app.js", nil); err != nil { + log.Printf("Failed to push: %v", err) + } + } + c.HTML(200, "https", gin.H{ + "status": "success", + }) + }) + + // Listen and Server in https://127.0.0.1:8080 + r.RunTLS(":8080", "./testdata/server.pem", "./testdata/server.key") +} +``` + +### Define format for the log of routes + +The default log of routes is: +``` +[GIN-debug] POST /foo --> main.main.func1 (3 handlers) +[GIN-debug] GET /bar --> main.main.func2 (3 handlers) +[GIN-debug] GET /status --> main.main.func3 (3 handlers) +``` + +If you want to log this information in given format (e.g. JSON, key values or something else), then you can define this format with `gin.DebugPrintRouteFunc`. +In the example below, we log all routes with standard log package but you can use another log tools that suits of your needs. 
+```go +import ( + "log" + "net/http" + + "github.com/gin-gonic/gin" +) + +func main() { + r := gin.Default() + gin.DebugPrintRouteFunc = func(httpMethod, absolutePath, handlerName string, nuHandlers int) { + log.Printf("endpoint %v %v %v %v\n", httpMethod, absolutePath, handlerName, nuHandlers) + } + + r.POST("/foo", func(c *gin.Context) { + c.JSON(http.StatusOK, "foo") + }) + + r.GET("/bar", func(c *gin.Context) { + c.JSON(http.StatusOK, "bar") + }) + + r.GET("/status", func(c *gin.Context) { + c.JSON(http.StatusOK, "ok") + }) + + // Listen and Server in http://0.0.0.0:8080 + r.Run() +} +``` + +### Set and get a cookie + +```go +import ( + "fmt" + + "github.com/gin-gonic/gin" +) + +func main() { + + router := gin.Default() + + router.GET("/cookie", func(c *gin.Context) { + + cookie, err := c.Cookie("gin_cookie") + + if err != nil { + cookie = "NotSet" + c.SetCookie("gin_cookie", "test", 3600, "/", "localhost", false, true) + } + + fmt.Printf("Cookie value: %s \n", cookie) + }) + + router.Run() +} +``` + +## Don't trust all proxies + +Gin lets you specify which headers to hold the real client IP (if any), +as well as specifying which proxies (or direct clients) you trust to +specify one of these headers. + +The `TrustedProxies` slice on your `gin.Engine` specifes network addresses or +network CIDRs from where clients which their request headers related to client +IP can be trusted. They can be IPv4 addresses, IPv4 CIDRs, IPv6 addresses or +IPv6 CIDRs. + +```go +import ( + "fmt" + + "github.com/gin-gonic/gin" +) + +func main() { + + router := gin.Default() + router.TrustedProxies = []string{"192.168.1.2"} + + router.GET("/", func(c *gin.Context) { + // If the client is 192.168.1.2, use the X-Forwarded-For + // header to deduce the original client IP from the trust- + // worthy parts of that header. 
+ // Otherwise, simply return the direct client IP + fmt.Printf("ClientIP: %s\n", c.ClientIP()) + }) + router.Run() +} +``` + +## Testing + +The `net/http/httptest` package is preferable way for HTTP testing. + +```go +package main + +func setupRouter() *gin.Engine { + r := gin.Default() + r.GET("/ping", func(c *gin.Context) { + c.String(200, "pong") + }) + return r +} + +func main() { + r := setupRouter() + r.Run(":8080") +} +``` + +Test for code example above: + +```go +package main + +import ( + "net/http" + "net/http/httptest" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestPingRoute(t *testing.T) { + router := setupRouter() + + w := httptest.NewRecorder() + req, _ := http.NewRequest("GET", "/ping", nil) + router.ServeHTTP(w, req) + + assert.Equal(t, 200, w.Code) + assert.Equal(t, "pong", w.Body.String()) +} +``` + +## Users + +Awesome project lists using [Gin](https://github.com/gin-gonic/gin) web framework. + +* [gorush](https://github.com/appleboy/gorush): A push notification server written in Go. +* [fnproject](https://github.com/fnproject/fn): The container native, cloud agnostic serverless platform. +* [photoprism](https://github.com/photoprism/photoprism): Personal photo management powered by Go and Google TensorFlow. +* [krakend](https://github.com/devopsfaith/krakend): Ultra performant API Gateway with middlewares. +* [picfit](https://github.com/thoas/picfit): An image resizing server written in Go. +* [brigade](https://github.com/brigadecore/brigade): Event-based Scripting for Kubernetes. +* [dkron](https://github.com/distribworks/dkron): Distributed, fault tolerant job scheduling system. diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/auth.go b/terraform-server/vendor/github.com/gin-gonic/gin/auth.go new file mode 100644 index 00000000..4d8a6ce4 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/auth.go @@ -0,0 +1,91 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "crypto/subtle" + "encoding/base64" + "net/http" + "strconv" + + "github.com/gin-gonic/gin/internal/bytesconv" +) + +// AuthUserKey is the cookie name for user credential in basic auth. +const AuthUserKey = "user" + +// Accounts defines a key/value for user/pass list of authorized logins. +type Accounts map[string]string + +type authPair struct { + value string + user string +} + +type authPairs []authPair + +func (a authPairs) searchCredential(authValue string) (string, bool) { + if authValue == "" { + return "", false + } + for _, pair := range a { + if subtle.ConstantTimeCompare([]byte(pair.value), []byte(authValue)) == 1 { + return pair.user, true + } + } + return "", false +} + +// BasicAuthForRealm returns a Basic HTTP Authorization middleware. It takes as arguments a map[string]string where +// the key is the user name and the value is the password, as well as the name of the Realm. +// If the realm is empty, "Authorization Required" will be used by default. +// (see http://tools.ietf.org/html/rfc2617#section-1.2) +func BasicAuthForRealm(accounts Accounts, realm string) HandlerFunc { + if realm == "" { + realm = "Authorization Required" + } + realm = "Basic realm=" + strconv.Quote(realm) + pairs := processAccounts(accounts) + return func(c *Context) { + // Search user in the slice of allowed credentials + user, found := pairs.searchCredential(c.requestHeader("Authorization")) + if !found { + // Credentials doesn't match, we return 401 and abort handlers chain. + c.Header("WWW-Authenticate", realm) + c.AbortWithStatus(http.StatusUnauthorized) + return + } + + // The user credentials was found, set user's id to key AuthUserKey in this context, the user's id can be read later using + // c.MustGet(gin.AuthUserKey). + c.Set(AuthUserKey, user) + } +} + +// BasicAuth returns a Basic HTTP Authorization middleware. 
It takes as argument a map[string]string where +// the key is the user name and the value is the password. +func BasicAuth(accounts Accounts) HandlerFunc { + return BasicAuthForRealm(accounts, "") +} + +func processAccounts(accounts Accounts) authPairs { + length := len(accounts) + assert1(length > 0, "Empty list of authorized credentials") + pairs := make(authPairs, 0, length) + for user, password := range accounts { + assert1(user != "", "User can not be empty") + value := authorizationHeader(user, password) + pairs = append(pairs, authPair{ + value: value, + user: user, + }) + } + return pairs +} + +func authorizationHeader(user, password string) string { + base := user + ":" + password + return "Basic " + base64.StdEncoding.EncodeToString(bytesconv.StringToBytes(base)) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/binding.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/binding.go new file mode 100644 index 00000000..5caeb581 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/binding.go @@ -0,0 +1,118 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build !nomsgpack +// +build !nomsgpack + +package binding + +import "net/http" + +// Content-Type MIME of the most common data formats. +const ( + MIMEJSON = "application/json" + MIMEHTML = "text/html" + MIMEXML = "application/xml" + MIMEXML2 = "text/xml" + MIMEPlain = "text/plain" + MIMEPOSTForm = "application/x-www-form-urlencoded" + MIMEMultipartPOSTForm = "multipart/form-data" + MIMEPROTOBUF = "application/x-protobuf" + MIMEMSGPACK = "application/x-msgpack" + MIMEMSGPACK2 = "application/msgpack" + MIMEYAML = "application/x-yaml" +) + +// Binding describes the interface which needs to be implemented for binding the +// data present in the request such as JSON request body, query parameters or +// the form POST. 
+type Binding interface { + Name() string + Bind(*http.Request, interface{}) error +} + +// BindingBody adds BindBody method to Binding. BindBody is similar with Bind, +// but it reads the body from supplied bytes instead of req.Body. +type BindingBody interface { + Binding + BindBody([]byte, interface{}) error +} + +// BindingUri adds BindUri method to Binding. BindUri is similar with Bind, +// but it read the Params. +type BindingUri interface { + Name() string + BindUri(map[string][]string, interface{}) error +} + +// StructValidator is the minimal interface which needs to be implemented in +// order for it to be used as the validator engine for ensuring the correctness +// of the request. Gin provides a default implementation for this using +// https://github.com/go-playground/validator/tree/v8.18.2. +type StructValidator interface { + // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. + // If the received type is a slice|array, the validation should be performed travel on every element. + // If the received type is not a struct or slice|array, any validation should be skipped and nil must be returned. + // If the received type is a struct or pointer to a struct, the validation should be performed. + // If the struct is not valid or the validation itself fails, a descriptive error should be returned. + // Otherwise nil must be returned. + ValidateStruct(interface{}) error + + // Engine returns the underlying validator engine which powers the + // StructValidator implementation. + Engine() interface{} +} + +// Validator is the default validator which implements the StructValidator +// interface. It uses https://github.com/go-playground/validator/tree/v8.18.2 +// under the hood. +var Validator StructValidator = &defaultValidator{} + +// These implement the Binding interface and can be used to bind the data +// present in the request to struct instances. 
+var ( + JSON = jsonBinding{} + XML = xmlBinding{} + Form = formBinding{} + Query = queryBinding{} + FormPost = formPostBinding{} + FormMultipart = formMultipartBinding{} + ProtoBuf = protobufBinding{} + MsgPack = msgpackBinding{} + YAML = yamlBinding{} + Uri = uriBinding{} + Header = headerBinding{} +) + +// Default returns the appropriate Binding instance based on the HTTP method +// and the content type. +func Default(method, contentType string) Binding { + if method == http.MethodGet { + return Form + } + + switch contentType { + case MIMEJSON: + return JSON + case MIMEXML, MIMEXML2: + return XML + case MIMEPROTOBUF: + return ProtoBuf + case MIMEMSGPACK, MIMEMSGPACK2: + return MsgPack + case MIMEYAML: + return YAML + case MIMEMultipartPOSTForm: + return FormMultipart + default: // case MIMEPOSTForm: + return Form + } +} + +func validate(obj interface{}) error { + if Validator == nil { + return nil + } + return Validator.ValidateStruct(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go new file mode 100644 index 00000000..9afa3dcf --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/binding_nomsgpack.go @@ -0,0 +1,112 @@ +// Copyright 2020 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build nomsgpack +// +build nomsgpack + +package binding + +import "net/http" + +// Content-Type MIME of the most common data formats. 
+const ( + MIMEJSON = "application/json" + MIMEHTML = "text/html" + MIMEXML = "application/xml" + MIMEXML2 = "text/xml" + MIMEPlain = "text/plain" + MIMEPOSTForm = "application/x-www-form-urlencoded" + MIMEMultipartPOSTForm = "multipart/form-data" + MIMEPROTOBUF = "application/x-protobuf" + MIMEYAML = "application/x-yaml" +) + +// Binding describes the interface which needs to be implemented for binding the +// data present in the request such as JSON request body, query parameters or +// the form POST. +type Binding interface { + Name() string + Bind(*http.Request, interface{}) error +} + +// BindingBody adds BindBody method to Binding. BindBody is similar with Bind, +// but it reads the body from supplied bytes instead of req.Body. +type BindingBody interface { + Binding + BindBody([]byte, interface{}) error +} + +// BindingUri adds BindUri method to Binding. BindUri is similar with Bind, +// but it read the Params. +type BindingUri interface { + Name() string + BindUri(map[string][]string, interface{}) error +} + +// StructValidator is the minimal interface which needs to be implemented in +// order for it to be used as the validator engine for ensuring the correctness +// of the request. Gin provides a default implementation for this using +// https://github.com/go-playground/validator/tree/v8.18.2. +type StructValidator interface { + // ValidateStruct can receive any kind of type and it should never panic, even if the configuration is not right. + // If the received type is not a struct, any validation should be skipped and nil must be returned. + // If the received type is a struct or pointer to a struct, the validation should be performed. + // If the struct is not valid or the validation itself fails, a descriptive error should be returned. + // Otherwise nil must be returned. + ValidateStruct(interface{}) error + + // Engine returns the underlying validator engine which powers the + // StructValidator implementation. 
+ Engine() interface{} +} + +// Validator is the default validator which implements the StructValidator +// interface. It uses https://github.com/go-playground/validator/tree/v8.18.2 +// under the hood. +var Validator StructValidator = &defaultValidator{} + +// These implement the Binding interface and can be used to bind the data +// present in the request to struct instances. +var ( + JSON = jsonBinding{} + XML = xmlBinding{} + Form = formBinding{} + Query = queryBinding{} + FormPost = formPostBinding{} + FormMultipart = formMultipartBinding{} + ProtoBuf = protobufBinding{} + YAML = yamlBinding{} + Uri = uriBinding{} + Header = headerBinding{} +) + +// Default returns the appropriate Binding instance based on the HTTP method +// and the content type. +func Default(method, contentType string) Binding { + if method == "GET" { + return Form + } + + switch contentType { + case MIMEJSON: + return JSON + case MIMEXML, MIMEXML2: + return XML + case MIMEPROTOBUF: + return ProtoBuf + case MIMEYAML: + return YAML + case MIMEMultipartPOSTForm: + return FormMultipart + default: // case MIMEPOSTForm: + return Form + } +} + +func validate(obj interface{}) error { + if Validator == nil { + return nil + } + return Validator.ValidateStruct(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/default_validator.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/default_validator.go new file mode 100644 index 00000000..c57a120f --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/default_validator.go @@ -0,0 +1,85 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "fmt" + "reflect" + "strings" + "sync" + + "github.com/go-playground/validator/v10" +) + +type defaultValidator struct { + once sync.Once + validate *validator.Validate +} + +type sliceValidateError []error + +func (err sliceValidateError) Error() string { + var errMsgs []string + for i, e := range err { + if e == nil { + continue + } + errMsgs = append(errMsgs, fmt.Sprintf("[%d]: %s", i, e.Error())) + } + return strings.Join(errMsgs, "\n") +} + +var _ StructValidator = &defaultValidator{} + +// ValidateStruct receives any kind of type, but only performed struct or pointer to struct type. +func (v *defaultValidator) ValidateStruct(obj interface{}) error { + if obj == nil { + return nil + } + + value := reflect.ValueOf(obj) + switch value.Kind() { + case reflect.Ptr: + return v.ValidateStruct(value.Elem().Interface()) + case reflect.Struct: + return v.validateStruct(obj) + case reflect.Slice, reflect.Array: + count := value.Len() + validateRet := make(sliceValidateError, 0) + for i := 0; i < count; i++ { + if err := v.ValidateStruct(value.Index(i).Interface()); err != nil { + validateRet = append(validateRet, err) + } + } + if len(validateRet) == 0 { + return nil + } + return validateRet + default: + return nil + } +} + +// validateStruct receives struct type +func (v *defaultValidator) validateStruct(obj interface{}) error { + v.lazyinit() + return v.validate.Struct(obj) +} + +// Engine returns the underlying validator engine which powers the default +// Validator instance. This is useful if you want to register custom validations +// or struct level validations. 
See validator GoDoc for more info - +// https://godoc.org/gopkg.in/go-playground/validator.v8 +func (v *defaultValidator) Engine() interface{} { + v.lazyinit() + return v.validate +} + +func (v *defaultValidator) lazyinit() { + v.once.Do(func() { + v.validate = validator.New() + v.validate.SetTagName("binding") + }) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/form.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/form.go new file mode 100644 index 00000000..b93c34cf --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/form.go @@ -0,0 +1,63 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "net/http" +) + +const defaultMemory = 32 << 20 + +type formBinding struct{} +type formPostBinding struct{} +type formMultipartBinding struct{} + +func (formBinding) Name() string { + return "form" +} + +func (formBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseForm(); err != nil { + return err + } + if err := req.ParseMultipartForm(defaultMemory); err != nil { + if err != http.ErrNotMultipart { + return err + } + } + if err := mapForm(obj, req.Form); err != nil { + return err + } + return validate(obj) +} + +func (formPostBinding) Name() string { + return "form-urlencoded" +} + +func (formPostBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseForm(); err != nil { + return err + } + if err := mapForm(obj, req.PostForm); err != nil { + return err + } + return validate(obj) +} + +func (formMultipartBinding) Name() string { + return "multipart/form-data" +} + +func (formMultipartBinding) Bind(req *http.Request, obj interface{}) error { + if err := req.ParseMultipartForm(defaultMemory); err != nil { + return err + } + if err := mappingByPtr(obj, (*multipartRequest)(req), "form"); err != nil { + return err + } + 
+ return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/form_mapping.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/form_mapping.go new file mode 100644 index 00000000..2f4e45b4 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/form_mapping.go @@ -0,0 +1,392 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "time" + + "github.com/gin-gonic/gin/internal/bytesconv" + "github.com/gin-gonic/gin/internal/json" +) + +var errUnknownType = errors.New("unknown type") + +func mapUri(ptr interface{}, m map[string][]string) error { + return mapFormByTag(ptr, m, "uri") +} + +func mapForm(ptr interface{}, form map[string][]string) error { + return mapFormByTag(ptr, form, "form") +} + +var emptyField = reflect.StructField{} + +func mapFormByTag(ptr interface{}, form map[string][]string, tag string) error { + // Check if ptr is a map + ptrVal := reflect.ValueOf(ptr) + var pointed interface{} + if ptrVal.Kind() == reflect.Ptr { + ptrVal = ptrVal.Elem() + pointed = ptrVal.Interface() + } + if ptrVal.Kind() == reflect.Map && + ptrVal.Type().Key().Kind() == reflect.String { + if pointed != nil { + ptr = pointed + } + return setFormMap(ptr, form) + } + + return mappingByPtr(ptr, formSource(form), tag) +} + +// setter tries to set value on a walking by fields of a struct +type setter interface { + TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) +} + +type formSource map[string][]string + +var _ setter = formSource(nil) + +// TrySet tries to set a value by request's form source (like map[string][]string) +func (form formSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) 
{ + return setByForm(value, field, form, tagValue, opt) +} + +func mappingByPtr(ptr interface{}, setter setter, tag string) error { + _, err := mapping(reflect.ValueOf(ptr), emptyField, setter, tag) + return err +} + +func mapping(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) { + if field.Tag.Get(tag) == "-" { // just ignoring this field + return false, nil + } + + var vKind = value.Kind() + + if vKind == reflect.Ptr { + var isNew bool + vPtr := value + if value.IsNil() { + isNew = true + vPtr = reflect.New(value.Type().Elem()) + } + isSetted, err := mapping(vPtr.Elem(), field, setter, tag) + if err != nil { + return false, err + } + if isNew && isSetted { + value.Set(vPtr) + } + return isSetted, nil + } + + if vKind != reflect.Struct || !field.Anonymous { + ok, err := tryToSetValue(value, field, setter, tag) + if err != nil { + return false, err + } + if ok { + return true, nil + } + } + + if vKind == reflect.Struct { + tValue := value.Type() + + var isSetted bool + for i := 0; i < value.NumField(); i++ { + sf := tValue.Field(i) + if sf.PkgPath != "" && !sf.Anonymous { // unexported + continue + } + ok, err := mapping(value.Field(i), tValue.Field(i), setter, tag) + if err != nil { + return false, err + } + isSetted = isSetted || ok + } + return isSetted, nil + } + return false, nil +} + +type setOptions struct { + isDefaultExists bool + defaultValue string +} + +func tryToSetValue(value reflect.Value, field reflect.StructField, setter setter, tag string) (bool, error) { + var tagValue string + var setOpt setOptions + + tagValue = field.Tag.Get(tag) + tagValue, opts := head(tagValue, ",") + + if tagValue == "" { // default value is FieldName + tagValue = field.Name + } + if tagValue == "" { // when field is "emptyField" variable + return false, nil + } + + var opt string + for len(opts) > 0 { + opt, opts = head(opts, ",") + + if k, v := head(opt, "="); k == "default" { + setOpt.isDefaultExists = true + setOpt.defaultValue 
= v + } + } + + return setter.TrySet(value, field, tagValue, setOpt) +} + +func setByForm(value reflect.Value, field reflect.StructField, form map[string][]string, tagValue string, opt setOptions) (isSetted bool, err error) { + vs, ok := form[tagValue] + if !ok && !opt.isDefaultExists { + return false, nil + } + + switch value.Kind() { + case reflect.Slice: + if !ok { + vs = []string{opt.defaultValue} + } + return true, setSlice(vs, value, field) + case reflect.Array: + if !ok { + vs = []string{opt.defaultValue} + } + if len(vs) != value.Len() { + return false, fmt.Errorf("%q is not valid value for %s", vs, value.Type().String()) + } + return true, setArray(vs, value, field) + default: + var val string + if !ok { + val = opt.defaultValue + } + + if len(vs) > 0 { + val = vs[0] + } + return true, setWithProperType(val, value, field) + } +} + +func setWithProperType(val string, value reflect.Value, field reflect.StructField) error { + switch value.Kind() { + case reflect.Int: + return setIntField(val, 0, value) + case reflect.Int8: + return setIntField(val, 8, value) + case reflect.Int16: + return setIntField(val, 16, value) + case reflect.Int32: + return setIntField(val, 32, value) + case reflect.Int64: + switch value.Interface().(type) { + case time.Duration: + return setTimeDuration(val, value, field) + } + return setIntField(val, 64, value) + case reflect.Uint: + return setUintField(val, 0, value) + case reflect.Uint8: + return setUintField(val, 8, value) + case reflect.Uint16: + return setUintField(val, 16, value) + case reflect.Uint32: + return setUintField(val, 32, value) + case reflect.Uint64: + return setUintField(val, 64, value) + case reflect.Bool: + return setBoolField(val, value) + case reflect.Float32: + return setFloatField(val, 32, value) + case reflect.Float64: + return setFloatField(val, 64, value) + case reflect.String: + value.SetString(val) + case reflect.Struct: + switch value.Interface().(type) { + case time.Time: + return setTimeField(val, 
field, value) + } + return json.Unmarshal(bytesconv.StringToBytes(val), value.Addr().Interface()) + case reflect.Map: + return json.Unmarshal(bytesconv.StringToBytes(val), value.Addr().Interface()) + default: + return errUnknownType + } + return nil +} + +func setIntField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + intVal, err := strconv.ParseInt(val, 10, bitSize) + if err == nil { + field.SetInt(intVal) + } + return err +} + +func setUintField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0" + } + uintVal, err := strconv.ParseUint(val, 10, bitSize) + if err == nil { + field.SetUint(uintVal) + } + return err +} + +func setBoolField(val string, field reflect.Value) error { + if val == "" { + val = "false" + } + boolVal, err := strconv.ParseBool(val) + if err == nil { + field.SetBool(boolVal) + } + return err +} + +func setFloatField(val string, bitSize int, field reflect.Value) error { + if val == "" { + val = "0.0" + } + floatVal, err := strconv.ParseFloat(val, bitSize) + if err == nil { + field.SetFloat(floatVal) + } + return err +} + +func setTimeField(val string, structField reflect.StructField, value reflect.Value) error { + timeFormat := structField.Tag.Get("time_format") + if timeFormat == "" { + timeFormat = time.RFC3339 + } + + switch tf := strings.ToLower(timeFormat); tf { + case "unix", "unixnano": + tv, err := strconv.ParseInt(val, 10, 64) + if err != nil { + return err + } + + d := time.Duration(1) + if tf == "unixnano" { + d = time.Second + } + + t := time.Unix(tv/int64(d), tv%int64(d)) + value.Set(reflect.ValueOf(t)) + return nil + + } + + if val == "" { + value.Set(reflect.ValueOf(time.Time{})) + return nil + } + + l := time.Local + if isUTC, _ := strconv.ParseBool(structField.Tag.Get("time_utc")); isUTC { + l = time.UTC + } + + if locTag := structField.Tag.Get("time_location"); locTag != "" { + loc, err := time.LoadLocation(locTag) + if err != nil { + return err + } + l = 
loc + } + + t, err := time.ParseInLocation(timeFormat, val, l) + if err != nil { + return err + } + + value.Set(reflect.ValueOf(t)) + return nil +} + +func setArray(vals []string, value reflect.Value, field reflect.StructField) error { + for i, s := range vals { + err := setWithProperType(s, value.Index(i), field) + if err != nil { + return err + } + } + return nil +} + +func setSlice(vals []string, value reflect.Value, field reflect.StructField) error { + slice := reflect.MakeSlice(value.Type(), len(vals), len(vals)) + err := setArray(vals, slice, field) + if err != nil { + return err + } + value.Set(slice) + return nil +} + +func setTimeDuration(val string, value reflect.Value, field reflect.StructField) error { + d, err := time.ParseDuration(val) + if err != nil { + return err + } + value.Set(reflect.ValueOf(d)) + return nil +} + +func head(str, sep string) (head string, tail string) { + idx := strings.Index(str, sep) + if idx < 0 { + return str, "" + } + return str[:idx], str[idx+len(sep):] +} + +func setFormMap(ptr interface{}, form map[string][]string) error { + el := reflect.TypeOf(ptr).Elem() + + if el.Kind() == reflect.Slice { + ptrMap, ok := ptr.(map[string][]string) + if !ok { + return errors.New("cannot convert to map slices of strings") + } + for k, v := range form { + ptrMap[k] = v + } + + return nil + } + + ptrMap, ok := ptr.(map[string]string) + if !ok { + return errors.New("cannot convert to map of strings") + } + for k, v := range form { + ptrMap[k] = v[len(v)-1] // pick last + } + + return nil +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/header.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/header.go new file mode 100644 index 00000000..179ce4ea --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/header.go @@ -0,0 +1,34 @@ +package binding + +import ( + "net/http" + "net/textproto" + "reflect" +) + +type headerBinding struct{} + +func (headerBinding) Name() string { + return 
"header" +} + +func (headerBinding) Bind(req *http.Request, obj interface{}) error { + + if err := mapHeader(obj, req.Header); err != nil { + return err + } + + return validate(obj) +} + +func mapHeader(ptr interface{}, h map[string][]string) error { + return mappingByPtr(ptr, headerSource(h), "header") +} + +type headerSource map[string][]string + +var _ setter = headerSource(nil) + +func (hs headerSource) TrySet(value reflect.Value, field reflect.StructField, tagValue string, opt setOptions) (isSetted bool, err error) { + return setByForm(value, field, hs, textproto.CanonicalMIMEHeaderKey(tagValue), opt) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/json.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/json.go new file mode 100644 index 00000000..d62e0705 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/json.go @@ -0,0 +1,56 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "bytes" + "fmt" + "io" + "net/http" + + "github.com/gin-gonic/gin/internal/json" +) + +// EnableDecoderUseNumber is used to call the UseNumber method on the JSON +// Decoder instance. UseNumber causes the Decoder to unmarshal a number into an +// interface{} as a Number instead of as a float64. +var EnableDecoderUseNumber = false + +// EnableDecoderDisallowUnknownFields is used to call the DisallowUnknownFields method +// on the JSON Decoder instance. DisallowUnknownFields causes the Decoder to +// return an error when the destination is a struct and the input contains object +// keys which do not match any non-ignored, exported fields in the destination. 
+var EnableDecoderDisallowUnknownFields = false + +type jsonBinding struct{} + +func (jsonBinding) Name() string { + return "json" +} + +func (jsonBinding) Bind(req *http.Request, obj interface{}) error { + if req == nil || req.Body == nil { + return fmt.Errorf("invalid request") + } + return decodeJSON(req.Body, obj) +} + +func (jsonBinding) BindBody(body []byte, obj interface{}) error { + return decodeJSON(bytes.NewReader(body), obj) +} + +func decodeJSON(r io.Reader, obj interface{}) error { + decoder := json.NewDecoder(r) + if EnableDecoderUseNumber { + decoder.UseNumber() + } + if EnableDecoderDisallowUnknownFields { + decoder.DisallowUnknownFields() + } + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/msgpack.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/msgpack.go new file mode 100644 index 00000000..2a442996 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/msgpack.go @@ -0,0 +1,38 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +//go:build !nomsgpack +// +build !nomsgpack + +package binding + +import ( + "bytes" + "io" + "net/http" + + "github.com/ugorji/go/codec" +) + +type msgpackBinding struct{} + +func (msgpackBinding) Name() string { + return "msgpack" +} + +func (msgpackBinding) Bind(req *http.Request, obj interface{}) error { + return decodeMsgPack(req.Body, obj) +} + +func (msgpackBinding) BindBody(body []byte, obj interface{}) error { + return decodeMsgPack(bytes.NewReader(body), obj) +} + +func decodeMsgPack(r io.Reader, obj interface{}) error { + cdc := new(codec.MsgpackHandle) + if err := codec.NewDecoder(r, cdc).Decode(&obj); err != nil { + return err + } + return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go new file mode 100644 index 00000000..f85a1aa6 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/multipart_form_mapping.go @@ -0,0 +1,66 @@ +// Copyright 2019 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "errors" + "mime/multipart" + "net/http" + "reflect" +) + +type multipartRequest http.Request + +var _ setter = (*multipartRequest)(nil) + +// TrySet tries to set a value by the multipart request with the binding a form file +func (r *multipartRequest) TrySet(value reflect.Value, field reflect.StructField, key string, opt setOptions) (isSetted bool, err error) { + if files := r.MultipartForm.File[key]; len(files) != 0 { + return setByMultipartFormFile(value, field, files) + } + + return setByForm(value, field, r.MultipartForm.Value, key, opt) +} + +func setByMultipartFormFile(value reflect.Value, field reflect.StructField, files []*multipart.FileHeader) (isSetted bool, err error) { + switch value.Kind() { + case reflect.Ptr: + switch value.Interface().(type) { + case *multipart.FileHeader: + value.Set(reflect.ValueOf(files[0])) + return true, nil + } + case reflect.Struct: + switch value.Interface().(type) { + case multipart.FileHeader: + value.Set(reflect.ValueOf(*files[0])) + return true, nil + } + case reflect.Slice: + slice := reflect.MakeSlice(value.Type(), len(files), len(files)) + isSetted, err = setArrayOfMultipartFormFiles(slice, field, files) + if err != nil || !isSetted { + return isSetted, err + } + value.Set(slice) + return true, nil + case reflect.Array: + return setArrayOfMultipartFormFiles(value, field, files) + } + return false, errors.New("unsupported field type for multipart.FileHeader") +} + +func setArrayOfMultipartFormFiles(value reflect.Value, field reflect.StructField, files []*multipart.FileHeader) (isSetted bool, err error) { + if value.Len() != len(files) { + return false, errors.New("unsupported len of array for []*multipart.FileHeader") + } + for i := range files { + setted, err := setByMultipartFormFile(value.Index(i), field, files[i:i+1]) + if err != nil || !setted { + return setted, err + } + } + return true, nil +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/protobuf.go 
b/terraform-server/vendor/github.com/gin-gonic/gin/binding/protobuf.go new file mode 100644 index 00000000..f9ece928 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/protobuf.go @@ -0,0 +1,36 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +import ( + "io/ioutil" + "net/http" + + "github.com/golang/protobuf/proto" +) + +type protobufBinding struct{} + +func (protobufBinding) Name() string { + return "protobuf" +} + +func (b protobufBinding) Bind(req *http.Request, obj interface{}) error { + buf, err := ioutil.ReadAll(req.Body) + if err != nil { + return err + } + return b.BindBody(buf, obj) +} + +func (protobufBinding) BindBody(body []byte, obj interface{}) error { + if err := proto.Unmarshal(body, obj.(proto.Message)); err != nil { + return err + } + // Here it's same to return validate(obj), but util now we can't add + // `binding:""` to the struct which automatically generate by gen-proto + return nil + // return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/query.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/query.go new file mode 100644 index 00000000..219743f2 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/query.go @@ -0,0 +1,21 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import "net/http" + +type queryBinding struct{} + +func (queryBinding) Name() string { + return "query" +} + +func (queryBinding) Bind(req *http.Request, obj interface{}) error { + values := req.URL.Query() + if err := mapForm(obj, values); err != nil { + return err + } + return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/uri.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/uri.go new file mode 100644 index 00000000..f91ec381 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/uri.go @@ -0,0 +1,18 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package binding + +type uriBinding struct{} + +func (uriBinding) Name() string { + return "uri" +} + +func (uriBinding) BindUri(m map[string][]string, obj interface{}) error { + if err := mapUri(obj, m); err != nil { + return err + } + return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/xml.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/xml.go new file mode 100644 index 00000000..4e901149 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/xml.go @@ -0,0 +1,33 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "bytes" + "encoding/xml" + "io" + "net/http" +) + +type xmlBinding struct{} + +func (xmlBinding) Name() string { + return "xml" +} + +func (xmlBinding) Bind(req *http.Request, obj interface{}) error { + return decodeXML(req.Body, obj) +} + +func (xmlBinding) BindBody(body []byte, obj interface{}) error { + return decodeXML(bytes.NewReader(body), obj) +} +func decodeXML(r io.Reader, obj interface{}) error { + decoder := xml.NewDecoder(r) + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/binding/yaml.go b/terraform-server/vendor/github.com/gin-gonic/gin/binding/yaml.go new file mode 100644 index 00000000..a2d36d6a --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/binding/yaml.go @@ -0,0 +1,35 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package binding + +import ( + "bytes" + "io" + "net/http" + + "gopkg.in/yaml.v2" +) + +type yamlBinding struct{} + +func (yamlBinding) Name() string { + return "yaml" +} + +func (yamlBinding) Bind(req *http.Request, obj interface{}) error { + return decodeYAML(req.Body, obj) +} + +func (yamlBinding) BindBody(body []byte, obj interface{}) error { + return decodeYAML(bytes.NewReader(body), obj) +} + +func decodeYAML(r io.Reader, obj interface{}) error { + decoder := yaml.NewDecoder(r) + if err := decoder.Decode(obj); err != nil { + return err + } + return validate(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/codecov.yml b/terraform-server/vendor/github.com/gin-gonic/gin/codecov.yml new file mode 100644 index 00000000..c9c9a522 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/codecov.yml @@ -0,0 +1,5 @@ +coverage: + notify: + gitter: + default: + url: https://webhooks.gitter.im/e/d90dcdeeab2f1e357165 diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/context.go b/terraform-server/vendor/github.com/gin-gonic/gin/context.go new file mode 100644 index 00000000..dc03c358 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/context.go @@ -0,0 +1,1178 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "mime/multipart" + "net" + "net/http" + "net/url" + "os" + "strings" + "sync" + "time" + + "github.com/gin-contrib/sse" + "github.com/gin-gonic/gin/binding" + "github.com/gin-gonic/gin/render" +) + +// Content-Type MIME of the most common data formats. 
+const ( + MIMEJSON = binding.MIMEJSON + MIMEHTML = binding.MIMEHTML + MIMEXML = binding.MIMEXML + MIMEXML2 = binding.MIMEXML2 + MIMEPlain = binding.MIMEPlain + MIMEPOSTForm = binding.MIMEPOSTForm + MIMEMultipartPOSTForm = binding.MIMEMultipartPOSTForm + MIMEYAML = binding.MIMEYAML +) + +// BodyBytesKey indicates a default body bytes key. +const BodyBytesKey = "_gin-gonic/gin/bodybyteskey" + +const abortIndex int8 = math.MaxInt8 / 2 + +// Context is the most important part of gin. It allows us to pass variables between middleware, +// manage the flow, validate the JSON of a request and render a JSON response for example. +type Context struct { + writermem responseWriter + Request *http.Request + Writer ResponseWriter + + Params Params + handlers HandlersChain + index int8 + fullPath string + + engine *Engine + params *Params + + // This mutex protect Keys map + mu sync.RWMutex + + // Keys is a key/value pair exclusively for the context of each request. + Keys map[string]interface{} + + // Errors is a list of errors attached to all the handlers/middlewares who used this context. + Errors errorMsgs + + // Accepted defines a list of manually accepted formats for content negotiation. + Accepted []string + + // queryCache use url.ParseQuery cached the param query result from c.Request.URL.Query() + queryCache url.Values + + // formCache use url.ParseQuery cached PostForm contains the parsed form data from POST, PATCH, + // or PUT body parameters. + formCache url.Values + + // SameSite allows a server to define a cookie attribute making it impossible for + // the browser to send this cookie along with cross-site requests. 
+ sameSite http.SameSite +} + +/************************************/ +/********** CONTEXT CREATION ********/ +/************************************/ + +func (c *Context) reset() { + c.Writer = &c.writermem + c.Params = c.Params[0:0] + c.handlers = nil + c.index = -1 + + c.fullPath = "" + c.Keys = nil + c.Errors = c.Errors[0:0] + c.Accepted = nil + c.queryCache = nil + c.formCache = nil + *c.params = (*c.params)[0:0] +} + +// Copy returns a copy of the current context that can be safely used outside the request's scope. +// This has to be used when the context has to be passed to a goroutine. +func (c *Context) Copy() *Context { + cp := Context{ + writermem: c.writermem, + Request: c.Request, + Params: c.Params, + engine: c.engine, + } + cp.writermem.ResponseWriter = nil + cp.Writer = &cp.writermem + cp.index = abortIndex + cp.handlers = nil + cp.Keys = map[string]interface{}{} + for k, v := range c.Keys { + cp.Keys[k] = v + } + paramCopy := make([]Param, len(cp.Params)) + copy(paramCopy, cp.Params) + cp.Params = paramCopy + return &cp +} + +// HandlerName returns the main handler's name. For example if the handler is "handleGetUsers()", +// this function will return "main.handleGetUsers". +func (c *Context) HandlerName() string { + return nameOfFunction(c.handlers.Last()) +} + +// HandlerNames returns a list of all registered handlers for this context in descending order, +// following the semantics of HandlerName() +func (c *Context) HandlerNames() []string { + hn := make([]string, 0, len(c.handlers)) + for _, val := range c.handlers { + hn = append(hn, nameOfFunction(val)) + } + return hn +} + +// Handler returns the main handler. +func (c *Context) Handler() HandlerFunc { + return c.handlers.Last() +} + +// FullPath returns a matched route full path. For not found routes +// returns an empty string. 
+// router.GET("/user/:id", func(c *gin.Context) { +// c.FullPath() == "/user/:id" // true +// }) +func (c *Context) FullPath() string { + return c.fullPath +} + +/************************************/ +/*********** FLOW CONTROL ***********/ +/************************************/ + +// Next should be used only inside middleware. +// It executes the pending handlers in the chain inside the calling handler. +// See example in GitHub. +func (c *Context) Next() { + c.index++ + for c.index < int8(len(c.handlers)) { + c.handlers[c.index](c) + c.index++ + } +} + +// IsAborted returns true if the current context was aborted. +func (c *Context) IsAborted() bool { + return c.index >= abortIndex +} + +// Abort prevents pending handlers from being called. Note that this will not stop the current handler. +// Let's say you have an authorization middleware that validates that the current request is authorized. +// If the authorization fails (ex: the password does not match), call Abort to ensure the remaining handlers +// for this request are not called. +func (c *Context) Abort() { + c.index = abortIndex +} + +// AbortWithStatus calls `Abort()` and writes the headers with the specified status code. +// For example, a failed attempt to authenticate a request could use: context.AbortWithStatus(401). +func (c *Context) AbortWithStatus(code int) { + c.Status(code) + c.Writer.WriteHeaderNow() + c.Abort() +} + +// AbortWithStatusJSON calls `Abort()` and then `JSON` internally. +// This method stops the chain, writes the status code and return a JSON body. +// It also sets the Content-Type as "application/json". +func (c *Context) AbortWithStatusJSON(code int, jsonObj interface{}) { + c.Abort() + c.JSON(code, jsonObj) +} + +// AbortWithError calls `AbortWithStatus()` and `Error()` internally. +// This method stops the chain, writes the status code and pushes the specified error to `c.Errors`. +// See Context.Error() for more details. 
+func (c *Context) AbortWithError(code int, err error) *Error { + c.AbortWithStatus(code) + return c.Error(err) +} + +/************************************/ +/********* ERROR MANAGEMENT *********/ +/************************************/ + +// Error attaches an error to the current context. The error is pushed to a list of errors. +// It's a good idea to call Error for each error that occurred during the resolution of a request. +// A middleware can be used to collect all the errors and push them to a database together, +// print a log, or append it in the HTTP response. +// Error will panic if err is nil. +func (c *Context) Error(err error) *Error { + if err == nil { + panic("err is nil") + } + + parsedError, ok := err.(*Error) + if !ok { + parsedError = &Error{ + Err: err, + Type: ErrorTypePrivate, + } + } + + c.Errors = append(c.Errors, parsedError) + return parsedError +} + +/************************************/ +/******** METADATA MANAGEMENT********/ +/************************************/ + +// Set is used to store a new key/value pair exclusively for this context. +// It also lazy initializes c.Keys if it was not used previously. +func (c *Context) Set(key string, value interface{}) { + c.mu.Lock() + if c.Keys == nil { + c.Keys = make(map[string]interface{}) + } + + c.Keys[key] = value + c.mu.Unlock() +} + +// Get returns the value for the given key, ie: (value, true). +// If the value does not exists it returns (nil, false) +func (c *Context) Get(key string) (value interface{}, exists bool) { + c.mu.RLock() + value, exists = c.Keys[key] + c.mu.RUnlock() + return +} + +// MustGet returns the value for the given key if it exists, otherwise it panics. +func (c *Context) MustGet(key string) interface{} { + if value, exists := c.Get(key); exists { + return value + } + panic("Key \"" + key + "\" does not exist") +} + +// GetString returns the value associated with the key as a string. 
+func (c *Context) GetString(key string) (s string) { + if val, ok := c.Get(key); ok && val != nil { + s, _ = val.(string) + } + return +} + +// GetBool returns the value associated with the key as a boolean. +func (c *Context) GetBool(key string) (b bool) { + if val, ok := c.Get(key); ok && val != nil { + b, _ = val.(bool) + } + return +} + +// GetInt returns the value associated with the key as an integer. +func (c *Context) GetInt(key string) (i int) { + if val, ok := c.Get(key); ok && val != nil { + i, _ = val.(int) + } + return +} + +// GetInt64 returns the value associated with the key as an integer. +func (c *Context) GetInt64(key string) (i64 int64) { + if val, ok := c.Get(key); ok && val != nil { + i64, _ = val.(int64) + } + return +} + +// GetUint returns the value associated with the key as an unsigned integer. +func (c *Context) GetUint(key string) (ui uint) { + if val, ok := c.Get(key); ok && val != nil { + ui, _ = val.(uint) + } + return +} + +// GetUint64 returns the value associated with the key as an unsigned integer. +func (c *Context) GetUint64(key string) (ui64 uint64) { + if val, ok := c.Get(key); ok && val != nil { + ui64, _ = val.(uint64) + } + return +} + +// GetFloat64 returns the value associated with the key as a float64. +func (c *Context) GetFloat64(key string) (f64 float64) { + if val, ok := c.Get(key); ok && val != nil { + f64, _ = val.(float64) + } + return +} + +// GetTime returns the value associated with the key as time. +func (c *Context) GetTime(key string) (t time.Time) { + if val, ok := c.Get(key); ok && val != nil { + t, _ = val.(time.Time) + } + return +} + +// GetDuration returns the value associated with the key as a duration. +func (c *Context) GetDuration(key string) (d time.Duration) { + if val, ok := c.Get(key); ok && val != nil { + d, _ = val.(time.Duration) + } + return +} + +// GetStringSlice returns the value associated with the key as a slice of strings. 
+func (c *Context) GetStringSlice(key string) (ss []string) { + if val, ok := c.Get(key); ok && val != nil { + ss, _ = val.([]string) + } + return +} + +// GetStringMap returns the value associated with the key as a map of interfaces. +func (c *Context) GetStringMap(key string) (sm map[string]interface{}) { + if val, ok := c.Get(key); ok && val != nil { + sm, _ = val.(map[string]interface{}) + } + return +} + +// GetStringMapString returns the value associated with the key as a map of strings. +func (c *Context) GetStringMapString(key string) (sms map[string]string) { + if val, ok := c.Get(key); ok && val != nil { + sms, _ = val.(map[string]string) + } + return +} + +// GetStringMapStringSlice returns the value associated with the key as a map to a slice of strings. +func (c *Context) GetStringMapStringSlice(key string) (smss map[string][]string) { + if val, ok := c.Get(key); ok && val != nil { + smss, _ = val.(map[string][]string) + } + return +} + +/************************************/ +/************ INPUT DATA ************/ +/************************************/ + +// Param returns the value of the URL param. +// It is a shortcut for c.Params.ByName(key) +// router.GET("/user/:id", func(c *gin.Context) { +// // a GET request to /user/john +// id := c.Param("id") // id == "john" +// }) +func (c *Context) Param(key string) string { + return c.Params.ByName(key) +} + +// Query returns the keyed url query value if it exists, +// otherwise it returns an empty string `("")`. +// It is shortcut for `c.Request.URL.Query().Get(key)` +// GET /path?id=1234&name=Manu&value= +// c.Query("id") == "1234" +// c.Query("name") == "Manu" +// c.Query("value") == "" +// c.Query("wtf") == "" +func (c *Context) Query(key string) string { + value, _ := c.GetQuery(key) + return value +} + +// DefaultQuery returns the keyed url query value if it exists, +// otherwise it returns the specified defaultValue string. +// See: Query() and GetQuery() for further information. 
+// GET /?name=Manu&lastname= +// c.DefaultQuery("name", "unknown") == "Manu" +// c.DefaultQuery("id", "none") == "none" +// c.DefaultQuery("lastname", "none") == "" +func (c *Context) DefaultQuery(key, defaultValue string) string { + if value, ok := c.GetQuery(key); ok { + return value + } + return defaultValue +} + +// GetQuery is like Query(), it returns the keyed url query value +// if it exists `(value, true)` (even when the value is an empty string), +// otherwise it returns `("", false)`. +// It is shortcut for `c.Request.URL.Query().Get(key)` +// GET /?name=Manu&lastname= +// ("Manu", true) == c.GetQuery("name") +// ("", false) == c.GetQuery("id") +// ("", true) == c.GetQuery("lastname") +func (c *Context) GetQuery(key string) (string, bool) { + if values, ok := c.GetQueryArray(key); ok { + return values[0], ok + } + return "", false +} + +// QueryArray returns a slice of strings for a given query key. +// The length of the slice depends on the number of params with the given key. +func (c *Context) QueryArray(key string) []string { + values, _ := c.GetQueryArray(key) + return values +} + +func (c *Context) initQueryCache() { + if c.queryCache == nil { + if c.Request != nil { + c.queryCache = c.Request.URL.Query() + } else { + c.queryCache = url.Values{} + } + } +} + +// GetQueryArray returns a slice of strings for a given query key, plus +// a boolean value whether at least one value exists for the given key. +func (c *Context) GetQueryArray(key string) ([]string, bool) { + c.initQueryCache() + if values, ok := c.queryCache[key]; ok && len(values) > 0 { + return values, true + } + return []string{}, false +} + +// QueryMap returns a map for a given query key. +func (c *Context) QueryMap(key string) map[string]string { + dicts, _ := c.GetQueryMap(key) + return dicts +} + +// GetQueryMap returns a map for a given query key, plus a boolean value +// whether at least one value exists for the given key. 
+func (c *Context) GetQueryMap(key string) (map[string]string, bool) { + c.initQueryCache() + return c.get(c.queryCache, key) +} + +// PostForm returns the specified key from a POST urlencoded form or multipart form +// when it exists, otherwise it returns an empty string `("")`. +func (c *Context) PostForm(key string) string { + value, _ := c.GetPostForm(key) + return value +} + +// DefaultPostForm returns the specified key from a POST urlencoded form or multipart form +// when it exists, otherwise it returns the specified defaultValue string. +// See: PostForm() and GetPostForm() for further information. +func (c *Context) DefaultPostForm(key, defaultValue string) string { + if value, ok := c.GetPostForm(key); ok { + return value + } + return defaultValue +} + +// GetPostForm is like PostForm(key). It returns the specified key from a POST urlencoded +// form or multipart form when it exists `(value, true)` (even when the value is an empty string), +// otherwise it returns ("", false). +// For example, during a PATCH request to update the user's email: +// email=mail@example.com --> ("mail@example.com", true) := GetPostForm("email") // set email to "mail@example.com" +// email= --> ("", true) := GetPostForm("email") // set email to "" +// --> ("", false) := GetPostForm("email") // do nothing with email +func (c *Context) GetPostForm(key string) (string, bool) { + if values, ok := c.GetPostFormArray(key); ok { + return values[0], ok + } + return "", false +} + +// PostFormArray returns a slice of strings for a given form key. +// The length of the slice depends on the number of params with the given key. 
+func (c *Context) PostFormArray(key string) []string { + values, _ := c.GetPostFormArray(key) + return values +} + +func (c *Context) initFormCache() { + if c.formCache == nil { + c.formCache = make(url.Values) + req := c.Request + if err := req.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { + if err != http.ErrNotMultipart { + debugPrint("error on parse multipart form array: %v", err) + } + } + c.formCache = req.PostForm + } +} + +// GetPostFormArray returns a slice of strings for a given form key, plus +// a boolean value whether at least one value exists for the given key. +func (c *Context) GetPostFormArray(key string) ([]string, bool) { + c.initFormCache() + if values := c.formCache[key]; len(values) > 0 { + return values, true + } + return []string{}, false +} + +// PostFormMap returns a map for a given form key. +func (c *Context) PostFormMap(key string) map[string]string { + dicts, _ := c.GetPostFormMap(key) + return dicts +} + +// GetPostFormMap returns a map for a given form key, plus a boolean value +// whether at least one value exists for the given key. +func (c *Context) GetPostFormMap(key string) (map[string]string, bool) { + c.initFormCache() + return c.get(c.formCache, key) +} + +// get is an internal method and returns a map which satisfy conditions. +func (c *Context) get(m map[string][]string, key string) (map[string]string, bool) { + dicts := make(map[string]string) + exist := false + for k, v := range m { + if i := strings.IndexByte(k, '['); i >= 1 && k[0:i] == key { + if j := strings.IndexByte(k[i+1:], ']'); j >= 1 { + exist = true + dicts[k[i+1:][:j]] = v[0] + } + } + } + return dicts, exist +} + +// FormFile returns the first file for the provided form key. 
+func (c *Context) FormFile(name string) (*multipart.FileHeader, error) { + if c.Request.MultipartForm == nil { + if err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory); err != nil { + return nil, err + } + } + f, fh, err := c.Request.FormFile(name) + if err != nil { + return nil, err + } + f.Close() + return fh, err +} + +// MultipartForm is the parsed multipart form, including file uploads. +func (c *Context) MultipartForm() (*multipart.Form, error) { + err := c.Request.ParseMultipartForm(c.engine.MaxMultipartMemory) + return c.Request.MultipartForm, err +} + +// SaveUploadedFile uploads the form file to specific dst. +func (c *Context) SaveUploadedFile(file *multipart.FileHeader, dst string) error { + src, err := file.Open() + if err != nil { + return err + } + defer src.Close() + + out, err := os.Create(dst) + if err != nil { + return err + } + defer out.Close() + + _, err = io.Copy(out, src) + return err +} + +// Bind checks the Content-Type to select a binding engine automatically, +// Depending the "Content-Type" header different bindings are used: +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error. +// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. +// It decodes the json payload into the struct specified as a pointer. +// It writes a 400 error and sets Content-Type header "text/plain" in the response if input is not valid. +func (c *Context) Bind(obj interface{}) error { + b := binding.Default(c.Request.Method, c.ContentType()) + return c.MustBindWith(obj, b) +} + +// BindJSON is a shortcut for c.MustBindWith(obj, binding.JSON). +func (c *Context) BindJSON(obj interface{}) error { + return c.MustBindWith(obj, binding.JSON) +} + +// BindXML is a shortcut for c.MustBindWith(obj, binding.BindXML). 
+func (c *Context) BindXML(obj interface{}) error { + return c.MustBindWith(obj, binding.XML) +} + +// BindQuery is a shortcut for c.MustBindWith(obj, binding.Query). +func (c *Context) BindQuery(obj interface{}) error { + return c.MustBindWith(obj, binding.Query) +} + +// BindYAML is a shortcut for c.MustBindWith(obj, binding.YAML). +func (c *Context) BindYAML(obj interface{}) error { + return c.MustBindWith(obj, binding.YAML) +} + +// BindHeader is a shortcut for c.MustBindWith(obj, binding.Header). +func (c *Context) BindHeader(obj interface{}) error { + return c.MustBindWith(obj, binding.Header) +} + +// BindUri binds the passed struct pointer using binding.Uri. +// It will abort the request with HTTP 400 if any error occurs. +func (c *Context) BindUri(obj interface{}) error { + if err := c.ShouldBindUri(obj); err != nil { + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + return err + } + return nil +} + +// MustBindWith binds the passed struct pointer using the specified binding engine. +// It will abort the request with HTTP 400 if any error occurs. +// See the binding package. +func (c *Context) MustBindWith(obj interface{}, b binding.Binding) error { + if err := c.ShouldBindWith(obj, b); err != nil { + c.AbortWithError(http.StatusBadRequest, err).SetType(ErrorTypeBind) // nolint: errcheck + return err + } + return nil +} + +// ShouldBind checks the Content-Type to select a binding engine automatically, +// Depending the "Content-Type" header different bindings are used: +// "application/json" --> JSON binding +// "application/xml" --> XML binding +// otherwise --> returns an error +// It parses the request's body as JSON if Content-Type == "application/json" using JSON or XML as a JSON input. +// It decodes the json payload into the struct specified as a pointer. +// Like c.Bind() but this method does not set the response status code to 400 and abort if the json is not valid. 
+func (c *Context) ShouldBind(obj interface{}) error { + b := binding.Default(c.Request.Method, c.ContentType()) + return c.ShouldBindWith(obj, b) +} + +// ShouldBindJSON is a shortcut for c.ShouldBindWith(obj, binding.JSON). +func (c *Context) ShouldBindJSON(obj interface{}) error { + return c.ShouldBindWith(obj, binding.JSON) +} + +// ShouldBindXML is a shortcut for c.ShouldBindWith(obj, binding.XML). +func (c *Context) ShouldBindXML(obj interface{}) error { + return c.ShouldBindWith(obj, binding.XML) +} + +// ShouldBindQuery is a shortcut for c.ShouldBindWith(obj, binding.Query). +func (c *Context) ShouldBindQuery(obj interface{}) error { + return c.ShouldBindWith(obj, binding.Query) +} + +// ShouldBindYAML is a shortcut for c.ShouldBindWith(obj, binding.YAML). +func (c *Context) ShouldBindYAML(obj interface{}) error { + return c.ShouldBindWith(obj, binding.YAML) +} + +// ShouldBindHeader is a shortcut for c.ShouldBindWith(obj, binding.Header). +func (c *Context) ShouldBindHeader(obj interface{}) error { + return c.ShouldBindWith(obj, binding.Header) +} + +// ShouldBindUri binds the passed struct pointer using the specified binding engine. +func (c *Context) ShouldBindUri(obj interface{}) error { + m := make(map[string][]string) + for _, v := range c.Params { + m[v.Key] = []string{v.Value} + } + return binding.Uri.BindUri(m, obj) +} + +// ShouldBindWith binds the passed struct pointer using the specified binding engine. +// See the binding package. +func (c *Context) ShouldBindWith(obj interface{}, b binding.Binding) error { + return b.Bind(c.Request, obj) +} + +// ShouldBindBodyWith is similar with ShouldBindWith, but it stores the request +// body into the context, and reuse when it is called again. +// +// NOTE: This method reads the body before binding. So you should use +// ShouldBindWith for better performance if you need to call only once. 
+func (c *Context) ShouldBindBodyWith(obj interface{}, bb binding.BindingBody) (err error) { + var body []byte + if cb, ok := c.Get(BodyBytesKey); ok { + if cbb, ok := cb.([]byte); ok { + body = cbb + } + } + if body == nil { + body, err = ioutil.ReadAll(c.Request.Body) + if err != nil { + return err + } + c.Set(BodyBytesKey, body) + } + return bb.BindBody(body, obj) +} + +// ClientIP implements a best effort algorithm to return the real client IP. +// It called c.RemoteIP() under the hood, to check if the remote IP is a trusted proxy or not. +// If it's it will then try to parse the headers defined in Engine.RemoteIPHeaders (defaulting to [X-Forwarded-For, X-Real-Ip]). +// If the headers are nots syntactically valid OR the remote IP does not correspong to a trusted proxy, +// the remote IP (coming form Request.RemoteAddr) is returned. +func (c *Context) ClientIP() string { + if c.engine.AppEngine { + if addr := c.requestHeader("X-Appengine-Remote-Addr"); addr != "" { + return addr + } + } + + remoteIP, trusted := c.RemoteIP() + if remoteIP == nil { + return "" + } + + if trusted && c.engine.ForwardedByClientIP && c.engine.RemoteIPHeaders != nil { + for _, headerName := range c.engine.RemoteIPHeaders { + ip, valid := validateHeader(c.requestHeader(headerName)) + if valid { + return ip + } + } + } + return remoteIP.String() +} + +// RemoteIP parses the IP from Request.RemoteAddr, normalizes and returns the IP (without the port). +// It also checks if the remoteIP is a trusted proxy or not. 
+// In order to perform this validation, it will see if the IP is contained within at least one of the CIDR blocks +// defined in Engine.TrustedProxies +func (c *Context) RemoteIP() (net.IP, bool) { + ip, _, err := net.SplitHostPort(strings.TrimSpace(c.Request.RemoteAddr)) + if err != nil { + return nil, false + } + remoteIP := net.ParseIP(ip) + if remoteIP == nil { + return nil, false + } + + if c.engine.trustedCIDRs != nil { + for _, cidr := range c.engine.trustedCIDRs { + if cidr.Contains(remoteIP) { + return remoteIP, true + } + } + } + + return remoteIP, false +} + +func validateHeader(header string) (clientIP string, valid bool) { + if header == "" { + return "", false + } + items := strings.Split(header, ",") + for i, ipStr := range items { + ipStr = strings.TrimSpace(ipStr) + ip := net.ParseIP(ipStr) + if ip == nil { + return "", false + } + + // We need to return the first IP in the list, but, + // we should not early return since we need to validate that + // the rest of the header is syntactically valid + if i == 0 { + clientIP = ipStr + valid = true + } + } + return +} + +// ContentType returns the Content-Type header of the request. +func (c *Context) ContentType() string { + return filterFlags(c.requestHeader("Content-Type")) +} + +// IsWebsocket returns true if the request headers indicate that a websocket +// handshake is being initiated by the client. +func (c *Context) IsWebsocket() bool { + if strings.Contains(strings.ToLower(c.requestHeader("Connection")), "upgrade") && + strings.EqualFold(c.requestHeader("Upgrade"), "websocket") { + return true + } + return false +} + +func (c *Context) requestHeader(key string) string { + return c.Request.Header.Get(key) +} + +/************************************/ +/******** RESPONSE RENDERING ********/ +/************************************/ + +// bodyAllowedForStatus is a copy of http.bodyAllowedForStatus non-exported function. 
+func bodyAllowedForStatus(status int) bool { + switch { + case status >= 100 && status <= 199: + return false + case status == http.StatusNoContent: + return false + case status == http.StatusNotModified: + return false + } + return true +} + +// Status sets the HTTP response code. +func (c *Context) Status(code int) { + c.Writer.WriteHeader(code) +} + +// Header is a intelligent shortcut for c.Writer.Header().Set(key, value). +// It writes a header in the response. +// If value == "", this method removes the header `c.Writer.Header().Del(key)` +func (c *Context) Header(key, value string) { + if value == "" { + c.Writer.Header().Del(key) + return + } + c.Writer.Header().Set(key, value) +} + +// GetHeader returns value from request headers. +func (c *Context) GetHeader(key string) string { + return c.requestHeader(key) +} + +// GetRawData return stream data. +func (c *Context) GetRawData() ([]byte, error) { + return ioutil.ReadAll(c.Request.Body) +} + +// SetSameSite with cookie +func (c *Context) SetSameSite(samesite http.SameSite) { + c.sameSite = samesite +} + +// SetCookie adds a Set-Cookie header to the ResponseWriter's headers. +// The provided cookie must have a valid Name. Invalid cookies may be +// silently dropped. +func (c *Context) SetCookie(name, value string, maxAge int, path, domain string, secure, httpOnly bool) { + if path == "" { + path = "/" + } + http.SetCookie(c.Writer, &http.Cookie{ + Name: name, + Value: url.QueryEscape(value), + MaxAge: maxAge, + Path: path, + Domain: domain, + SameSite: c.sameSite, + Secure: secure, + HttpOnly: httpOnly, + }) +} + +// Cookie returns the named cookie provided in the request or +// ErrNoCookie if not found. And return the named cookie is unescaped. +// If multiple cookies match the given name, only one cookie will +// be returned. 
+func (c *Context) Cookie(name string) (string, error) { + cookie, err := c.Request.Cookie(name) + if err != nil { + return "", err + } + val, _ := url.QueryUnescape(cookie.Value) + return val, nil +} + +// Render writes the response headers and calls render.Render to render data. +func (c *Context) Render(code int, r render.Render) { + c.Status(code) + + if !bodyAllowedForStatus(code) { + r.WriteContentType(c.Writer) + c.Writer.WriteHeaderNow() + return + } + + if err := r.Render(c.Writer); err != nil { + panic(err) + } +} + +// HTML renders the HTTP template specified by its file name. +// It also updates the HTTP code and sets the Content-Type as "text/html". +// See http://golang.org/doc/articles/wiki/ +func (c *Context) HTML(code int, name string, obj interface{}) { + instance := c.engine.HTMLRender.Instance(name, obj) + c.Render(code, instance) +} + +// IndentedJSON serializes the given struct as pretty JSON (indented + endlines) into the response body. +// It also sets the Content-Type as "application/json". +// WARNING: we recommend to use this only for development purposes since printing pretty JSON is +// more CPU and bandwidth consuming. Use Context.JSON() instead. +func (c *Context) IndentedJSON(code int, obj interface{}) { + c.Render(code, render.IndentedJSON{Data: obj}) +} + +// SecureJSON serializes the given struct as Secure JSON into the response body. +// Default prepends "while(1)," to response body if the given struct is array values. +// It also sets the Content-Type as "application/json". +func (c *Context) SecureJSON(code int, obj interface{}) { + c.Render(code, render.SecureJSON{Prefix: c.engine.secureJSONPrefix, Data: obj}) +} + +// JSONP serializes the given struct as JSON into the response body. +// It adds padding to response body to request data from a server residing in a different domain than the client. +// It also sets the Content-Type as "application/javascript". 
+func (c *Context) JSONP(code int, obj interface{}) { + callback := c.DefaultQuery("callback", "") + if callback == "" { + c.Render(code, render.JSON{Data: obj}) + return + } + c.Render(code, render.JsonpJSON{Callback: callback, Data: obj}) +} + +// JSON serializes the given struct as JSON into the response body. +// It also sets the Content-Type as "application/json". +func (c *Context) JSON(code int, obj interface{}) { + c.Render(code, render.JSON{Data: obj}) +} + +// AsciiJSON serializes the given struct as JSON into the response body with unicode to ASCII string. +// It also sets the Content-Type as "application/json". +func (c *Context) AsciiJSON(code int, obj interface{}) { + c.Render(code, render.AsciiJSON{Data: obj}) +} + +// PureJSON serializes the given struct as JSON into the response body. +// PureJSON, unlike JSON, does not replace special html characters with their unicode entities. +func (c *Context) PureJSON(code int, obj interface{}) { + c.Render(code, render.PureJSON{Data: obj}) +} + +// XML serializes the given struct as XML into the response body. +// It also sets the Content-Type as "application/xml". +func (c *Context) XML(code int, obj interface{}) { + c.Render(code, render.XML{Data: obj}) +} + +// YAML serializes the given struct as YAML into the response body. +func (c *Context) YAML(code int, obj interface{}) { + c.Render(code, render.YAML{Data: obj}) +} + +// ProtoBuf serializes the given struct as ProtoBuf into the response body. +func (c *Context) ProtoBuf(code int, obj interface{}) { + c.Render(code, render.ProtoBuf{Data: obj}) +} + +// String writes the given string into the response body. +func (c *Context) String(code int, format string, values ...interface{}) { + c.Render(code, render.String{Format: format, Data: values}) +} + +// Redirect returns a HTTP redirect to the specific location. 
+func (c *Context) Redirect(code int, location string) { + c.Render(-1, render.Redirect{ + Code: code, + Location: location, + Request: c.Request, + }) +} + +// Data writes some data into the body stream and updates the HTTP code. +func (c *Context) Data(code int, contentType string, data []byte) { + c.Render(code, render.Data{ + ContentType: contentType, + Data: data, + }) +} + +// DataFromReader writes the specified reader into the body stream and updates the HTTP code. +func (c *Context) DataFromReader(code int, contentLength int64, contentType string, reader io.Reader, extraHeaders map[string]string) { + c.Render(code, render.Reader{ + Headers: extraHeaders, + ContentType: contentType, + ContentLength: contentLength, + Reader: reader, + }) +} + +// File writes the specified file into the body stream in an efficient way. +func (c *Context) File(filepath string) { + http.ServeFile(c.Writer, c.Request, filepath) +} + +// FileFromFS writes the specified file from http.FileSystem into the body stream in an efficient way. +func (c *Context) FileFromFS(filepath string, fs http.FileSystem) { + defer func(old string) { + c.Request.URL.Path = old + }(c.Request.URL.Path) + + c.Request.URL.Path = filepath + + http.FileServer(fs).ServeHTTP(c.Writer, c.Request) +} + +// FileAttachment writes the specified file into the body stream in an efficient way +// On the client side, the file will typically be downloaded with the given filename +func (c *Context) FileAttachment(filepath, filename string) { + c.Writer.Header().Set("Content-Disposition", fmt.Sprintf("attachment; filename=\"%s\"", filename)) + http.ServeFile(c.Writer, c.Request, filepath) +} + +// SSEvent writes a Server-Sent Event into the body stream. 
+func (c *Context) SSEvent(name string, message interface{}) { + c.Render(-1, sse.Event{ + Event: name, + Data: message, + }) +} + +// Stream sends a streaming response and returns a boolean +// indicates "Is client disconnected in middle of stream" +func (c *Context) Stream(step func(w io.Writer) bool) bool { + w := c.Writer + clientGone := w.CloseNotify() + for { + select { + case <-clientGone: + return true + default: + keepOpen := step(w) + w.Flush() + if !keepOpen { + return false + } + } + } +} + +/************************************/ +/******** CONTENT NEGOTIATION *******/ +/************************************/ + +// Negotiate contains all negotiations data. +type Negotiate struct { + Offered []string + HTMLName string + HTMLData interface{} + JSONData interface{} + XMLData interface{} + YAMLData interface{} + Data interface{} +} + +// Negotiate calls different Render according acceptable Accept format. +func (c *Context) Negotiate(code int, config Negotiate) { + switch c.NegotiateFormat(config.Offered...) { + case binding.MIMEJSON: + data := chooseData(config.JSONData, config.Data) + c.JSON(code, data) + + case binding.MIMEHTML: + data := chooseData(config.HTMLData, config.Data) + c.HTML(code, config.HTMLName, data) + + case binding.MIMEXML: + data := chooseData(config.XMLData, config.Data) + c.XML(code, data) + + case binding.MIMEYAML: + data := chooseData(config.YAMLData, config.Data) + c.YAML(code, data) + + default: + c.AbortWithError(http.StatusNotAcceptable, errors.New("the accepted formats are not offered by the server")) // nolint: errcheck + } +} + +// NegotiateFormat returns an acceptable Accept format. 
+func (c *Context) NegotiateFormat(offered ...string) string { + assert1(len(offered) > 0, "you must provide at least one offer") + + if c.Accepted == nil { + c.Accepted = parseAccept(c.requestHeader("Accept")) + } + if len(c.Accepted) == 0 { + return offered[0] + } + for _, accepted := range c.Accepted { + for _, offer := range offered { + // According to RFC 2616 and RFC 2396, non-ASCII characters are not allowed in headers, + // therefore we can just iterate over the string without casting it into []rune + i := 0 + for ; i < len(accepted); i++ { + if accepted[i] == '*' || offer[i] == '*' { + return offer + } + if accepted[i] != offer[i] { + break + } + } + if i == len(accepted) { + return offer + } + } + } + return "" +} + +// SetAccepted sets Accept header data. +func (c *Context) SetAccepted(formats ...string) { + c.Accepted = formats +} + +/************************************/ +/***** GOLANG.ORG/X/NET/CONTEXT *****/ +/************************************/ + +// Deadline always returns that there is no deadline (ok==false), +// maybe you want to use Request.Context().Deadline() instead. +func (c *Context) Deadline() (deadline time.Time, ok bool) { + return +} + +// Done always returns nil (chan which will wait forever), +// if you want to abort your work when the connection was closed +// you should use Request.Context().Done() instead. +func (c *Context) Done() <-chan struct{} { + return nil +} + +// Err always returns nil, maybe you want to use Request.Context().Err() instead. +func (c *Context) Err() error { + return nil +} + +// Value returns the value associated with this context for key, or nil +// if no value is associated with key. Successive calls to Value with +// the same key returns the same result. 
+func (c *Context) Value(key interface{}) interface{} { + if key == 0 { + return c.Request + } + if keyAsString, ok := key.(string); ok { + val, _ := c.Get(keyAsString) + return val + } + return nil +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/context_appengine.go b/terraform-server/vendor/github.com/gin-gonic/gin/context_appengine.go new file mode 100644 index 00000000..d5658434 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/context_appengine.go @@ -0,0 +1,12 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build appengine +// +build appengine + +package gin + +func init() { + defaultAppEngine = true +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/debug.go b/terraform-server/vendor/github.com/gin-gonic/gin/debug.go new file mode 100644 index 00000000..4c7cd0c3 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/debug.go @@ -0,0 +1,103 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "html/template" + "runtime" + "strconv" + "strings" +) + +const ginSupportMinGoVer = 12 + +// IsDebugging returns true if the framework is running in debug mode. +// Use SetMode(gin.ReleaseMode) to disable debug mode. +func IsDebugging() bool { + return ginMode == debugCode +} + +// DebugPrintRouteFunc indicates debug log output format. 
+var DebugPrintRouteFunc func(httpMethod, absolutePath, handlerName string, nuHandlers int) + +func debugPrintRoute(httpMethod, absolutePath string, handlers HandlersChain) { + if IsDebugging() { + nuHandlers := len(handlers) + handlerName := nameOfFunction(handlers.Last()) + if DebugPrintRouteFunc == nil { + debugPrint("%-6s %-25s --> %s (%d handlers)\n", httpMethod, absolutePath, handlerName, nuHandlers) + } else { + DebugPrintRouteFunc(httpMethod, absolutePath, handlerName, nuHandlers) + } + } +} + +func debugPrintLoadTemplate(tmpl *template.Template) { + if IsDebugging() { + var buf strings.Builder + for _, tmpl := range tmpl.Templates() { + buf.WriteString("\t- ") + buf.WriteString(tmpl.Name()) + buf.WriteString("\n") + } + debugPrint("Loaded HTML Templates (%d): \n%s\n", len(tmpl.Templates()), buf.String()) + } +} + +func debugPrint(format string, values ...interface{}) { + if IsDebugging() { + if !strings.HasSuffix(format, "\n") { + format += "\n" + } + fmt.Fprintf(DefaultWriter, "[GIN-debug] "+format, values...) + } +} + +func getMinVer(v string) (uint64, error) { + first := strings.IndexByte(v, '.') + last := strings.LastIndexByte(v, '.') + if first == last { + return strconv.ParseUint(v[first+1:], 10, 64) + } + return strconv.ParseUint(v[first+1:last], 10, 64) +} + +func debugPrintWARNINGDefault() { + if v, e := getMinVer(runtime.Version()); e == nil && v <= ginSupportMinGoVer { + debugPrint(`[WARNING] Now Gin requires Go 1.12+. + +`) + } + debugPrint(`[WARNING] Creating an Engine instance with the Logger and Recovery middleware already attached. + +`) +} + +func debugPrintWARNINGNew() { + debugPrint(`[WARNING] Running in "debug" mode. Switch to "release" mode in production. + - using env: export GIN_MODE=release + - using code: gin.SetMode(gin.ReleaseMode) + +`) +} + +func debugPrintWARNINGSetHTMLTemplate() { + debugPrint(`[WARNING] Since SetHTMLTemplate() is NOT thread-safe. It should only be called +at initialization. ie. 
before any route is registered or the router is listening in a socket: + + router := gin.Default() + router.SetHTMLTemplate(template) // << good place + +`) +} + +func debugPrintError(err error) { + if err != nil { + if IsDebugging() { + fmt.Fprintf(DefaultErrorWriter, "[GIN-debug] [ERROR] %v\n", err) + } + } +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/deprecated.go b/terraform-server/vendor/github.com/gin-gonic/gin/deprecated.go new file mode 100644 index 00000000..ab447429 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/deprecated.go @@ -0,0 +1,21 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "log" + + "github.com/gin-gonic/gin/binding" +) + +// BindWith binds the passed struct pointer using the specified binding engine. +// See the binding package. +func (c *Context) BindWith(obj interface{}, b binding.Binding) error { + log.Println(`BindWith(\"interface{}, binding.Binding\") error is going to + be deprecated, please check issue #662 and either use MustBindWith() if you + want HTTP 400 to be automatically returned if any error occur, or use + ShouldBindWith() if you need to manage the error.`) + return c.MustBindWith(obj, b) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/doc.go b/terraform-server/vendor/github.com/gin-gonic/gin/doc.go new file mode 100644 index 00000000..1bd03864 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/doc.go @@ -0,0 +1,6 @@ +/* +Package gin implements a HTTP web framework called gin. + +See https://gin-gonic.com/ for more information about gin. 
+*/ +package gin // import "github.com/gin-gonic/gin" diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/errors.go b/terraform-server/vendor/github.com/gin-gonic/gin/errors.go new file mode 100644 index 00000000..0f276c13 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/errors.go @@ -0,0 +1,174 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "reflect" + "strings" + + "github.com/gin-gonic/gin/internal/json" +) + +// ErrorType is an unsigned 64-bit error code as defined in the gin spec. +type ErrorType uint64 + +const ( + // ErrorTypeBind is used when Context.Bind() fails. + ErrorTypeBind ErrorType = 1 << 63 + // ErrorTypeRender is used when Context.Render() fails. + ErrorTypeRender ErrorType = 1 << 62 + // ErrorTypePrivate indicates a private error. + ErrorTypePrivate ErrorType = 1 << 0 + // ErrorTypePublic indicates a public error. + ErrorTypePublic ErrorType = 1 << 1 + // ErrorTypeAny indicates any other error. + ErrorTypeAny ErrorType = 1<<64 - 1 + // ErrorTypeNu indicates any other error. + ErrorTypeNu = 2 +) + +// Error represents a error's specification. +type Error struct { + Err error + Type ErrorType + Meta interface{} +} + +type errorMsgs []*Error + +var _ error = &Error{} + +// SetType sets the error's type. +func (msg *Error) SetType(flags ErrorType) *Error { + msg.Type = flags + return msg +} + +// SetMeta sets the error's meta data. 
+func (msg *Error) SetMeta(data interface{}) *Error { + msg.Meta = data + return msg +} + +// JSON creates a properly formatted JSON +func (msg *Error) JSON() interface{} { + jsonData := H{} + if msg.Meta != nil { + value := reflect.ValueOf(msg.Meta) + switch value.Kind() { + case reflect.Struct: + return msg.Meta + case reflect.Map: + for _, key := range value.MapKeys() { + jsonData[key.String()] = value.MapIndex(key).Interface() + } + default: + jsonData["meta"] = msg.Meta + } + } + if _, ok := jsonData["error"]; !ok { + jsonData["error"] = msg.Error() + } + return jsonData +} + +// MarshalJSON implements the json.Marshaller interface. +func (msg *Error) MarshalJSON() ([]byte, error) { + return json.Marshal(msg.JSON()) +} + +// Error implements the error interface. +func (msg Error) Error() string { + return msg.Err.Error() +} + +// IsType judges one error. +func (msg *Error) IsType(flags ErrorType) bool { + return (msg.Type & flags) > 0 +} + +// Unwrap returns the wrapped error, to allow interoperability with errors.Is(), errors.As() and errors.Unwrap() +func (msg *Error) Unwrap() error { + return msg.Err +} + +// ByType returns a readonly copy filtered the byte. +// ie ByType(gin.ErrorTypePublic) returns a slice of errors with type=ErrorTypePublic. +func (a errorMsgs) ByType(typ ErrorType) errorMsgs { + if len(a) == 0 { + return nil + } + if typ == ErrorTypeAny { + return a + } + var result errorMsgs + for _, msg := range a { + if msg.IsType(typ) { + result = append(result, msg) + } + } + return result +} + +// Last returns the last error in the slice. It returns nil if the array is empty. +// Shortcut for errors[len(errors)-1]. +func (a errorMsgs) Last() *Error { + if length := len(a); length > 0 { + return a[length-1] + } + return nil +} + +// Errors returns an array will all the error messages. 
+// Example: +// c.Error(errors.New("first")) +// c.Error(errors.New("second")) +// c.Error(errors.New("third")) +// c.Errors.Errors() // == []string{"first", "second", "third"} +func (a errorMsgs) Errors() []string { + if len(a) == 0 { + return nil + } + errorStrings := make([]string, len(a)) + for i, err := range a { + errorStrings[i] = err.Error() + } + return errorStrings +} + +func (a errorMsgs) JSON() interface{} { + switch length := len(a); length { + case 0: + return nil + case 1: + return a.Last().JSON() + default: + jsonData := make([]interface{}, length) + for i, err := range a { + jsonData[i] = err.JSON() + } + return jsonData + } +} + +// MarshalJSON implements the json.Marshaller interface. +func (a errorMsgs) MarshalJSON() ([]byte, error) { + return json.Marshal(a.JSON()) +} + +func (a errorMsgs) String() string { + if len(a) == 0 { + return "" + } + var buffer strings.Builder + for i, msg := range a { + fmt.Fprintf(&buffer, "Error #%02d: %s\n", i+1, msg.Err) + if msg.Meta != nil { + fmt.Fprintf(&buffer, " Meta: %v\n", msg.Meta) + } + } + return buffer.String() +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/fs.go b/terraform-server/vendor/github.com/gin-gonic/gin/fs.go new file mode 100644 index 00000000..007d9b75 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/fs.go @@ -0,0 +1,45 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "net/http" + "os" +) + +type onlyFilesFS struct { + fs http.FileSystem +} + +type neuteredReaddirFile struct { + http.File +} + +// Dir returns a http.Filesystem that can be used by http.FileServer(). It is used internally +// in router.Static(). +// if listDirectory == true, then it works the same as http.Dir() otherwise it returns +// a filesystem that prevents http.FileServer() to list the directory files. 
+func Dir(root string, listDirectory bool) http.FileSystem { + fs := http.Dir(root) + if listDirectory { + return fs + } + return &onlyFilesFS{fs} +} + +// Open conforms to http.Filesystem. +func (fs onlyFilesFS) Open(name string) (http.File, error) { + f, err := fs.fs.Open(name) + if err != nil { + return nil, err + } + return neuteredReaddirFile{f}, nil +} + +// Readdir overrides the http.File default implementation. +func (f neuteredReaddirFile) Readdir(count int) ([]os.FileInfo, error) { + // this disables directory listing + return nil, nil +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/gin.go b/terraform-server/vendor/github.com/gin-gonic/gin/gin.go new file mode 100644 index 00000000..03a0e127 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/gin.go @@ -0,0 +1,577 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "html/template" + "net" + "net/http" + "os" + "path" + "strings" + "sync" + + "github.com/gin-gonic/gin/internal/bytesconv" + "github.com/gin-gonic/gin/render" +) + +const defaultMultipartMemory = 32 << 20 // 32 MB + +var ( + default404Body = []byte("404 page not found") + default405Body = []byte("405 method not allowed") +) + +var defaultAppEngine bool + +// HandlerFunc defines the handler used by gin middleware as return value. +type HandlerFunc func(*Context) + +// HandlersChain defines a HandlerFunc array. +type HandlersChain []HandlerFunc + +// Last returns the last handler in the chain. ie. the last handler is the main one. +func (c HandlersChain) Last() HandlerFunc { + if length := len(c); length > 0 { + return c[length-1] + } + return nil +} + +// RouteInfo represents a request route's specification which contains method and path and its handler. 
+type RouteInfo struct { + Method string + Path string + Handler string + HandlerFunc HandlerFunc +} + +// RoutesInfo defines a RouteInfo array. +type RoutesInfo []RouteInfo + +// Engine is the framework's instance, it contains the muxer, middleware and configuration settings. +// Create an instance of Engine, by using New() or Default() +type Engine struct { + RouterGroup + + // Enables automatic redirection if the current route can't be matched but a + // handler for the path with (without) the trailing slash exists. + // For example if /foo/ is requested but a route only exists for /foo, the + // client is redirected to /foo with http status code 301 for GET requests + // and 307 for all other request methods. + RedirectTrailingSlash bool + + // If enabled, the router tries to fix the current request path, if no + // handle is registered for it. + // First superfluous path elements like ../ or // are removed. + // Afterwards the router does a case-insensitive lookup of the cleaned path. + // If a handle can be found for this route, the router makes a redirection + // to the corrected path with status code 301 for GET requests and 307 for + // all other request methods. + // For example /FOO and /..//Foo could be redirected to /foo. + // RedirectTrailingSlash is independent of this option. + RedirectFixedPath bool + + // If enabled, the router checks if another method is allowed for the + // current route, if the current request can not be routed. + // If this is the case, the request is answered with 'Method Not Allowed' + // and HTTP status code 405. + // If no other Method is allowed, the request is delegated to the NotFound + // handler. + HandleMethodNotAllowed bool + + // If enabled, client IP will be parsed from the request's headers that + // match those stored at `(*gin.Engine).RemoteIPHeaders`. If no IP was + // fetched, it falls back to the IP obtained from + // `(*gin.Context).Request.RemoteAddr`. 
+ ForwardedByClientIP bool + + // List of headers used to obtain the client IP when + // `(*gin.Engine).ForwardedByClientIP` is `true` and + // `(*gin.Context).Request.RemoteAddr` is matched by at least one of the + // network origins of `(*gin.Engine).TrustedProxies`. + RemoteIPHeaders []string + + // List of network origins (IPv4 addresses, IPv4 CIDRs, IPv6 addresses or + // IPv6 CIDRs) from which to trust request's headers that contain + // alternative client IP when `(*gin.Engine).ForwardedByClientIP` is + // `true`. + TrustedProxies []string + + // #726 #755 If enabled, it will trust some headers starting with + // 'X-AppEngine...' for better integration with that PaaS. + AppEngine bool + + // If enabled, the url.RawPath will be used to find parameters. + UseRawPath bool + + // If true, the path value will be unescaped. + // If UseRawPath is false (by default), the UnescapePathValues effectively is true, + // as url.Path gonna be used, which is already unescaped. + UnescapePathValues bool + + // Value of 'maxMemory' param that is given to http.Request's ParseMultipartForm + // method call. + MaxMultipartMemory int64 + + // RemoveExtraSlash a parameter can be parsed from the URL even with extra slashes. + // See the PR #1817 and issue #1644 + RemoveExtraSlash bool + + delims render.Delims + secureJSONPrefix string + HTMLRender render.HTMLRender + FuncMap template.FuncMap + allNoRoute HandlersChain + allNoMethod HandlersChain + noRoute HandlersChain + noMethod HandlersChain + pool sync.Pool + trees methodTrees + maxParams uint16 + trustedCIDRs []*net.IPNet +} + +var _ IRouter = &Engine{} + +// New returns a new blank Engine instance without any middleware attached. 
+// By default the configuration is: +// - RedirectTrailingSlash: true +// - RedirectFixedPath: false +// - HandleMethodNotAllowed: false +// - ForwardedByClientIP: true +// - UseRawPath: false +// - UnescapePathValues: true +func New() *Engine { + debugPrintWARNINGNew() + engine := &Engine{ + RouterGroup: RouterGroup{ + Handlers: nil, + basePath: "/", + root: true, + }, + FuncMap: template.FuncMap{}, + RedirectTrailingSlash: true, + RedirectFixedPath: false, + HandleMethodNotAllowed: false, + ForwardedByClientIP: true, + RemoteIPHeaders: []string{"X-Forwarded-For", "X-Real-IP"}, + TrustedProxies: []string{"0.0.0.0/0"}, + AppEngine: defaultAppEngine, + UseRawPath: false, + RemoveExtraSlash: false, + UnescapePathValues: true, + MaxMultipartMemory: defaultMultipartMemory, + trees: make(methodTrees, 0, 9), + delims: render.Delims{Left: "{{", Right: "}}"}, + secureJSONPrefix: "while(1);", + } + engine.RouterGroup.engine = engine + engine.pool.New = func() interface{} { + return engine.allocateContext() + } + return engine +} + +// Default returns an Engine instance with the Logger and Recovery middleware already attached. +func Default() *Engine { + debugPrintWARNINGDefault() + engine := New() + engine.Use(Logger(), Recovery()) + return engine +} + +func (engine *Engine) allocateContext() *Context { + v := make(Params, 0, engine.maxParams) + return &Context{engine: engine, params: &v} +} + +// Delims sets template left and right delims and returns a Engine instance. +func (engine *Engine) Delims(left, right string) *Engine { + engine.delims = render.Delims{Left: left, Right: right} + return engine +} + +// SecureJsonPrefix sets the secureJSONPrefix used in Context.SecureJSON. +func (engine *Engine) SecureJsonPrefix(prefix string) *Engine { + engine.secureJSONPrefix = prefix + return engine +} + +// LoadHTMLGlob loads HTML files identified by glob pattern +// and associates the result with HTML renderer. 
+func (engine *Engine) LoadHTMLGlob(pattern string) { + left := engine.delims.Left + right := engine.delims.Right + templ := template.Must(template.New("").Delims(left, right).Funcs(engine.FuncMap).ParseGlob(pattern)) + + if IsDebugging() { + debugPrintLoadTemplate(templ) + engine.HTMLRender = render.HTMLDebug{Glob: pattern, FuncMap: engine.FuncMap, Delims: engine.delims} + return + } + + engine.SetHTMLTemplate(templ) +} + +// LoadHTMLFiles loads a slice of HTML files +// and associates the result with HTML renderer. +func (engine *Engine) LoadHTMLFiles(files ...string) { + if IsDebugging() { + engine.HTMLRender = render.HTMLDebug{Files: files, FuncMap: engine.FuncMap, Delims: engine.delims} + return + } + + templ := template.Must(template.New("").Delims(engine.delims.Left, engine.delims.Right).Funcs(engine.FuncMap).ParseFiles(files...)) + engine.SetHTMLTemplate(templ) +} + +// SetHTMLTemplate associate a template with HTML renderer. +func (engine *Engine) SetHTMLTemplate(templ *template.Template) { + if len(engine.trees) > 0 { + debugPrintWARNINGSetHTMLTemplate() + } + + engine.HTMLRender = render.HTMLProduction{Template: templ.Funcs(engine.FuncMap)} +} + +// SetFuncMap sets the FuncMap used for template.FuncMap. +func (engine *Engine) SetFuncMap(funcMap template.FuncMap) { + engine.FuncMap = funcMap +} + +// NoRoute adds handlers for NoRoute. It return a 404 code by default. +func (engine *Engine) NoRoute(handlers ...HandlerFunc) { + engine.noRoute = handlers + engine.rebuild404Handlers() +} + +// NoMethod sets the handlers called when... TODO. +func (engine *Engine) NoMethod(handlers ...HandlerFunc) { + engine.noMethod = handlers + engine.rebuild405Handlers() +} + +// Use attaches a global middleware to the router. ie. the middleware attached though Use() will be +// included in the handlers chain for every single request. Even 404, 405, static files... +// For example, this is the right place for a logger or error management middleware. 
+func (engine *Engine) Use(middleware ...HandlerFunc) IRoutes { + engine.RouterGroup.Use(middleware...) + engine.rebuild404Handlers() + engine.rebuild405Handlers() + return engine +} + +func (engine *Engine) rebuild404Handlers() { + engine.allNoRoute = engine.combineHandlers(engine.noRoute) +} + +func (engine *Engine) rebuild405Handlers() { + engine.allNoMethod = engine.combineHandlers(engine.noMethod) +} + +func (engine *Engine) addRoute(method, path string, handlers HandlersChain) { + assert1(path[0] == '/', "path must begin with '/'") + assert1(method != "", "HTTP method can not be empty") + assert1(len(handlers) > 0, "there must be at least one handler") + + debugPrintRoute(method, path, handlers) + + root := engine.trees.get(method) + if root == nil { + root = new(node) + root.fullPath = "/" + engine.trees = append(engine.trees, methodTree{method: method, root: root}) + } + root.addRoute(path, handlers) + + // Update maxParams + if paramsCount := countParams(path); paramsCount > engine.maxParams { + engine.maxParams = paramsCount + } +} + +// Routes returns a slice of registered routes, including some useful information, such as: +// the http method, path and the handler name. +func (engine *Engine) Routes() (routes RoutesInfo) { + for _, tree := range engine.trees { + routes = iterate("", tree.method, routes, tree.root) + } + return routes +} + +func iterate(path, method string, routes RoutesInfo, root *node) RoutesInfo { + path += root.path + if len(root.handlers) > 0 { + handlerFunc := root.handlers.Last() + routes = append(routes, RouteInfo{ + Method: method, + Path: path, + Handler: nameOfFunction(handlerFunc), + HandlerFunc: handlerFunc, + }) + } + for _, child := range root.children { + routes = iterate(path, method, routes, child) + } + return routes +} + +// Run attaches the router to a http.Server and starts listening and serving HTTP requests. 
+// It is a shortcut for http.ListenAndServe(addr, router) +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) Run(addr ...string) (err error) { + defer func() { debugPrintError(err) }() + + trustedCIDRs, err := engine.prepareTrustedCIDRs() + if err != nil { + return err + } + engine.trustedCIDRs = trustedCIDRs + address := resolveAddress(addr) + debugPrint("Listening and serving HTTP on %s\n", address) + err = http.ListenAndServe(address, engine) + return +} + +func (engine *Engine) prepareTrustedCIDRs() ([]*net.IPNet, error) { + if engine.TrustedProxies == nil { + return nil, nil + } + + cidr := make([]*net.IPNet, 0, len(engine.TrustedProxies)) + for _, trustedProxy := range engine.TrustedProxies { + if !strings.Contains(trustedProxy, "/") { + ip := parseIP(trustedProxy) + if ip == nil { + return cidr, &net.ParseError{Type: "IP address", Text: trustedProxy} + } + + switch len(ip) { + case net.IPv4len: + trustedProxy += "/32" + case net.IPv6len: + trustedProxy += "/128" + } + } + _, cidrNet, err := net.ParseCIDR(trustedProxy) + if err != nil { + return cidr, err + } + cidr = append(cidr, cidrNet) + } + return cidr, nil +} + +// parseIP parse a string representation of an IP and returns a net.IP with the +// minimum byte representation or nil if input is invalid. +func parseIP(ip string) net.IP { + parsedIP := net.ParseIP(ip) + + if ipv4 := parsedIP.To4(); ipv4 != nil { + // return ip in a 4-byte representation + return ipv4 + } + + // return ip in a 16-byte representation or nil + return parsedIP +} + +// RunTLS attaches the router to a http.Server and starts listening and serving HTTPS (secure) requests. +// It is a shortcut for http.ListenAndServeTLS(addr, certFile, keyFile, router) +// Note: this method will block the calling goroutine indefinitely unless an error happens. 
+func (engine *Engine) RunTLS(addr, certFile, keyFile string) (err error) { + debugPrint("Listening and serving HTTPS on %s\n", addr) + defer func() { debugPrintError(err) }() + + err = http.ListenAndServeTLS(addr, certFile, keyFile, engine) + return +} + +// RunUnix attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified unix socket (ie. a file). +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunUnix(file string) (err error) { + debugPrint("Listening and serving HTTP on unix:/%s", file) + defer func() { debugPrintError(err) }() + + listener, err := net.Listen("unix", file) + if err != nil { + return + } + defer listener.Close() + defer os.Remove(file) + + err = http.Serve(listener, engine) + return +} + +// RunFd attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified file descriptor. +// Note: this method will block the calling goroutine indefinitely unless an error happens. +func (engine *Engine) RunFd(fd int) (err error) { + debugPrint("Listening and serving HTTP on fd@%d", fd) + defer func() { debugPrintError(err) }() + + f := os.NewFile(uintptr(fd), fmt.Sprintf("fd@%d", fd)) + listener, err := net.FileListener(f) + if err != nil { + return + } + defer listener.Close() + err = engine.RunListener(listener) + return +} + +// RunListener attaches the router to a http.Server and starts listening and serving HTTP requests +// through the specified net.Listener +func (engine *Engine) RunListener(listener net.Listener) (err error) { + debugPrint("Listening and serving HTTP on listener what's bind with address@%s", listener.Addr()) + defer func() { debugPrintError(err) }() + err = http.Serve(listener, engine) + return +} + +// ServeHTTP conforms to the http.Handler interface. 
+func (engine *Engine) ServeHTTP(w http.ResponseWriter, req *http.Request) { + c := engine.pool.Get().(*Context) + c.writermem.reset(w) + c.Request = req + c.reset() + + engine.handleHTTPRequest(c) + + engine.pool.Put(c) +} + +// HandleContext re-enter a context that has been rewritten. +// This can be done by setting c.Request.URL.Path to your new target. +// Disclaimer: You can loop yourself to death with this, use wisely. +func (engine *Engine) HandleContext(c *Context) { + oldIndexValue := c.index + c.reset() + engine.handleHTTPRequest(c) + + c.index = oldIndexValue +} + +func (engine *Engine) handleHTTPRequest(c *Context) { + httpMethod := c.Request.Method + rPath := c.Request.URL.Path + unescape := false + if engine.UseRawPath && len(c.Request.URL.RawPath) > 0 { + rPath = c.Request.URL.RawPath + unescape = engine.UnescapePathValues + } + + if engine.RemoveExtraSlash { + rPath = cleanPath(rPath) + } + + // Find root of the tree for the given HTTP method + t := engine.trees + for i, tl := 0, len(t); i < tl; i++ { + if t[i].method != httpMethod { + continue + } + root := t[i].root + // Find route in tree + value := root.getValue(rPath, c.params, unescape) + if value.params != nil { + c.Params = *value.params + } + if value.handlers != nil { + c.handlers = value.handlers + c.fullPath = value.fullPath + c.Next() + c.writermem.WriteHeaderNow() + return + } + if httpMethod != "CONNECT" && rPath != "/" { + if value.tsr && engine.RedirectTrailingSlash { + redirectTrailingSlash(c) + return + } + if engine.RedirectFixedPath && redirectFixedPath(c, root, engine.RedirectFixedPath) { + return + } + } + break + } + + if engine.HandleMethodNotAllowed { + for _, tree := range engine.trees { + if tree.method == httpMethod { + continue + } + if value := tree.root.getValue(rPath, nil, unescape); value.handlers != nil { + c.handlers = engine.allNoMethod + serveError(c, http.StatusMethodNotAllowed, default405Body) + return + } + } + } + c.handlers = engine.allNoRoute + 
serveError(c, http.StatusNotFound, default404Body) +} + +var mimePlain = []string{MIMEPlain} + +func serveError(c *Context, code int, defaultMessage []byte) { + c.writermem.status = code + c.Next() + if c.writermem.Written() { + return + } + if c.writermem.Status() == code { + c.writermem.Header()["Content-Type"] = mimePlain + _, err := c.Writer.Write(defaultMessage) + if err != nil { + debugPrint("cannot write message to writer during serve error: %v", err) + } + return + } + c.writermem.WriteHeaderNow() +} + +func redirectTrailingSlash(c *Context) { + req := c.Request + p := req.URL.Path + if prefix := path.Clean(c.Request.Header.Get("X-Forwarded-Prefix")); prefix != "." { + p = prefix + "/" + req.URL.Path + } + req.URL.Path = p + "/" + if length := len(p); length > 1 && p[length-1] == '/' { + req.URL.Path = p[:length-1] + } + redirectRequest(c) +} + +func redirectFixedPath(c *Context, root *node, trailingSlash bool) bool { + req := c.Request + rPath := req.URL.Path + + if fixedPath, ok := root.findCaseInsensitivePath(cleanPath(rPath), trailingSlash); ok { + req.URL.Path = bytesconv.BytesToString(fixedPath) + redirectRequest(c) + return true + } + return false +} + +func redirectRequest(c *Context) { + req := c.Request + rPath := req.URL.Path + rURL := req.URL.String() + + code := http.StatusMovedPermanently // Permanent redirect, request with GET method + if req.Method != http.MethodGet { + code = http.StatusTemporaryRedirect + } + debugPrint("redirecting request %d: %s --> %s", code, rPath, rURL) + http.Redirect(c.Writer, req, rURL, code) + c.writermem.WriteHeaderNow() +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/go.mod b/terraform-server/vendor/github.com/gin-gonic/gin/go.mod new file mode 100644 index 00000000..884ff851 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/go.mod @@ -0,0 +1,14 @@ +module github.com/gin-gonic/gin + +go 1.13 + +require ( + github.com/gin-contrib/sse v0.1.0 + 
github.com/go-playground/validator/v10 v10.4.1 + github.com/golang/protobuf v1.3.3 + github.com/json-iterator/go v1.1.9 + github.com/mattn/go-isatty v0.0.12 + github.com/stretchr/testify v1.4.0 + github.com/ugorji/go/codec v1.1.7 + gopkg.in/yaml.v2 v2.2.8 +) diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/go.sum b/terraform-server/vendor/github.com/gin-gonic/gin/go.sum new file mode 100644 index 00000000..a64b3319 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/go.sum @@ -0,0 +1,52 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE= +github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/go-playground/validator/v10 v10.4.1 h1:pH2c5ADXtd66mxoE0Zm9SUhxE20r7aM3F26W0hOn+GE= +github.com/go-playground/validator/v10 v10.4.1/go.mod h1:nlOn6nFhuKACm19sB/8EGNn9GlaMV7XkbRSipzJ0Ii4= +github.com/golang/protobuf v1.3.3 h1:gyjaxf+svBWX08ZjK86iN9geUJF0H6gp2IRKX6Nf6/I= +github.com/golang/protobuf v1.3.3/go.mod h1:vzj43D7+SQXF/4pzW/hwtAqwc6iTitCiVSaWz5lYuqw= +github.com/google/gofuzz v1.0.0/go.mod 
h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/json-iterator/go v1.1.9 h1:9yzud/Ht36ygwatGx56VwCZtlI/2AD15T1X2sjSuGns= +github.com/json-iterator/go v1.1.9/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/mattn/go-isatty v0.0.12 h1:wuysRhFDzyxgEmMf5xjvJ2M9dZoWAXNNr5LSBS7uHXY= +github.com/mattn/go-isatty v0.0.12/go.mod h1:cbi8OIDigv2wuxKPP5vlRcQ1OAZbq2CE4Kysco4FUpU= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +github.com/ugorji/go v1.1.7 h1:/68gy2h+1mWMrwZFeD1kQialdSzAb432dtpeJ42ovdo= +github.com/ugorji/go v1.1.7/go.mod h1:kZn38zHttfInRq0xu/PH0az30d+z6vm202qpg1oXVMw= +github.com/ugorji/go/codec v1.1.7 h1:2SvQaVZ1ouYrrKKwoSk2pzd4A9evlKJb9oTL+OaLUSs= +github.com/ugorji/go/codec v1.1.7/go.mod h1:Ax+UKWsSmolVDwsd+7N3ZtXu+yMGCf907BLYF3GoBXY= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42 h1:vEOn+mP2zCOVzKckCZy6YsCtDblrpj/w7B9nxGNELpg= +golang.org/x/sys v0.0.0-20200116001909-b77594299b42/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10= +gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go b/terraform-server/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go new file mode 100644 index 00000000..86e4c4d4 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/internal/bytesconv/bytesconv.go @@ -0,0 +1,24 @@ +// Copyright 2020 Gin Core Team. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package bytesconv + +import ( + "unsafe" +) + +// StringToBytes converts string to byte slice without a memory allocation. +func StringToBytes(s string) []byte { + return *(*[]byte)(unsafe.Pointer( + &struct { + string + Cap int + }{s, len(s)}, + )) +} + +// BytesToString converts byte slice to string without a memory allocation. +func BytesToString(b []byte) string { + return *(*string)(unsafe.Pointer(&b)) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/internal/json/json.go b/terraform-server/vendor/github.com/gin-gonic/gin/internal/json/json.go new file mode 100644 index 00000000..172aeb24 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/internal/json/json.go @@ -0,0 +1,23 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build !jsoniter +// +build !jsoniter + +package json + +import "encoding/json" + +var ( + // Marshal is exported by gin/json package. + Marshal = json.Marshal + // Unmarshal is exported by gin/json package. + Unmarshal = json.Unmarshal + // MarshalIndent is exported by gin/json package. + MarshalIndent = json.MarshalIndent + // NewDecoder is exported by gin/json package. + NewDecoder = json.NewDecoder + // NewEncoder is exported by gin/json package. + NewEncoder = json.NewEncoder +) diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go b/terraform-server/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go new file mode 100644 index 00000000..232f8dca --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/internal/json/jsoniter.go @@ -0,0 +1,24 @@ +// Copyright 2017 Bo-Yi Wu. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +//go:build jsoniter +// +build jsoniter + +package json + +import jsoniter "github.com/json-iterator/go" + +var ( + json = jsoniter.ConfigCompatibleWithStandardLibrary + // Marshal is exported by gin/json package. + Marshal = json.Marshal + // Unmarshal is exported by gin/json package. + Unmarshal = json.Unmarshal + // MarshalIndent is exported by gin/json package. + MarshalIndent = json.MarshalIndent + // NewDecoder is exported by gin/json package. + NewDecoder = json.NewDecoder + // NewEncoder is exported by gin/json package. + NewEncoder = json.NewEncoder +) diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/logger.go b/terraform-server/vendor/github.com/gin-gonic/gin/logger.go new file mode 100644 index 00000000..d361b74d --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/logger.go @@ -0,0 +1,271 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "fmt" + "io" + "net/http" + "os" + "time" + + "github.com/mattn/go-isatty" +) + +type consoleColorModeValue int + +const ( + autoColor consoleColorModeValue = iota + disableColor + forceColor +) + +const ( + green = "\033[97;42m" + white = "\033[90;47m" + yellow = "\033[90;43m" + red = "\033[97;41m" + blue = "\033[97;44m" + magenta = "\033[97;45m" + cyan = "\033[97;46m" + reset = "\033[0m" +) + +var consoleColorMode = autoColor + +// LoggerConfig defines the config for Logger middleware. +type LoggerConfig struct { + // Optional. Default value is gin.defaultLogFormatter + Formatter LogFormatter + + // Output is a writer where logs are written. + // Optional. Default value is gin.DefaultWriter. + Output io.Writer + + // SkipPaths is a url path array which logs are not written. + // Optional. 
+ SkipPaths []string +} + +// LogFormatter gives the signature of the formatter function passed to LoggerWithFormatter +type LogFormatter func(params LogFormatterParams) string + +// LogFormatterParams is the structure any formatter will be handed when time to log comes +type LogFormatterParams struct { + Request *http.Request + + // TimeStamp shows the time after the server returns a response. + TimeStamp time.Time + // StatusCode is HTTP response code. + StatusCode int + // Latency is how much time the server cost to process a certain request. + Latency time.Duration + // ClientIP equals Context's ClientIP method. + ClientIP string + // Method is the HTTP method given to the request. + Method string + // Path is a path the client requests. + Path string + // ErrorMessage is set if error has occurred in processing the request. + ErrorMessage string + // isTerm shows whether does gin's output descriptor refers to a terminal. + isTerm bool + // BodySize is the size of the Response Body + BodySize int + // Keys are the keys set on the request's context. + Keys map[string]interface{} +} + +// StatusCodeColor is the ANSI color for appropriately logging http status code to a terminal. +func (p *LogFormatterParams) StatusCodeColor() string { + code := p.StatusCode + + switch { + case code >= http.StatusOK && code < http.StatusMultipleChoices: + return green + case code >= http.StatusMultipleChoices && code < http.StatusBadRequest: + return white + case code >= http.StatusBadRequest && code < http.StatusInternalServerError: + return yellow + default: + return red + } +} + +// MethodColor is the ANSI color for appropriately logging http method to a terminal. 
+func (p *LogFormatterParams) MethodColor() string { + method := p.Method + + switch method { + case http.MethodGet: + return blue + case http.MethodPost: + return cyan + case http.MethodPut: + return yellow + case http.MethodDelete: + return red + case http.MethodPatch: + return green + case http.MethodHead: + return magenta + case http.MethodOptions: + return white + default: + return reset + } +} + +// ResetColor resets all escape attributes. +func (p *LogFormatterParams) ResetColor() string { + return reset +} + +// IsOutputColor indicates whether can colors be outputted to the log. +func (p *LogFormatterParams) IsOutputColor() bool { + return consoleColorMode == forceColor || (consoleColorMode == autoColor && p.isTerm) +} + +// defaultLogFormatter is the default log format function Logger middleware uses. +var defaultLogFormatter = func(param LogFormatterParams) string { + var statusColor, methodColor, resetColor string + if param.IsOutputColor() { + statusColor = param.StatusCodeColor() + methodColor = param.MethodColor() + resetColor = param.ResetColor() + } + + if param.Latency > time.Minute { + // Truncate in a golang < 1.8 safe way + param.Latency = param.Latency - param.Latency%time.Second + } + return fmt.Sprintf("[GIN] %v |%s %3d %s| %13v | %15s |%s %-7s %s %#v\n%s", + param.TimeStamp.Format("2006/01/02 - 15:04:05"), + statusColor, param.StatusCode, resetColor, + param.Latency, + param.ClientIP, + methodColor, param.Method, resetColor, + param.Path, + param.ErrorMessage, + ) +} + +// DisableConsoleColor disables color output in the console. +func DisableConsoleColor() { + consoleColorMode = disableColor +} + +// ForceConsoleColor force color output in the console. +func ForceConsoleColor() { + consoleColorMode = forceColor +} + +// ErrorLogger returns a handlerfunc for any error type. +func ErrorLogger() HandlerFunc { + return ErrorLoggerT(ErrorTypeAny) +} + +// ErrorLoggerT returns a handlerfunc for a given error type. 
+func ErrorLoggerT(typ ErrorType) HandlerFunc { + return func(c *Context) { + c.Next() + errors := c.Errors.ByType(typ) + if len(errors) > 0 { + c.JSON(-1, errors) + } + } +} + +// Logger instances a Logger middleware that will write the logs to gin.DefaultWriter. +// By default gin.DefaultWriter = os.Stdout. +func Logger() HandlerFunc { + return LoggerWithConfig(LoggerConfig{}) +} + +// LoggerWithFormatter instance a Logger middleware with the specified log format function. +func LoggerWithFormatter(f LogFormatter) HandlerFunc { + return LoggerWithConfig(LoggerConfig{ + Formatter: f, + }) +} + +// LoggerWithWriter instance a Logger middleware with the specified writer buffer. +// Example: os.Stdout, a file opened in write mode, a socket... +func LoggerWithWriter(out io.Writer, notlogged ...string) HandlerFunc { + return LoggerWithConfig(LoggerConfig{ + Output: out, + SkipPaths: notlogged, + }) +} + +// LoggerWithConfig instance a Logger middleware with config. +func LoggerWithConfig(conf LoggerConfig) HandlerFunc { + formatter := conf.Formatter + if formatter == nil { + formatter = defaultLogFormatter + } + + out := conf.Output + if out == nil { + out = DefaultWriter + } + + notlogged := conf.SkipPaths + + isTerm := true + + if w, ok := out.(*os.File); !ok || os.Getenv("TERM") == "dumb" || + (!isatty.IsTerminal(w.Fd()) && !isatty.IsCygwinTerminal(w.Fd())) { + isTerm = false + } + + var skip map[string]struct{} + + if length := len(notlogged); length > 0 { + skip = make(map[string]struct{}, length) + + for _, path := range notlogged { + skip[path] = struct{}{} + } + } + + return func(c *Context) { + // Start timer + start := time.Now() + path := c.Request.URL.Path + raw := c.Request.URL.RawQuery + + // Process request + c.Next() + + // Log only when path is not being skipped + if _, ok := skip[path]; !ok { + param := LogFormatterParams{ + Request: c.Request, + isTerm: isTerm, + Keys: c.Keys, + } + + // Stop timer + param.TimeStamp = time.Now() + param.Latency = 
param.TimeStamp.Sub(start) + + param.ClientIP = c.ClientIP() + param.Method = c.Request.Method + param.StatusCode = c.Writer.Status() + param.ErrorMessage = c.Errors.ByType(ErrorTypePrivate).String() + + param.BodySize = c.Writer.Size() + + if raw != "" { + path = path + "?" + raw + } + + param.Path = path + + fmt.Fprint(out, formatter(param)) + } + } +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/mode.go b/terraform-server/vendor/github.com/gin-gonic/gin/mode.go new file mode 100644 index 00000000..c8813aff --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/mode.go @@ -0,0 +1,92 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "io" + "os" + + "github.com/gin-gonic/gin/binding" +) + +// EnvGinMode indicates environment name for gin mode. +const EnvGinMode = "GIN_MODE" + +const ( + // DebugMode indicates gin mode is debug. + DebugMode = "debug" + // ReleaseMode indicates gin mode is release. + ReleaseMode = "release" + // TestMode indicates gin mode is test. + TestMode = "test" +) + +const ( + debugCode = iota + releaseCode + testCode +) + +// DefaultWriter is the default io.Writer used by Gin for debug output and +// middleware output like Logger() or Recovery(). +// Note that both Logger and Recovery provides custom ways to configure their +// output io.Writer. +// To support coloring in Windows use: +// import "github.com/mattn/go-colorable" +// gin.DefaultWriter = colorable.NewColorableStdout() +var DefaultWriter io.Writer = os.Stdout + +// DefaultErrorWriter is the default io.Writer used by Gin to debug errors +var DefaultErrorWriter io.Writer = os.Stderr + +var ginMode = debugCode +var modeName = DebugMode + +func init() { + mode := os.Getenv(EnvGinMode) + SetMode(mode) +} + +// SetMode sets gin mode according to input string. 
+func SetMode(value string) { + if value == "" { + value = DebugMode + } + + switch value { + case DebugMode: + ginMode = debugCode + case ReleaseMode: + ginMode = releaseCode + case TestMode: + ginMode = testCode + default: + panic("gin mode unknown: " + value + " (available mode: debug release test)") + } + + modeName = value +} + +// DisableBindValidation closes the default validator. +func DisableBindValidation() { + binding.Validator = nil +} + +// EnableJsonDecoderUseNumber sets true for binding.EnableDecoderUseNumber to +// call the UseNumber method on the JSON Decoder instance. +func EnableJsonDecoderUseNumber() { + binding.EnableDecoderUseNumber = true +} + +// EnableJsonDecoderDisallowUnknownFields sets true for binding.EnableDecoderDisallowUnknownFields to +// call the DisallowUnknownFields method on the JSON Decoder instance. +func EnableJsonDecoderDisallowUnknownFields() { + binding.EnableDecoderDisallowUnknownFields = true +} + +// Mode returns currently gin mode. +func Mode() string { + return modeName +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/path.go b/terraform-server/vendor/github.com/gin-gonic/gin/path.go new file mode 100644 index 00000000..d42d6b9d --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/path.go @@ -0,0 +1,150 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Based on the path package, Copyright 2009 The Go Authors. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE. + +package gin + +// cleanPath is the URL version of path.Clean, it returns a canonical URL path +// for p, eliminating . and .. elements. +// +// The following rules are applied iteratively until no further processing can +// be done: +// 1. Replace multiple slashes with a single slash. +// 2. Eliminate each . path name element (the current directory). +// 3. Eliminate each inner .. 
path name element (the parent directory) +// along with the non-.. element that precedes it. +// 4. Eliminate .. elements that begin a rooted path: +// that is, replace "/.." by "/" at the beginning of a path. +// +// If the result of this process is an empty string, "/" is returned. +func cleanPath(p string) string { + const stackBufSize = 128 + // Turn empty string into "/" + if p == "" { + return "/" + } + + // Reasonably sized buffer on stack to avoid allocations in the common case. + // If a larger buffer is required, it gets allocated dynamically. + buf := make([]byte, 0, stackBufSize) + + n := len(p) + + // Invariants: + // reading from path; r is index of next byte to process. + // writing to buf; w is index of next byte to write. + + // path must start with '/' + r := 1 + w := 1 + + if p[0] != '/' { + r = 0 + + if n+1 > stackBufSize { + buf = make([]byte, n+1) + } else { + buf = buf[:n+1] + } + buf[0] = '/' + } + + trailing := n > 1 && p[n-1] == '/' + + // A bit more clunky without a 'lazybuf' like the path package, but the loop + // gets completely inlined (bufApp calls). + // loop has no expensive function calls (except 1x make) // So in contrast to the path package this loop has no expensive function + // calls (except make, if needed). + + for r < n { + switch { + case p[r] == '/': + // empty path element, trailing slash is added after the end + r++ + + case p[r] == '.' && r+1 == n: + trailing = true + r++ + + case p[r] == '.' && p[r+1] == '/': + // . element + r += 2 + + case p[r] == '.' && p[r+1] == '.' && (r+2 == n || p[r+2] == '/'): + // .. element: remove to last / + r += 3 + + if w > 1 { + // can backtrack + w-- + + if len(buf) == 0 { + for w > 1 && p[w] != '/' { + w-- + } + } else { + for w > 1 && buf[w] != '/' { + w-- + } + } + } + + default: + // Real path element. 
+ // Add slash if needed + if w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // Copy element + for r < n && p[r] != '/' { + bufApp(&buf, p, w, p[r]) + w++ + r++ + } + } + } + + // Re-append trailing slash + if trailing && w > 1 { + bufApp(&buf, p, w, '/') + w++ + } + + // If the original string was not modified (or only shortened at the end), + // return the respective substring of the original string. + // Otherwise return a new string from the buffer. + if len(buf) == 0 { + return p[:w] + } + return string(buf[:w]) +} + +// Internal helper to lazily create a buffer if necessary. +// Calls to this function get inlined. +func bufApp(buf *[]byte, s string, w int, c byte) { + b := *buf + if len(b) == 0 { + // No modification of the original string so far. + // If the next character is the same as in the original string, we do + // not yet have to allocate a buffer. + if s[w] == c { + return + } + + // Otherwise use either the stack buffer, if it is large enough, or + // allocate a new buffer on the heap, and copy all previous characters. + length := len(s) + if length > cap(b) { + *buf = make([]byte, length) + } else { + *buf = (*buf)[:length] + } + b = *buf + + copy(b, s[:w]) + } + b[w] = c +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/recovery.go b/terraform-server/vendor/github.com/gin-gonic/gin/recovery.go new file mode 100644 index 00000000..563f5aaa --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/recovery.go @@ -0,0 +1,171 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "net" + "net/http" + "net/http/httputil" + "os" + "runtime" + "strings" + "time" +) + +var ( + dunno = []byte("???") + centerDot = []byte("·") + dot = []byte(".") + slash = []byte("/") +) + +// RecoveryFunc defines the function passable to CustomRecovery. 
+type RecoveryFunc func(c *Context, err interface{}) + +// Recovery returns a middleware that recovers from any panics and writes a 500 if there was one. +func Recovery() HandlerFunc { + return RecoveryWithWriter(DefaultErrorWriter) +} + +//CustomRecovery returns a middleware that recovers from any panics and calls the provided handle func to handle it. +func CustomRecovery(handle RecoveryFunc) HandlerFunc { + return RecoveryWithWriter(DefaultErrorWriter, handle) +} + +// RecoveryWithWriter returns a middleware for a given writer that recovers from any panics and writes a 500 if there was one. +func RecoveryWithWriter(out io.Writer, recovery ...RecoveryFunc) HandlerFunc { + if len(recovery) > 0 { + return CustomRecoveryWithWriter(out, recovery[0]) + } + return CustomRecoveryWithWriter(out, defaultHandleRecovery) +} + +// CustomRecoveryWithWriter returns a middleware for a given writer that recovers from any panics and calls the provided handle func to handle it. +func CustomRecoveryWithWriter(out io.Writer, handle RecoveryFunc) HandlerFunc { + var logger *log.Logger + if out != nil { + logger = log.New(out, "\n\n\x1b[31m", log.LstdFlags) + } + return func(c *Context) { + defer func() { + if err := recover(); err != nil { + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. 
+ var brokenPipe bool + if ne, ok := err.(*net.OpError); ok { + if se, ok := ne.Err.(*os.SyscallError); ok { + if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { + brokenPipe = true + } + } + } + if logger != nil { + stack := stack(3) + httpRequest, _ := httputil.DumpRequest(c.Request, false) + headers := strings.Split(string(httpRequest), "\r\n") + for idx, header := range headers { + current := strings.Split(header, ":") + if current[0] == "Authorization" { + headers[idx] = current[0] + ": *" + } + } + headersToStr := strings.Join(headers, "\r\n") + if brokenPipe { + logger.Printf("%s\n%s%s", err, headersToStr, reset) + } else if IsDebugging() { + logger.Printf("[Recovery] %s panic recovered:\n%s\n%s\n%s%s", + timeFormat(time.Now()), headersToStr, err, stack, reset) + } else { + logger.Printf("[Recovery] %s panic recovered:\n%s\n%s%s", + timeFormat(time.Now()), err, stack, reset) + } + } + if brokenPipe { + // If the connection is dead, we can't write a status to it. + c.Error(err.(error)) // nolint: errcheck + c.Abort() + } else { + handle(c, err) + } + } + }() + c.Next() + } +} + +func defaultHandleRecovery(c *Context, err interface{}) { + c.AbortWithStatus(http.StatusInternalServerError) +} + +// stack returns a nicely formatted stack frame, skipping skip frames. +func stack(skip int) []byte { + buf := new(bytes.Buffer) // the returned data + // As we loop, we open files and read them. These variables record the currently + // loaded file. + var lines [][]byte + var lastFile string + for i := skip; ; i++ { // Skip the expected number of frames + pc, file, line, ok := runtime.Caller(i) + if !ok { + break + } + // Print this much at least. If we can't find the source, it won't show. 
+ fmt.Fprintf(buf, "%s:%d (0x%x)\n", file, line, pc) + if file != lastFile { + data, err := ioutil.ReadFile(file) + if err != nil { + continue + } + lines = bytes.Split(data, []byte{'\n'}) + lastFile = file + } + fmt.Fprintf(buf, "\t%s: %s\n", function(pc), source(lines, line)) + } + return buf.Bytes() +} + +// source returns a space-trimmed slice of the n'th line. +func source(lines [][]byte, n int) []byte { + n-- // in stack trace, lines are 1-indexed but our array is 0-indexed + if n < 0 || n >= len(lines) { + return dunno + } + return bytes.TrimSpace(lines[n]) +} + +// function returns, if possible, the name of the function containing the PC. +func function(pc uintptr) []byte { + fn := runtime.FuncForPC(pc) + if fn == nil { + return dunno + } + name := []byte(fn.Name()) + // The name includes the path name to the package, which is unnecessary + // since the file name is already included. Plus, it has center dots. + // That is, we see + // runtime/debug.*T·ptrmethod + // and want + // *T.ptrmethod + // Also the package path might contains dot (e.g. code.google.com/...), + // so first eliminate the path prefix + if lastSlash := bytes.LastIndex(name, slash); lastSlash >= 0 { + name = name[lastSlash+1:] + } + if period := bytes.Index(name, dot); period >= 0 { + name = name[period+1:] + } + name = bytes.Replace(name, centerDot, dot, -1) + return name +} + +func timeFormat(t time.Time) string { + timeString := t.Format("2006/01/02 - 15:04:05") + return timeString +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/data.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/data.go new file mode 100644 index 00000000..6ba657ba --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/data.go @@ -0,0 +1,25 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. 
+ +package render + +import "net/http" + +// Data contains ContentType and bytes data. +type Data struct { + ContentType string + Data []byte +} + +// Render (Data) writes data with custom ContentType. +func (r Data) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + _, err = w.Write(r.Data) + return +} + +// WriteContentType (Data) writes custom ContentType. +func (r Data) WriteContentType(w http.ResponseWriter) { + writeContentType(w, []string{r.ContentType}) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/html.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/html.go new file mode 100644 index 00000000..6696ece9 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/html.go @@ -0,0 +1,92 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "html/template" + "net/http" +) + +// Delims represents a set of Left and Right delimiters for HTML template rendering. +type Delims struct { + // Left delimiter, defaults to {{. + Left string + // Right delimiter, defaults to }}. + Right string +} + +// HTMLRender interface is to be implemented by HTMLProduction and HTMLDebug. +type HTMLRender interface { + // Instance returns an HTML instance. + Instance(string, interface{}) Render +} + +// HTMLProduction contains template reference and its delims. +type HTMLProduction struct { + Template *template.Template + Delims Delims +} + +// HTMLDebug contains template delims and pattern and function with file list. +type HTMLDebug struct { + Files []string + Glob string + Delims Delims + FuncMap template.FuncMap +} + +// HTML contains template reference and its name with given interface object. 
+type HTML struct { + Template *template.Template + Name string + Data interface{} +} + +var htmlContentType = []string{"text/html; charset=utf-8"} + +// Instance (HTMLProduction) returns an HTML instance which it realizes Render interface. +func (r HTMLProduction) Instance(name string, data interface{}) Render { + return HTML{ + Template: r.Template, + Name: name, + Data: data, + } +} + +// Instance (HTMLDebug) returns an HTML instance which it realizes Render interface. +func (r HTMLDebug) Instance(name string, data interface{}) Render { + return HTML{ + Template: r.loadTemplate(), + Name: name, + Data: data, + } +} +func (r HTMLDebug) loadTemplate() *template.Template { + if r.FuncMap == nil { + r.FuncMap = template.FuncMap{} + } + if len(r.Files) > 0 { + return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseFiles(r.Files...)) + } + if r.Glob != "" { + return template.Must(template.New("").Delims(r.Delims.Left, r.Delims.Right).Funcs(r.FuncMap).ParseGlob(r.Glob)) + } + panic("the HTML debug render was created without files or glob pattern") +} + +// Render (HTML) executes template and writes its result with custom ContentType for response. +func (r HTML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + if r.Name == "" { + return r.Template.Execute(w, r.Data) + } + return r.Template.ExecuteTemplate(w, r.Name, r.Data) +} + +// WriteContentType (HTML) writes HTML ContentType. +func (r HTML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, htmlContentType) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/json.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/json.go new file mode 100644 index 00000000..41863093 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/json.go @@ -0,0 +1,193 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "bytes" + "fmt" + "html/template" + "net/http" + + "github.com/gin-gonic/gin/internal/bytesconv" + "github.com/gin-gonic/gin/internal/json" +) + +// JSON contains the given interface object. +type JSON struct { + Data interface{} +} + +// IndentedJSON contains the given interface object. +type IndentedJSON struct { + Data interface{} +} + +// SecureJSON contains the given interface object and its prefix. +type SecureJSON struct { + Prefix string + Data interface{} +} + +// JsonpJSON contains the given interface object its callback. +type JsonpJSON struct { + Callback string + Data interface{} +} + +// AsciiJSON contains the given interface object. +type AsciiJSON struct { + Data interface{} +} + +// PureJSON contains the given interface object. +type PureJSON struct { + Data interface{} +} + +var jsonContentType = []string{"application/json; charset=utf-8"} +var jsonpContentType = []string{"application/javascript; charset=utf-8"} +var jsonAsciiContentType = []string{"application/json"} + +// Render (JSON) writes data with custom ContentType. +func (r JSON) Render(w http.ResponseWriter) (err error) { + if err = WriteJSON(w, r.Data); err != nil { + panic(err) + } + return +} + +// WriteContentType (JSON) writes JSON ContentType. +func (r JSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +// WriteJSON marshals the given interface object and writes it with custom ContentType. +func WriteJSON(w http.ResponseWriter, obj interface{}) error { + writeContentType(w, jsonContentType) + jsonBytes, err := json.Marshal(obj) + if err != nil { + return err + } + _, err = w.Write(jsonBytes) + return err +} + +// Render (IndentedJSON) marshals the given interface object and writes it with custom ContentType. 
+func (r IndentedJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + jsonBytes, err := json.MarshalIndent(r.Data, "", " ") + if err != nil { + return err + } + _, err = w.Write(jsonBytes) + return err +} + +// WriteContentType (IndentedJSON) writes JSON ContentType. +func (r IndentedJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +// Render (SecureJSON) marshals the given interface object and writes it with custom ContentType. +func (r SecureJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + jsonBytes, err := json.Marshal(r.Data) + if err != nil { + return err + } + // if the jsonBytes is array values + if bytes.HasPrefix(jsonBytes, bytesconv.StringToBytes("[")) && bytes.HasSuffix(jsonBytes, + bytesconv.StringToBytes("]")) { + _, err = w.Write(bytesconv.StringToBytes(r.Prefix)) + if err != nil { + return err + } + } + _, err = w.Write(jsonBytes) + return err +} + +// WriteContentType (SecureJSON) writes JSON ContentType. +func (r SecureJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} + +// Render (JsonpJSON) marshals the given interface object and writes it and its callback with custom ContentType. +func (r JsonpJSON) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + ret, err := json.Marshal(r.Data) + if err != nil { + return err + } + + if r.Callback == "" { + _, err = w.Write(ret) + return err + } + + callback := template.JSEscapeString(r.Callback) + _, err = w.Write(bytesconv.StringToBytes(callback)) + if err != nil { + return err + } + _, err = w.Write(bytesconv.StringToBytes("(")) + if err != nil { + return err + } + _, err = w.Write(ret) + if err != nil { + return err + } + _, err = w.Write(bytesconv.StringToBytes(");")) + if err != nil { + return err + } + + return nil +} + +// WriteContentType (JsonpJSON) writes Javascript ContentType. 
+func (r JsonpJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonpContentType) +} + +// Render (AsciiJSON) marshals the given interface object and writes it with custom ContentType. +func (r AsciiJSON) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + ret, err := json.Marshal(r.Data) + if err != nil { + return err + } + + var buffer bytes.Buffer + for _, r := range bytesconv.BytesToString(ret) { + cvt := string(r) + if r >= 128 { + cvt = fmt.Sprintf("\\u%04x", int64(r)) + } + buffer.WriteString(cvt) + } + + _, err = w.Write(buffer.Bytes()) + return err +} + +// WriteContentType (AsciiJSON) writes JSON ContentType. +func (r AsciiJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonAsciiContentType) +} + +// Render (PureJSON) writes custom ContentType and encodes the given interface object. +func (r PureJSON) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + encoder := json.NewEncoder(w) + encoder.SetEscapeHTML(false) + return encoder.Encode(r.Data) +} + +// WriteContentType (PureJSON) writes custom ContentType. +func (r PureJSON) WriteContentType(w http.ResponseWriter) { + writeContentType(w, jsonContentType) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/msgpack.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/msgpack.go new file mode 100644 index 00000000..6ef5b6e5 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/msgpack.go @@ -0,0 +1,42 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +//go:build !nomsgpack +// +build !nomsgpack + +package render + +import ( + "net/http" + + "github.com/ugorji/go/codec" +) + +var ( + _ Render = MsgPack{} +) + +// MsgPack contains the given interface object. 
+type MsgPack struct { + Data interface{} +} + +var msgpackContentType = []string{"application/msgpack; charset=utf-8"} + +// WriteContentType (MsgPack) writes MsgPack ContentType. +func (r MsgPack) WriteContentType(w http.ResponseWriter) { + writeContentType(w, msgpackContentType) +} + +// Render (MsgPack) encodes the given interface object and writes data with custom ContentType. +func (r MsgPack) Render(w http.ResponseWriter) error { + return WriteMsgPack(w, r.Data) +} + +// WriteMsgPack writes MsgPack ContentType and encodes the given interface object. +func WriteMsgPack(w http.ResponseWriter, obj interface{}) error { + writeContentType(w, msgpackContentType) + var mh codec.MsgpackHandle + return codec.NewEncoder(w, &mh).Encode(obj) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/protobuf.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/protobuf.go new file mode 100644 index 00000000..15aca995 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/protobuf.go @@ -0,0 +1,36 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "net/http" + + "github.com/golang/protobuf/proto" +) + +// ProtoBuf contains the given interface object. +type ProtoBuf struct { + Data interface{} +} + +var protobufContentType = []string{"application/x-protobuf"} + +// Render (ProtoBuf) marshals the given interface object and writes data with custom ContentType. +func (r ProtoBuf) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + bytes, err := proto.Marshal(r.Data.(proto.Message)) + if err != nil { + return err + } + + _, err = w.Write(bytes) + return err +} + +// WriteContentType (ProtoBuf) writes ProtoBuf ContentType. 
+func (r ProtoBuf) WriteContentType(w http.ResponseWriter) { + writeContentType(w, protobufContentType) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/reader.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/reader.go new file mode 100644 index 00000000..d5282e49 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/reader.go @@ -0,0 +1,48 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "io" + "net/http" + "strconv" +) + +// Reader contains the IO reader and its length, and custom ContentType and other headers. +type Reader struct { + ContentType string + ContentLength int64 + Reader io.Reader + Headers map[string]string +} + +// Render (Reader) writes data with custom ContentType and headers. +func (r Reader) Render(w http.ResponseWriter) (err error) { + r.WriteContentType(w) + if r.ContentLength >= 0 { + if r.Headers == nil { + r.Headers = map[string]string{} + } + r.Headers["Content-Length"] = strconv.FormatInt(r.ContentLength, 10) + } + r.writeHeaders(w, r.Headers) + _, err = io.Copy(w, r.Reader) + return +} + +// WriteContentType (Reader) writes custom ContentType. +func (r Reader) WriteContentType(w http.ResponseWriter) { + writeContentType(w, []string{r.ContentType}) +} + +// writeHeaders writes custom Header. +func (r Reader) writeHeaders(w http.ResponseWriter, headers map[string]string) { + header := w.Header() + for k, v := range headers { + if header.Get(k) == "" { + header.Set(k, v) + } + } +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/redirect.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/redirect.go new file mode 100644 index 00000000..c006691c --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/redirect.go @@ -0,0 +1,29 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "fmt" + "net/http" +) + +// Redirect contains the http request reference and redirects status code and location. +type Redirect struct { + Code int + Request *http.Request + Location string +} + +// Render (Redirect) redirects the http request to new location and writes redirect response. +func (r Redirect) Render(w http.ResponseWriter) error { + if (r.Code < http.StatusMultipleChoices || r.Code > http.StatusPermanentRedirect) && r.Code != http.StatusCreated { + panic(fmt.Sprintf("Cannot redirect with status code %d", r.Code)) + } + http.Redirect(w, r.Request, r.Location, r.Code) + return nil +} + +// WriteContentType (Redirect) don't write any ContentType. +func (r Redirect) WriteContentType(http.ResponseWriter) {} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/render.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/render.go new file mode 100644 index 00000000..bcd568bf --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/render.go @@ -0,0 +1,40 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import "net/http" + +// Render interface is to be implemented by JSON, XML, HTML, YAML and so on. +type Render interface { + // Render writes data with custom ContentType. + Render(http.ResponseWriter) error + // WriteContentType writes custom ContentType. 
+ WriteContentType(w http.ResponseWriter) +} + +var ( + _ Render = JSON{} + _ Render = IndentedJSON{} + _ Render = SecureJSON{} + _ Render = JsonpJSON{} + _ Render = XML{} + _ Render = String{} + _ Render = Redirect{} + _ Render = Data{} + _ Render = HTML{} + _ HTMLRender = HTMLDebug{} + _ HTMLRender = HTMLProduction{} + _ Render = YAML{} + _ Render = Reader{} + _ Render = AsciiJSON{} + _ Render = ProtoBuf{} +) + +func writeContentType(w http.ResponseWriter, value []string) { + header := w.Header() + if val := header["Content-Type"]; len(val) == 0 { + header["Content-Type"] = value + } +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/text.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/text.go new file mode 100644 index 00000000..461b720a --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/text.go @@ -0,0 +1,41 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "fmt" + "net/http" + + "github.com/gin-gonic/gin/internal/bytesconv" +) + +// String contains the given interface object slice and its format. +type String struct { + Format string + Data []interface{} +} + +var plainContentType = []string{"text/plain; charset=utf-8"} + +// Render (String) writes data with custom ContentType. +func (r String) Render(w http.ResponseWriter) error { + return WriteString(w, r.Format, r.Data) +} + +// WriteContentType (String) writes Plain ContentType. +func (r String) WriteContentType(w http.ResponseWriter) { + writeContentType(w, plainContentType) +} + +// WriteString writes data according to its format and write custom ContentType. +func WriteString(w http.ResponseWriter, format string, data []interface{}) (err error) { + writeContentType(w, plainContentType) + if len(data) > 0 { + _, err = fmt.Fprintf(w, format, data...) 
+ return + } + _, err = w.Write(bytesconv.StringToBytes(format)) + return +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/xml.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/xml.go new file mode 100644 index 00000000..cc5390a2 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/xml.go @@ -0,0 +1,28 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "encoding/xml" + "net/http" +) + +// XML contains the given interface object. +type XML struct { + Data interface{} +} + +var xmlContentType = []string{"application/xml; charset=utf-8"} + +// Render (XML) encodes the given interface object and writes data with custom ContentType. +func (r XML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + return xml.NewEncoder(w).Encode(r.Data) +} + +// WriteContentType (XML) writes XML ContentType for response. +func (r XML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, xmlContentType) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/render/yaml.go b/terraform-server/vendor/github.com/gin-gonic/gin/render/yaml.go new file mode 100644 index 00000000..0df78360 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/render/yaml.go @@ -0,0 +1,36 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package render + +import ( + "net/http" + + "gopkg.in/yaml.v2" +) + +// YAML contains the given interface object. +type YAML struct { + Data interface{} +} + +var yamlContentType = []string{"application/x-yaml; charset=utf-8"} + +// Render (YAML) marshals the given interface object and writes data with custom ContentType. 
+func (r YAML) Render(w http.ResponseWriter) error { + r.WriteContentType(w) + + bytes, err := yaml.Marshal(r.Data) + if err != nil { + return err + } + + _, err = w.Write(bytes) + return err +} + +// WriteContentType (YAML) writes YAML ContentType for response. +func (r YAML) WriteContentType(w http.ResponseWriter) { + writeContentType(w, yamlContentType) +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/response_writer.go b/terraform-server/vendor/github.com/gin-gonic/gin/response_writer.go new file mode 100644 index 00000000..26826689 --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/response_writer.go @@ -0,0 +1,126 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "bufio" + "io" + "net" + "net/http" +) + +const ( + noWritten = -1 + defaultStatus = http.StatusOK +) + +// ResponseWriter ... +type ResponseWriter interface { + http.ResponseWriter + http.Hijacker + http.Flusher + http.CloseNotifier + + // Returns the HTTP response status code of the current request. + Status() int + + // Returns the number of bytes already written into the response http body. + // See Written() + Size() int + + // Writes the string into the response body. + WriteString(string) (int, error) + + // Returns true if the response body was already written. + Written() bool + + // Forces to write the http header (status code + headers). 
+ WriteHeaderNow() + + // get the http.Pusher for server push + Pusher() http.Pusher +} + +type responseWriter struct { + http.ResponseWriter + size int + status int +} + +var _ ResponseWriter = &responseWriter{} + +func (w *responseWriter) reset(writer http.ResponseWriter) { + w.ResponseWriter = writer + w.size = noWritten + w.status = defaultStatus +} + +func (w *responseWriter) WriteHeader(code int) { + if code > 0 && w.status != code { + if w.Written() { + debugPrint("[WARNING] Headers were already written. Wanted to override status code %d with %d", w.status, code) + } + w.status = code + } +} + +func (w *responseWriter) WriteHeaderNow() { + if !w.Written() { + w.size = 0 + w.ResponseWriter.WriteHeader(w.status) + } +} + +func (w *responseWriter) Write(data []byte) (n int, err error) { + w.WriteHeaderNow() + n, err = w.ResponseWriter.Write(data) + w.size += n + return +} + +func (w *responseWriter) WriteString(s string) (n int, err error) { + w.WriteHeaderNow() + n, err = io.WriteString(w.ResponseWriter, s) + w.size += n + return +} + +func (w *responseWriter) Status() int { + return w.status +} + +func (w *responseWriter) Size() int { + return w.size +} + +func (w *responseWriter) Written() bool { + return w.size != noWritten +} + +// Hijack implements the http.Hijacker interface. +func (w *responseWriter) Hijack() (net.Conn, *bufio.ReadWriter, error) { + if w.size < 0 { + w.size = 0 + } + return w.ResponseWriter.(http.Hijacker).Hijack() +} + +// CloseNotify implements the http.CloseNotify interface. +func (w *responseWriter) CloseNotify() <-chan bool { + return w.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +// Flush implements the http.Flush interface. 
+func (w *responseWriter) Flush() { + w.WriteHeaderNow() + w.ResponseWriter.(http.Flusher).Flush() +} + +func (w *responseWriter) Pusher() (pusher http.Pusher) { + if pusher, ok := w.ResponseWriter.(http.Pusher); ok { + return pusher + } + return nil +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/routergroup.go b/terraform-server/vendor/github.com/gin-gonic/gin/routergroup.go new file mode 100644 index 00000000..15d9930d --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/routergroup.go @@ -0,0 +1,230 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "net/http" + "path" + "regexp" + "strings" +) + +// IRouter defines all router handle interface includes single and group router. +type IRouter interface { + IRoutes + Group(string, ...HandlerFunc) *RouterGroup +} + +// IRoutes defines all router handle interface. +type IRoutes interface { + Use(...HandlerFunc) IRoutes + + Handle(string, string, ...HandlerFunc) IRoutes + Any(string, ...HandlerFunc) IRoutes + GET(string, ...HandlerFunc) IRoutes + POST(string, ...HandlerFunc) IRoutes + DELETE(string, ...HandlerFunc) IRoutes + PATCH(string, ...HandlerFunc) IRoutes + PUT(string, ...HandlerFunc) IRoutes + OPTIONS(string, ...HandlerFunc) IRoutes + HEAD(string, ...HandlerFunc) IRoutes + + StaticFile(string, string) IRoutes + Static(string, string) IRoutes + StaticFS(string, http.FileSystem) IRoutes +} + +// RouterGroup is used internally to configure router, a RouterGroup is associated with +// a prefix and an array of handlers (middleware). +type RouterGroup struct { + Handlers HandlersChain + basePath string + engine *Engine + root bool +} + +var _ IRouter = &RouterGroup{} + +// Use adds middleware to the group, see example code in GitHub. 
+func (group *RouterGroup) Use(middleware ...HandlerFunc) IRoutes { + group.Handlers = append(group.Handlers, middleware...) + return group.returnObj() +} + +// Group creates a new router group. You should add all the routes that have common middlewares or the same path prefix. +// For example, all the routes that use a common middleware for authorization could be grouped. +func (group *RouterGroup) Group(relativePath string, handlers ...HandlerFunc) *RouterGroup { + return &RouterGroup{ + Handlers: group.combineHandlers(handlers), + basePath: group.calculateAbsolutePath(relativePath), + engine: group.engine, + } +} + +// BasePath returns the base path of router group. +// For example, if v := router.Group("/rest/n/v1/api"), v.BasePath() is "/rest/n/v1/api". +func (group *RouterGroup) BasePath() string { + return group.basePath +} + +func (group *RouterGroup) handle(httpMethod, relativePath string, handlers HandlersChain) IRoutes { + absolutePath := group.calculateAbsolutePath(relativePath) + handlers = group.combineHandlers(handlers) + group.engine.addRoute(httpMethod, absolutePath, handlers) + return group.returnObj() +} + +// Handle registers a new request handle and middleware with the given path and method. +// The last handler should be the real handler, the other ones should be middleware that can and should be shared among different routes. +// See the example code in GitHub. +// +// For GET, POST, PUT, PATCH and DELETE requests the respective shortcut +// functions can be used. +// +// This function is intended for bulk loading and to allow the usage of less +// frequently used, non-standardized or custom methods (e.g. for internal +// communication with a proxy). 
+func (group *RouterGroup) Handle(httpMethod, relativePath string, handlers ...HandlerFunc) IRoutes { + if matches, err := regexp.MatchString("^[A-Z]+$", httpMethod); !matches || err != nil { + panic("http method " + httpMethod + " is not valid") + } + return group.handle(httpMethod, relativePath, handlers) +} + +// POST is a shortcut for router.Handle("POST", path, handle). +func (group *RouterGroup) POST(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle(http.MethodPost, relativePath, handlers) +} + +// GET is a shortcut for router.Handle("GET", path, handle). +func (group *RouterGroup) GET(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle(http.MethodGet, relativePath, handlers) +} + +// DELETE is a shortcut for router.Handle("DELETE", path, handle). +func (group *RouterGroup) DELETE(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle(http.MethodDelete, relativePath, handlers) +} + +// PATCH is a shortcut for router.Handle("PATCH", path, handle). +func (group *RouterGroup) PATCH(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle(http.MethodPatch, relativePath, handlers) +} + +// PUT is a shortcut for router.Handle("PUT", path, handle). +func (group *RouterGroup) PUT(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle(http.MethodPut, relativePath, handlers) +} + +// OPTIONS is a shortcut for router.Handle("OPTIONS", path, handle). +func (group *RouterGroup) OPTIONS(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle(http.MethodOptions, relativePath, handlers) +} + +// HEAD is a shortcut for router.Handle("HEAD", path, handle). +func (group *RouterGroup) HEAD(relativePath string, handlers ...HandlerFunc) IRoutes { + return group.handle(http.MethodHead, relativePath, handlers) +} + +// Any registers a route that matches all the HTTP methods. +// GET, POST, PUT, PATCH, HEAD, OPTIONS, DELETE, CONNECT, TRACE. 
+func (group *RouterGroup) Any(relativePath string, handlers ...HandlerFunc) IRoutes { + group.handle(http.MethodGet, relativePath, handlers) + group.handle(http.MethodPost, relativePath, handlers) + group.handle(http.MethodPut, relativePath, handlers) + group.handle(http.MethodPatch, relativePath, handlers) + group.handle(http.MethodHead, relativePath, handlers) + group.handle(http.MethodOptions, relativePath, handlers) + group.handle(http.MethodDelete, relativePath, handlers) + group.handle(http.MethodConnect, relativePath, handlers) + group.handle(http.MethodTrace, relativePath, handlers) + return group.returnObj() +} + +// StaticFile registers a single route in order to serve a single file of the local filesystem. +// router.StaticFile("favicon.ico", "./resources/favicon.ico") +func (group *RouterGroup) StaticFile(relativePath, filepath string) IRoutes { + if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { + panic("URL parameters can not be used when serving a static file") + } + handler := func(c *Context) { + c.File(filepath) + } + group.GET(relativePath, handler) + group.HEAD(relativePath, handler) + return group.returnObj() +} + +// Static serves files from the given file system root. +// Internally a http.FileServer is used, therefore http.NotFound is used instead +// of the Router's NotFound handler. +// To use the operating system's file system implementation, +// use : +// router.Static("/static", "/var/www") +func (group *RouterGroup) Static(relativePath, root string) IRoutes { + return group.StaticFS(relativePath, Dir(root, false)) +} + +// StaticFS works just like `Static()` but a custom `http.FileSystem` can be used instead. 
+// Gin by default user: gin.Dir() +func (group *RouterGroup) StaticFS(relativePath string, fs http.FileSystem) IRoutes { + if strings.Contains(relativePath, ":") || strings.Contains(relativePath, "*") { + panic("URL parameters can not be used when serving a static folder") + } + handler := group.createStaticHandler(relativePath, fs) + urlPattern := path.Join(relativePath, "/*filepath") + + // Register GET and HEAD handlers + group.GET(urlPattern, handler) + group.HEAD(urlPattern, handler) + return group.returnObj() +} + +func (group *RouterGroup) createStaticHandler(relativePath string, fs http.FileSystem) HandlerFunc { + absolutePath := group.calculateAbsolutePath(relativePath) + fileServer := http.StripPrefix(absolutePath, http.FileServer(fs)) + + return func(c *Context) { + if _, noListing := fs.(*onlyFilesFS); noListing { + c.Writer.WriteHeader(http.StatusNotFound) + } + + file := c.Param("filepath") + // Check if file exists and/or if we have permission to access it + f, err := fs.Open(file) + if err != nil { + c.Writer.WriteHeader(http.StatusNotFound) + c.handlers = group.engine.noRoute + // Reset index + c.index = -1 + return + } + f.Close() + + fileServer.ServeHTTP(c.Writer, c.Request) + } +} + +func (group *RouterGroup) combineHandlers(handlers HandlersChain) HandlersChain { + finalSize := len(group.Handlers) + len(handlers) + if finalSize >= int(abortIndex) { + panic("too many handlers") + } + mergedHandlers := make(HandlersChain, finalSize) + copy(mergedHandlers, group.Handlers) + copy(mergedHandlers[len(group.Handlers):], handlers) + return mergedHandlers +} + +func (group *RouterGroup) calculateAbsolutePath(relativePath string) string { + return joinPaths(group.basePath, relativePath) +} + +func (group *RouterGroup) returnObj() IRoutes { + if group.root { + return group.engine + } + return group +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/test_helpers.go b/terraform-server/vendor/github.com/gin-gonic/gin/test_helpers.go new file 
mode 100644 index 00000000..3a7a5ddf --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/test_helpers.go @@ -0,0 +1,16 @@ +// Copyright 2017 Manu Martinez-Almeida. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import "net/http" + +// CreateTestContext returns a fresh engine and context for testing purposes +func CreateTestContext(w http.ResponseWriter) (c *Context, r *Engine) { + r = New() + c = r.allocateContext() + c.reset() + c.writermem.reset(w) + return +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/tree.go b/terraform-server/vendor/github.com/gin-gonic/gin/tree.go new file mode 100644 index 00000000..ca753e6d --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/tree.go @@ -0,0 +1,777 @@ +// Copyright 2013 Julien Schmidt. All rights reserved. +// Use of this source code is governed by a BSD-style license that can be found +// at https://github.com/julienschmidt/httprouter/blob/master/LICENSE + +package gin + +import ( + "bytes" + "net/url" + "strings" + "unicode" + "unicode/utf8" + + "github.com/gin-gonic/gin/internal/bytesconv" +) + +var ( + strColon = []byte(":") + strStar = []byte("*") +) + +// Param is a single URL parameter, consisting of a key and a value. +type Param struct { + Key string + Value string +} + +// Params is a Param-slice, as returned by the router. +// The slice is ordered, the first URL parameter is also the first slice value. +// It is therefore safe to read values by the index. +type Params []Param + +// Get returns the value of the first Param which key matches the given name. +// If no matching Param is found, an empty string is returned. +func (ps Params) Get(name string) (string, bool) { + for _, entry := range ps { + if entry.Key == name { + return entry.Value, true + } + } + return "", false +} + +// ByName returns the value of the first Param which key matches the given name. 
+// If no matching Param is found, an empty string is returned. +func (ps Params) ByName(name string) (va string) { + va, _ = ps.Get(name) + return +} + +type methodTree struct { + method string + root *node +} + +type methodTrees []methodTree + +func (trees methodTrees) get(method string) *node { + for _, tree := range trees { + if tree.method == method { + return tree.root + } + } + return nil +} + +func min(a, b int) int { + if a <= b { + return a + } + return b +} + +func longestCommonPrefix(a, b string) int { + i := 0 + max := min(len(a), len(b)) + for i < max && a[i] == b[i] { + i++ + } + return i +} + +// addChild will add a child node, keeping wildcards at the end +func (n *node) addChild(child *node) { + if n.wildChild && len(n.children) > 0 { + wildcardChild := n.children[len(n.children)-1] + n.children = append(n.children[:len(n.children)-1], child, wildcardChild) + } else { + n.children = append(n.children, child) + } +} + +func countParams(path string) uint16 { + var n uint16 + s := bytesconv.StringToBytes(path) + n += uint16(bytes.Count(s, strColon)) + n += uint16(bytes.Count(s, strStar)) + return n +} + +type nodeType uint8 + +const ( + static nodeType = iota // default + root + param + catchAll +) + +type node struct { + path string + indices string + wildChild bool + nType nodeType + priority uint32 + children []*node // child nodes, at most 1 :param style node at the end of the array + handlers HandlersChain + fullPath string +} + +// Increments priority of the given child and reorders if necessary +func (n *node) incrementChildPrio(pos int) int { + cs := n.children + cs[pos].priority++ + prio := cs[pos].priority + + // Adjust position (move to front) + newPos := pos + for ; newPos > 0 && cs[newPos-1].priority < prio; newPos-- { + // Swap node positions + cs[newPos-1], cs[newPos] = cs[newPos], cs[newPos-1] + } + + // Build new index char string + if newPos != pos { + n.indices = n.indices[:newPos] + // Unchanged prefix, might be empty + 
n.indices[pos:pos+1] + // The index char we move + n.indices[newPos:pos] + n.indices[pos+1:] // Rest without char at 'pos' + } + + return newPos +} + +// addRoute adds a node with the given handle to the path. +// Not concurrency-safe! +func (n *node) addRoute(path string, handlers HandlersChain) { + fullPath := path + n.priority++ + + // Empty tree + if len(n.path) == 0 && len(n.children) == 0 { + n.insertChild(path, fullPath, handlers) + n.nType = root + return + } + + parentFullPathIndex := 0 + +walk: + for { + // Find the longest common prefix. + // This also implies that the common prefix contains no ':' or '*' + // since the existing key can't contain those chars. + i := longestCommonPrefix(path, n.path) + + // Split edge + if i < len(n.path) { + child := node{ + path: n.path[i:], + wildChild: n.wildChild, + indices: n.indices, + children: n.children, + handlers: n.handlers, + priority: n.priority - 1, + fullPath: n.fullPath, + } + + n.children = []*node{&child} + // []byte for proper unicode char conversion, see #65 + n.indices = bytesconv.BytesToString([]byte{n.path[i]}) + n.path = path[:i] + n.handlers = nil + n.wildChild = false + n.fullPath = fullPath[:parentFullPathIndex+i] + } + + // Make new node a child of this node + if i < len(path) { + path = path[i:] + c := path[0] + + // '/' after param + if n.nType == param && c == '/' && len(n.children) == 1 { + parentFullPathIndex += len(n.path) + n = n.children[0] + n.priority++ + continue walk + } + + // Check if a child with the next path byte exists + for i, max := 0, len(n.indices); i < max; i++ { + if c == n.indices[i] { + parentFullPathIndex += len(n.path) + i = n.incrementChildPrio(i) + n = n.children[i] + continue walk + } + } + + // Otherwise insert it + if c != ':' && c != '*' && n.nType != catchAll { + // []byte for proper unicode char conversion, see #65 + n.indices += bytesconv.BytesToString([]byte{c}) + child := &node{ + fullPath: fullPath, + } + n.addChild(child) + 
n.incrementChildPrio(len(n.indices) - 1) + n = child + } else if n.wildChild { + // inserting a wildcard node, need to check if it conflicts with the existing wildcard + n = n.children[len(n.children)-1] + n.priority++ + + // Check if the wildcard matches + if len(path) >= len(n.path) && n.path == path[:len(n.path)] && + // Adding a child to a catchAll is not possible + n.nType != catchAll && + // Check for longer wildcard, e.g. :name and :names + (len(n.path) >= len(path) || path[len(n.path)] == '/') { + continue walk + } + + // Wildcard conflict + pathSeg := path + if n.nType != catchAll { + pathSeg = strings.SplitN(pathSeg, "/", 2)[0] + } + prefix := fullPath[:strings.Index(fullPath, pathSeg)] + n.path + panic("'" + pathSeg + + "' in new path '" + fullPath + + "' conflicts with existing wildcard '" + n.path + + "' in existing prefix '" + prefix + + "'") + } + + n.insertChild(path, fullPath, handlers) + return + } + + // Otherwise add handle to current node + if n.handlers != nil { + panic("handlers are already registered for path '" + fullPath + "'") + } + n.handlers = handlers + n.fullPath = fullPath + return + } +} + +// Search for a wildcard segment and check the name for invalid characters. +// Returns -1 as index, if no wildcard was found. 
+func findWildcard(path string) (wildcard string, i int, valid bool) { + // Find start + for start, c := range []byte(path) { + // A wildcard starts with ':' (param) or '*' (catch-all) + if c != ':' && c != '*' { + continue + } + + // Find end and check for invalid characters + valid = true + for end, c := range []byte(path[start+1:]) { + switch c { + case '/': + return path[start : start+1+end], start, valid + case ':', '*': + valid = false + } + } + return path[start:], start, valid + } + return "", -1, false +} + +func (n *node) insertChild(path string, fullPath string, handlers HandlersChain) { + for { + // Find prefix until first wildcard + wildcard, i, valid := findWildcard(path) + if i < 0 { // No wildcard found + break + } + + // The wildcard name must not contain ':' and '*' + if !valid { + panic("only one wildcard per path segment is allowed, has: '" + + wildcard + "' in path '" + fullPath + "'") + } + + // check if the wildcard has a name + if len(wildcard) < 2 { + panic("wildcards must be named with a non-empty name in path '" + fullPath + "'") + } + + if wildcard[0] == ':' { // param + if i > 0 { + // Insert prefix before the current wildcard + n.path = path[:i] + path = path[i:] + } + + child := &node{ + nType: param, + path: wildcard, + fullPath: fullPath, + } + n.addChild(child) + n.wildChild = true + n = child + n.priority++ + + // if the path doesn't end with the wildcard, then there + // will be another non-wildcard subpath starting with '/' + if len(wildcard) < len(path) { + path = path[len(wildcard):] + + child := &node{ + priority: 1, + fullPath: fullPath, + } + n.addChild(child) + n = child + continue + } + + // Otherwise we're done. 
Insert the handle in the new leaf + n.handlers = handlers + return + } + + // catchAll + if i+len(wildcard) != len(path) { + panic("catch-all routes are only allowed at the end of the path in path '" + fullPath + "'") + } + + if len(n.path) > 0 && n.path[len(n.path)-1] == '/' { + panic("catch-all conflicts with existing handle for the path segment root in path '" + fullPath + "'") + } + + // currently fixed width 1 for '/' + i-- + if path[i] != '/' { + panic("no / before catch-all in path '" + fullPath + "'") + } + + n.path = path[:i] + + // First node: catchAll node with empty path + child := &node{ + wildChild: true, + nType: catchAll, + fullPath: fullPath, + } + + n.addChild(child) + n.indices = string('/') + n = child + n.priority++ + + // second node: node holding the variable + child = &node{ + path: path[i:], + nType: catchAll, + handlers: handlers, + priority: 1, + fullPath: fullPath, + } + n.children = []*node{child} + + return + } + + // If no wildcard was found, simply insert the path and handle + n.path = path + n.handlers = handlers + n.fullPath = fullPath +} + +// nodeValue holds return values of (*Node).getValue method +type nodeValue struct { + handlers HandlersChain + params *Params + tsr bool + fullPath string +} + +// Returns the handle registered with the given path (key). The values of +// wildcards are saved to a map. +// If no handle can be found, a TSR (trailing slash redirect) recommendation is +// made if a handle exists with an extra (without the) trailing slash for the +// given path. 
+func (n *node) getValue(path string, params *Params, unescape bool) (value nodeValue) { +walk: // Outer loop for walking the tree + for { + prefix := n.path + if len(path) > len(prefix) { + if path[:len(prefix)] == prefix { + path = path[len(prefix):] + + // Try all the non-wildcard children first by matching the indices + idxc := path[0] + for i, c := range []byte(n.indices) { + if c == idxc { + n = n.children[i] + continue walk + } + } + + // If there is no wildcard pattern, recommend a redirection + if !n.wildChild { + // Nothing found. + // We can recommend to redirect to the same URL without a + // trailing slash if a leaf exists for that path. + value.tsr = (path == "/" && n.handlers != nil) + return + } + + // Handle wildcard child, which is always at the end of the array + n = n.children[len(n.children)-1] + + switch n.nType { + case param: + // Find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // Save param value + if params != nil { + if value.params == nil { + value.params = params + } + // Expand slice within preallocated capacity + i := len(*value.params) + *value.params = (*value.params)[:i+1] + val := path[:end] + if unescape { + if v, err := url.QueryUnescape(val); err == nil { + val = v + } + } + (*value.params)[i] = Param{ + Key: n.path[1:], + Value: val, + } + } + + // we need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + path = path[end:] + n = n.children[0] + continue walk + } + + // ... but we can't + value.tsr = (len(path) == end+1) + return + } + + if value.handlers = n.handlers; value.handlers != nil { + value.fullPath = n.fullPath + return + } + if len(n.children) == 1 { + // No handle found. 
Check if a handle for this path + a + // trailing slash exists for TSR recommendation + n = n.children[0] + value.tsr = (n.path == "/" && n.handlers != nil) + } + return + + case catchAll: + // Save param value + if params != nil { + if value.params == nil { + value.params = params + } + // Expand slice within preallocated capacity + i := len(*value.params) + *value.params = (*value.params)[:i+1] + val := path + if unescape { + if v, err := url.QueryUnescape(path); err == nil { + val = v + } + } + (*value.params)[i] = Param{ + Key: n.path[2:], + Value: val, + } + } + + value.handlers = n.handlers + value.fullPath = n.fullPath + return + + default: + panic("invalid node type") + } + } + } + + if path == prefix { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if value.handlers = n.handlers; value.handlers != nil { + value.fullPath = n.fullPath + return + } + + // If there is no handle for this route, but this route has a + // wildcard child, there must be a handle for this path with an + // additional trailing slash + if path == "/" && n.wildChild && n.nType != root { + value.tsr = true + return + } + + // No handle found. Check if a handle for this path + a + // trailing slash exists for trailing slash recommendation + for i, c := range []byte(n.indices) { + if c == '/' { + n = n.children[i] + value.tsr = (len(n.path) == 1 && n.handlers != nil) || + (n.nType == catchAll && n.children[0].handlers != nil) + return + } + } + + return + } + + // Nothing found. We can recommend to redirect to the same URL with an + // extra trailing slash if a leaf exists for that path + value.tsr = (path == "/") || + (len(prefix) == len(path)+1 && prefix[len(path)] == '/' && + path == prefix[:len(prefix)-1] && n.handlers != nil) + return + } +} + +// Makes a case-insensitive lookup of the given path and tries to find a handler. +// It can optionally also fix trailing slashes. 
+// It returns the case-corrected path and a bool indicating whether the lookup +// was successful. +func (n *node) findCaseInsensitivePath(path string, fixTrailingSlash bool) ([]byte, bool) { + const stackBufSize = 128 + + // Use a static sized buffer on the stack in the common case. + // If the path is too long, allocate a buffer on the heap instead. + buf := make([]byte, 0, stackBufSize) + if length := len(path) + 1; length > stackBufSize { + buf = make([]byte, 0, length) + } + + ciPath := n.findCaseInsensitivePathRec( + path, + buf, // Preallocate enough memory for new path + [4]byte{}, // Empty rune buffer + fixTrailingSlash, + ) + + return ciPath, ciPath != nil +} + +// Shift bytes in array by n bytes left +func shiftNRuneBytes(rb [4]byte, n int) [4]byte { + switch n { + case 0: + return rb + case 1: + return [4]byte{rb[1], rb[2], rb[3], 0} + case 2: + return [4]byte{rb[2], rb[3]} + case 3: + return [4]byte{rb[3]} + default: + return [4]byte{} + } +} + +// Recursive case-insensitive lookup function used by n.findCaseInsensitivePath +func (n *node) findCaseInsensitivePathRec(path string, ciPath []byte, rb [4]byte, fixTrailingSlash bool) []byte { + npLen := len(n.path) + +walk: // Outer loop for walking the tree + for len(path) >= npLen && (npLen == 0 || strings.EqualFold(path[1:npLen], n.path[1:])) { + // Add common prefix to result + oldPath := path + path = path[npLen:] + ciPath = append(ciPath, n.path...) + + if len(path) == 0 { + // We should have reached the node containing the handle. + // Check if this node has a handle registered. + if n.handlers != nil { + return ciPath + } + + // No handle found. 
+ // Try to fix the path by adding a trailing slash + if fixTrailingSlash { + for i, c := range []byte(n.indices) { + if c == '/' { + n = n.children[i] + if (len(n.path) == 1 && n.handlers != nil) || + (n.nType == catchAll && n.children[0].handlers != nil) { + return append(ciPath, '/') + } + return nil + } + } + } + return nil + } + + // If this node does not have a wildcard (param or catchAll) child, + // we can just look up the next child node and continue to walk down + // the tree + if !n.wildChild { + // Skip rune bytes already processed + rb = shiftNRuneBytes(rb, npLen) + + if rb[0] != 0 { + // Old rune not finished + idxc := rb[0] + for i, c := range []byte(n.indices) { + if c == idxc { + // continue with child node + n = n.children[i] + npLen = len(n.path) + continue walk + } + } + } else { + // Process a new rune + var rv rune + + // Find rune start. + // Runes are up to 4 byte long, + // -4 would definitely be another rune. + var off int + for max := min(npLen, 3); off < max; off++ { + if i := npLen - off; utf8.RuneStart(oldPath[i]) { + // read rune from cached path + rv, _ = utf8.DecodeRuneInString(oldPath[i:]) + break + } + } + + // Calculate lowercase bytes of current rune + lo := unicode.ToLower(rv) + utf8.EncodeRune(rb[:], lo) + + // Skip already processed bytes + rb = shiftNRuneBytes(rb, off) + + idxc := rb[0] + for i, c := range []byte(n.indices) { + // Lowercase matches + if c == idxc { + // must use a recursive approach since both the + // uppercase byte and the lowercase byte might exist + // as an index + if out := n.children[i].findCaseInsensitivePathRec( + path, ciPath, rb, fixTrailingSlash, + ); out != nil { + return out + } + break + } + } + + // If we found no match, the same for the uppercase rune, + // if it differs + if up := unicode.ToUpper(rv); up != lo { + utf8.EncodeRune(rb[:], up) + rb = shiftNRuneBytes(rb, off) + + idxc := rb[0] + for i, c := range []byte(n.indices) { + // Uppercase matches + if c == idxc { + // Continue with 
child node + n = n.children[i] + npLen = len(n.path) + continue walk + } + } + } + } + + // Nothing found. We can recommend to redirect to the same URL + // without a trailing slash if a leaf exists for that path + if fixTrailingSlash && path == "/" && n.handlers != nil { + return ciPath + } + return nil + } + + n = n.children[0] + switch n.nType { + case param: + // Find param end (either '/' or path end) + end := 0 + for end < len(path) && path[end] != '/' { + end++ + } + + // Add param value to case insensitive path + ciPath = append(ciPath, path[:end]...) + + // We need to go deeper! + if end < len(path) { + if len(n.children) > 0 { + // Continue with child node + n = n.children[0] + npLen = len(n.path) + path = path[end:] + continue + } + + // ... but we can't + if fixTrailingSlash && len(path) == end+1 { + return ciPath + } + return nil + } + + if n.handlers != nil { + return ciPath + } + + if fixTrailingSlash && len(n.children) == 1 { + // No handle found. Check if a handle for this path + a + // trailing slash exists + n = n.children[0] + if n.path == "/" && n.handlers != nil { + return append(ciPath, '/') + } + } + + return nil + + case catchAll: + return append(ciPath, path...) + + default: + panic("invalid node type") + } + } + + // Nothing found. + // Try to fix the path by adding / removing a trailing slash + if fixTrailingSlash { + if path == "/" { + return ciPath + } + if len(path)+1 == npLen && n.path[len(path)] == '/' && + strings.EqualFold(path[1:], n.path[1:len(path)]) && n.handlers != nil { + return append(ciPath, n.path...) + } + } + return nil +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/utils.go b/terraform-server/vendor/github.com/gin-gonic/gin/utils.go new file mode 100644 index 00000000..c32f0eeb --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/utils.go @@ -0,0 +1,153 @@ +// Copyright 2014 Manu Martinez-Almeida. All rights reserved. 
+// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +import ( + "encoding/xml" + "net/http" + "os" + "path" + "reflect" + "runtime" + "strings" +) + +// BindKey indicates a default bind key. +const BindKey = "_gin-gonic/gin/bindkey" + +// Bind is a helper function for given interface object and returns a Gin middleware. +func Bind(val interface{}) HandlerFunc { + value := reflect.ValueOf(val) + if value.Kind() == reflect.Ptr { + panic(`Bind struct can not be a pointer. Example: + Use: gin.Bind(Struct{}) instead of gin.Bind(&Struct{}) +`) + } + typ := value.Type() + + return func(c *Context) { + obj := reflect.New(typ).Interface() + if c.Bind(obj) == nil { + c.Set(BindKey, obj) + } + } +} + +// WrapF is a helper function for wrapping http.HandlerFunc and returns a Gin middleware. +func WrapF(f http.HandlerFunc) HandlerFunc { + return func(c *Context) { + f(c.Writer, c.Request) + } +} + +// WrapH is a helper function for wrapping http.Handler and returns a Gin middleware. +func WrapH(h http.Handler) HandlerFunc { + return func(c *Context) { + h.ServeHTTP(c.Writer, c.Request) + } +} + +// H is a shortcut for map[string]interface{} +type H map[string]interface{} + +// MarshalXML allows type H to be used with xml.Marshal. 
+func (h H) MarshalXML(e *xml.Encoder, start xml.StartElement) error { + start.Name = xml.Name{ + Space: "", + Local: "map", + } + if err := e.EncodeToken(start); err != nil { + return err + } + for key, value := range h { + elem := xml.StartElement{ + Name: xml.Name{Space: "", Local: key}, + Attr: []xml.Attr{}, + } + if err := e.EncodeElement(value, elem); err != nil { + return err + } + } + + return e.EncodeToken(xml.EndElement{Name: start.Name}) +} + +func assert1(guard bool, text string) { + if !guard { + panic(text) + } +} + +func filterFlags(content string) string { + for i, char := range content { + if char == ' ' || char == ';' { + return content[:i] + } + } + return content +} + +func chooseData(custom, wildcard interface{}) interface{} { + if custom != nil { + return custom + } + if wildcard != nil { + return wildcard + } + panic("negotiation config is invalid") +} + +func parseAccept(acceptHeader string) []string { + parts := strings.Split(acceptHeader, ",") + out := make([]string, 0, len(parts)) + for _, part := range parts { + if i := strings.IndexByte(part, ';'); i > 0 { + part = part[:i] + } + if part = strings.TrimSpace(part); part != "" { + out = append(out, part) + } + } + return out +} + +func lastChar(str string) uint8 { + if str == "" { + panic("The length of the string can't be 0") + } + return str[len(str)-1] +} + +func nameOfFunction(f interface{}) string { + return runtime.FuncForPC(reflect.ValueOf(f).Pointer()).Name() +} + +func joinPaths(absolutePath, relativePath string) string { + if relativePath == "" { + return absolutePath + } + + finalPath := path.Join(absolutePath, relativePath) + if lastChar(relativePath) == '/' && lastChar(finalPath) != '/' { + return finalPath + "/" + } + return finalPath +} + +func resolveAddress(addr []string) string { + switch len(addr) { + case 0: + if port := os.Getenv("PORT"); port != "" { + debugPrint("Environment variable PORT=\"%s\"", port) + return ":" + port + } + debugPrint("Environment variable PORT 
is undefined. Using port :8080 by default") + return ":8080" + case 1: + return addr[0] + default: + panic("too many parameters") + } +} diff --git a/terraform-server/vendor/github.com/gin-gonic/gin/version.go b/terraform-server/vendor/github.com/gin-gonic/gin/version.go new file mode 100644 index 00000000..3647461b --- /dev/null +++ b/terraform-server/vendor/github.com/gin-gonic/gin/version.go @@ -0,0 +1,8 @@ +// Copyright 2018 Gin Core Team. All rights reserved. +// Use of this source code is governed by a MIT style +// license that can be found in the LICENSE file. + +package gin + +// Version is the current gin framework's version. +const Version = "v1.7.1" diff --git a/terraform-server/vendor/github.com/glenn-brown/golang-pkg-pcre/src/pkg/pcre/Makefile b/terraform-server/vendor/github.com/glenn-brown/golang-pkg-pcre/src/pkg/pcre/Makefile new file mode 100644 index 00000000..da5a4dbe --- /dev/null +++ b/terraform-server/vendor/github.com/glenn-brown/golang-pkg-pcre/src/pkg/pcre/Makefile @@ -0,0 +1,12 @@ +include $(GOROOT)/src/Make.inc + +TARG=pcre + +CGOFILES=\ + pcre.go + +include $(GOROOT)/src/Make.pkg + +.PHONY: install-debian +install-debian: + install -D _obj/$(TARG).a $(DESTDIR)/$(pkgdir)/$(TARG).a diff --git a/terraform-server/vendor/github.com/glenn-brown/golang-pkg-pcre/src/pkg/pcre/pcre.go b/terraform-server/vendor/github.com/glenn-brown/golang-pkg-pcre/src/pkg/pcre/pcre.go new file mode 100644 index 00000000..1b357df2 --- /dev/null +++ b/terraform-server/vendor/github.com/glenn-brown/golang-pkg-pcre/src/pkg/pcre/pcre.go @@ -0,0 +1,406 @@ +// Copyright (c) 2011 Florian Weimer. All rights reserved. +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// +// * Redistributions in binary form must reproduce the above copyright +// notice, this list of conditions and the following disclaimer in the +// documentation and/or other materials provided with the distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// This package provides access to the Perl Compatible Regular +// Expresion library, PCRE. +// +// It implements two main types, Regexp and Matcher. Regexp objects +// store a compiled regular expression. They are immutable. +// Compilation of regular expressions using Compile or MustCompile is +// slightly expensive, so these objects should be kept and reused, +// instead of compiling them from scratch for each matching attempt. +// +// Matcher objects keeps the results of a match against a []byte or +// string subject. The Group and GroupString functions provide access +// to capture groups; both versions work no matter if the subject was a +// []byte or string, but the version with the matching type is slightly +// more efficient. +// +// Matcher objects contain some temporary space and refer the original +// subject. They are mutable and can be reused (using Match, +// MatchString, Reset or ResetString). 
+// +// For details on the regular expression language implemented by this +// package and the flags defined below, see the PCRE documentation. +package pcre + +/* +#cgo LDFLAGS: -lpcre +#cgo CFLAGS: -I/opt/local/include +#include +#include +*/ +import "C" + +import ( + "strconv" + "unsafe" +) + +// Flags for Compile and Match functions. +const ( + ANCHORED = C.PCRE_ANCHORED + BSR_ANYCRLF = C.PCRE_BSR_ANYCRLF + BSR_UNICODE = C.PCRE_BSR_UNICODE + NEWLINE_ANY = C.PCRE_NEWLINE_ANY + NEWLINE_ANYCRLF = C.PCRE_NEWLINE_ANYCRLF + NEWLINE_CR = C.PCRE_NEWLINE_CR + NEWLINE_CRLF = C.PCRE_NEWLINE_CRLF + NEWLINE_LF = C.PCRE_NEWLINE_LF + NO_UTF8_CHECK = C.PCRE_NO_UTF8_CHECK +) + +// Flags for Compile functions +const ( + CASELESS = C.PCRE_CASELESS + DOLLAR_ENDONLY = C.PCRE_DOLLAR_ENDONLY + DOTALL = C.PCRE_DOTALL + DUPNAMES = C.PCRE_DUPNAMES + EXTENDED = C.PCRE_EXTENDED + EXTRA = C.PCRE_EXTRA + FIRSTLINE = C.PCRE_FIRSTLINE + JAVASCRIPT_COMPAT = C.PCRE_JAVASCRIPT_COMPAT + MULTILINE = C.PCRE_MULTILINE + NO_AUTO_CAPTURE = C.PCRE_NO_AUTO_CAPTURE + UNGREEDY = C.PCRE_UNGREEDY + UTF8 = C.PCRE_UTF8 +) + +// Flags for Match functions +const ( + NOTBOL = C.PCRE_NOTBOL + NOTEOL = C.PCRE_NOTEOL + NOTEMPTY = C.PCRE_NOTEMPTY + NOTEMPTY_ATSTART = C.PCRE_NOTEMPTY_ATSTART + NO_START_OPTIMIZE = C.PCRE_NO_START_OPTIMIZE + PARTIAL_HARD = C.PCRE_PARTIAL_HARD + PARTIAL_SOFT = C.PCRE_PARTIAL_SOFT +) + +// A reference to a compiled regular expression. +// Use Compile or MustCompile to create such objects. +type Regexp struct { + ptr []byte +} + +// Number of bytes in the compiled pattern +func pcresize(ptr *C.pcre) (size C.size_t) { + C.pcre_fullinfo(ptr, nil, C.PCRE_INFO_SIZE, unsafe.Pointer(&size)) + return +} + +// Number of capture groups +func pcregroups(ptr *C.pcre) (count C.int) { + C.pcre_fullinfo(ptr, nil, + C.PCRE_INFO_CAPTURECOUNT, unsafe.Pointer(&count)) + return +} + +// Move pattern to the Go heap so that we do not have to use a +// finalizer. PCRE patterns are fully relocatable. 
(We do not use +// custom character tables.) +func toheap(ptr *C.pcre) (re Regexp) { + defer C.free(unsafe.Pointer(ptr)) + size := pcresize(ptr) + re.ptr = make([]byte, size) + C.memcpy(unsafe.Pointer(&re.ptr[0]), unsafe.Pointer(ptr), size) + return +} + +// Try to compile the pattern. If an error occurs, the second return +// value is non-nil. +func Compile(pattern string, flags int) (Regexp, *CompileError) { + pattern1 := C.CString(pattern) + defer C.free(unsafe.Pointer(pattern1)) + if clen := int(C.strlen(pattern1)); clen != len(pattern) { + return Regexp{}, &CompileError{ + Pattern: pattern, + Message: "NUL byte in pattern", + Offset: clen, + } + } + var errptr *C.char + var erroffset C.int + ptr := C.pcre_compile(pattern1, C.int(flags), &errptr, &erroffset, nil) + if ptr == nil { + return Regexp{}, &CompileError{ + Pattern: pattern, + Message: C.GoString(errptr), + Offset: int(erroffset), + } + } + return toheap(ptr), nil +} + +// Compile the pattern. If compilation fails, panic. +func MustCompile(pattern string, flags int) (re Regexp) { + re, err := Compile(pattern, flags) + if err != nil { + panic(err) + } + return +} + +// Returns the number of capture groups in the compiled pattern. +func (re Regexp) Groups() int { + if re.ptr == nil { + panic("Regexp.Groups: uninitialized") + } + return int(pcregroups((*C.pcre)(unsafe.Pointer(&re.ptr[0])))) +} + +// Matcher objects provide a place for storing match results. +// They can be created by the Matcher and MatcherString functions, +// or they can be initialized with Reset or ResetString. +type Matcher struct { + re Regexp + groups int + ovector []C.int // scratch space for capture offsets + matches bool // last match was successful + subjects string // one of these fields is set to record the subject, + subjectb []byte // so that Group/GroupString can return slices +} + +// Returns a new matcher object, with the byte array slice as a +// subject. 
+func (re Regexp) Matcher(subject []byte, flags int) (m *Matcher) { + m = new(Matcher) + m.Reset(re, subject, flags) + return +} + +// Returns a new matcher object, with the specified subject string. +func (re Regexp) MatcherString(subject string, flags int) (m *Matcher) { + m = new(Matcher) + m.ResetString(re, subject, flags) + return +} + +// Switches the matcher object to the specified pattern and subject. +func (m *Matcher) Reset(re Regexp, subject []byte, flags int) { + if re.ptr == nil { + panic("Regexp.Matcher: uninitialized") + } + m.init(re) + m.Match(subject, flags) +} + +// Switches the matcher object to the specified pattern and subject +// string. +func (m *Matcher) ResetString(re Regexp, subject string, flags int) { + if re.ptr == nil { + panic("Regexp.Matcher: uninitialized") + } + m.init(re) + m.MatchString(subject, flags) +} + +func (m *Matcher) init(re Regexp) { + m.matches = false + if m.re.ptr != nil && &m.re.ptr[0] == &re.ptr[0] { + // Skip group count extraction if the matcher has + // already been initialized with the same regular + // expression. + return + } + m.re = re + m.groups = re.Groups() + if ovectorlen := 3 * (1 + m.groups); len(m.ovector) < ovectorlen { + m.ovector = make([]C.int, ovectorlen) + } +} + +var nullbyte = []byte{0} + +// Tries to match the speficied byte array slice to the current +// pattern. Returns true if the match succeeds. +func (m *Matcher) Match(subject []byte, flags int) bool { + if m.re.ptr == nil { + panic("Matcher.Match: uninitialized") + } + length := len(subject) + m.subjects = "" + m.subjectb = subject + if length == 0 { + subject = nullbyte // make first character adressable + } + subjectptr := (*C.char)(unsafe.Pointer(&subject[0])) + return m.match(subjectptr, length, flags) +} + +// Tries to match the speficied subject string to the current pattern. +// Returns true if the match succeeds. 
+func (m *Matcher) MatchString(subject string, flags int) bool { + if m.re.ptr == nil { + panic("Matcher.Match: uninitialized") + } + length := len(subject) + m.subjects = subject + m.subjectb = nil + if length == 0 { + subject = "\000" // make first character addressable + } + // The following is a non-portable kludge to avoid a copy + subjectptr := *(**C.char)(unsafe.Pointer(&subject)) + return m.match(subjectptr, length, flags) +} + +func (m *Matcher) match(subjectptr *C.char, length, flags int) bool { + rc := C.pcre_exec((*C.pcre)(unsafe.Pointer(&m.re.ptr[0])), nil, + subjectptr, C.int(length), + 0, C.int(flags), &m.ovector[0], C.int(len(m.ovector))) + switch { + case rc >= 0: + m.matches = true + return true + case rc == C.PCRE_ERROR_NOMATCH: + m.matches = false + return false + case rc == C.PCRE_ERROR_BADOPTION: + panic("PCRE.Match: invalid option flag") + } + panic("unexepected return code from pcre_exec: " + + strconv.Itoa(int(rc))) +} + +// Returns true if a previous call to Matcher, MatcherString, Reset, +// ResetString, Match or MatchString succeeded. +func (m *Matcher) Matches() bool { + return m.matches +} + +// Returns the number of groups in the current pattern. +func (m *Matcher) Groups() int { + return m.groups +} + +// Returns true if the numbered capture group is present in the last +// match (performed by Matcher, MatcherString, Reset, ResetString, +// Match, or MatchString). Group numbers start at 1. A capture group +// can be present and match the empty string. +func (m *Matcher) Present(group int) bool { + return m.ovector[2 * group] >= 0 +} + +// Returns the numbered capture group of the last match (performed by +// Matcher, MatcherString, Reset, ResetString, Match, or MatchString). +// Group 0 is the part of the subject which matches the whole pattern; +// the first actual capture group is numbered 1. Capture groups which +// are not present return a nil slice. 
+func (m *Matcher) Group(group int) []byte { + start := m.ovector[2 * group] + end := m.ovector[2 * group + 1] + if start >= 0 { + if m.subjectb != nil { + return m.subjectb[start:end] + } + return []byte(m.subjects[start:end]) + } + return nil +} + +// Returns the numbered capture group as a string. Group 0 is the +// part of the subject which matches the whole pattern; the first +// actual capture group is numbered 1. Capture groups which are not +// present return an empty string. +func (m *Matcher) GroupString(group int) string { + start := m.ovector[2 * group] + end := m.ovector[2 * group + 1] + if start >= 0 { + if m.subjectb != nil { + return string(m.subjectb[start:end]) + } + return m.subjects[start:end] + } + return "" +} + +func (m *Matcher) name2index(name string) (group int) { + if m.re.ptr == nil { + panic("Matcher.Named: uninitialized") + } + name1 := C.CString(name) + defer C.free(unsafe.Pointer(name1)) + group = int(C.pcre_get_stringnumber( + (*C.pcre)(unsafe.Pointer(&m.re.ptr[0])), name1)) + if group < 0 { + panic("Matcher.Named: unknown name: " + name) + } + return +} + +// Returns the value of the named capture group. This is a nil slice +// if the capture group is not present. Panics if the name does not +// refer to a group. +func (m *Matcher) Named(group string) []byte { + return m.Group(m.name2index(group)) +} + +// Returns the value of the named capture group, or an empty string if +// the capture group is not present. Panics if the name does not +// refer to a group. +func (m *Matcher) NamedString(group string) string { + return m.GroupString(m.name2index(group)) +} + +// Returns true if the named capture group is present. Panics if the +// name does not refer to a group. +func (m *Matcher) NamedPresent(group string) bool { + return m.Present(m.name2index(group)) +} + +// Return the start and end of the first match, or nil if no match. +// loc[0] is the start and loc[1] is the end. 
+func (re *Regexp) FindIndex(bytes []byte, flags int) []int { + m := re.Matcher(bytes, flags) + if m.Match(bytes, flags) { + return []int{int(m.ovector[0]), int(m.ovector[1])} + } + return nil +} + +// Return a copy of a byte slice with pattern matches replaced by repl. +func (re Regexp) ReplaceAll(bytes, repl []byte, flags int) []byte { + m := re.Matcher(bytes, 0) + r := []byte{} + for m.Match(bytes, flags) { + r = append (append (r, bytes[:m.ovector[0]]...), repl...) + bytes = bytes[m.ovector[1]:] + } + return append (r, bytes...) +} + +// A compilation error, as returned by the Compile function. The +// offset is the byte position in the pattern string at which the +// error was detected. +type CompileError struct { + Pattern string + Message string + Offset int +} + +func (e *CompileError) String() string { + return e.Pattern + " (" + strconv.Itoa(e.Offset) + "): " + e.Message +} diff --git a/terraform-server/vendor/github.com/go-playground/locales/.gitignore b/terraform-server/vendor/github.com/go-playground/locales/.gitignore new file mode 100644 index 00000000..daf913b1 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/.gitignore @@ -0,0 +1,24 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof diff --git a/terraform-server/vendor/github.com/go-playground/locales/.travis.yml b/terraform-server/vendor/github.com/go-playground/locales/.travis.yml new file mode 100644 index 00000000..d50237a6 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/.travis.yml @@ -0,0 +1,26 @@ +language: go +go: + - 1.13.1 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go 
install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... + +after_success: | + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/terraform-server/vendor/github.com/go-playground/locales/LICENSE b/terraform-server/vendor/github.com/go-playground/locales/LICENSE new file mode 100644 index 00000000..75854ac4 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
\ No newline at end of file diff --git a/terraform-server/vendor/github.com/go-playground/locales/README.md b/terraform-server/vendor/github.com/go-playground/locales/README.md new file mode 100644 index 00000000..ba1b0680 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/README.md @@ -0,0 +1,172 @@ +## locales +![Project status](https://img.shields.io/badge/version-0.13.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/locales.svg?branch=master)](https://travis-ci.org/go-playground/locales) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/locales)](https://goreportcard.com/report/github.com/go-playground/locales) +[![GoDoc](https://godoc.org/github.com/go-playground/locales?status.svg)](https://godoc.org/github.com/go-playground/locales) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/locales.svg)](https://gitter.im/go-playground/locales?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Locales is a set of locales generated from the [Unicode CLDR Project](http://cldr.unicode.org/) which can be used independently or within +an i18n package; these were built for use with, but not exclusive to, [Universal Translator](https://github.com/go-playground/universal-translator). 
+ +Features +-------- +- [x] Rules generated from the latest [CLDR](http://cldr.unicode.org/index/downloads) data, v31.0.1 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) + +Full Tests +-------------------- +I could sure use your help adding tests for every locale, it is a huge undertaking and I just don't have the free time to do it all at the moment; +any help would be **greatly appreciated!!!!** please see [issue](https://github.com/go-playground/locales/issues/1) for details. + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/locales +``` + +NOTES +-------- +You'll notice most return types are []byte, this is because most of the time the results will be concatenated with a larger body +of text and can avoid some allocations if already appending to a byte array, otherwise just cast as string. + +Usage +------- +```go +package main + +import ( + "fmt" + "time" + + "github.com/go-playground/locales/currency" + "github.com/go-playground/locales/en_CA" +) + +func main() { + + loc, _ := time.LoadLocation("America/Toronto") + datetime := time.Date(2016, 02, 03, 9, 0, 1, 0, loc) + + l := en_CA.New() + + // Dates + fmt.Println(l.FmtDateFull(datetime)) + fmt.Println(l.FmtDateLong(datetime)) + fmt.Println(l.FmtDateMedium(datetime)) + fmt.Println(l.FmtDateShort(datetime)) + + // Times + fmt.Println(l.FmtTimeFull(datetime)) + fmt.Println(l.FmtTimeLong(datetime)) + fmt.Println(l.FmtTimeMedium(datetime)) + fmt.Println(l.FmtTimeShort(datetime)) + + // Months Wide + fmt.Println(l.MonthWide(time.January)) + fmt.Println(l.MonthWide(time.February)) + fmt.Println(l.MonthWide(time.March)) + // ... 
+ + // Months Abbreviated + fmt.Println(l.MonthAbbreviated(time.January)) + fmt.Println(l.MonthAbbreviated(time.February)) + fmt.Println(l.MonthAbbreviated(time.March)) + // ... + + // Months Narrow + fmt.Println(l.MonthNarrow(time.January)) + fmt.Println(l.MonthNarrow(time.February)) + fmt.Println(l.MonthNarrow(time.March)) + // ... + + // Weekdays Wide + fmt.Println(l.WeekdayWide(time.Sunday)) + fmt.Println(l.WeekdayWide(time.Monday)) + fmt.Println(l.WeekdayWide(time.Tuesday)) + // ... + + // Weekdays Abbreviated + fmt.Println(l.WeekdayAbbreviated(time.Sunday)) + fmt.Println(l.WeekdayAbbreviated(time.Monday)) + fmt.Println(l.WeekdayAbbreviated(time.Tuesday)) + // ... + + // Weekdays Short + fmt.Println(l.WeekdayShort(time.Sunday)) + fmt.Println(l.WeekdayShort(time.Monday)) + fmt.Println(l.WeekdayShort(time.Tuesday)) + // ... + + // Weekdays Narrow + fmt.Println(l.WeekdayNarrow(time.Sunday)) + fmt.Println(l.WeekdayNarrow(time.Monday)) + fmt.Println(l.WeekdayNarrow(time.Tuesday)) + // ... 
+ + var f64 float64 + + f64 = -10356.4523 + + // Number + fmt.Println(l.FmtNumber(f64, 2)) + + // Currency + fmt.Println(l.FmtCurrency(f64, 2, currency.CAD)) + fmt.Println(l.FmtCurrency(f64, 2, currency.USD)) + + // Accounting + fmt.Println(l.FmtAccounting(f64, 2, currency.CAD)) + fmt.Println(l.FmtAccounting(f64, 2, currency.USD)) + + f64 = 78.12 + + // Percent + fmt.Println(l.FmtPercent(f64, 0)) + + // Plural Rules for locale, so you know what rules you must cover + fmt.Println(l.PluralsCardinal()) + fmt.Println(l.PluralsOrdinal()) + + // Cardinal Plural Rules + fmt.Println(l.CardinalPluralRule(1, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 0)) + fmt.Println(l.CardinalPluralRule(1.0, 1)) + fmt.Println(l.CardinalPluralRule(3, 0)) + + // Ordinal Plural Rules + fmt.Println(l.OrdinalPluralRule(21, 0)) // 21st + fmt.Println(l.OrdinalPluralRule(22, 0)) // 22nd + fmt.Println(l.OrdinalPluralRule(33, 0)) // 33rd + fmt.Println(l.OrdinalPluralRule(34, 0)) // 34th + + // Range Plural Rules + fmt.Println(l.RangePluralRule(1, 0, 1, 0)) // 1-1 + fmt.Println(l.RangePluralRule(1, 0, 2, 0)) // 1-2 + fmt.Println(l.RangePluralRule(5, 0, 8, 0)) // 5-8 +} +``` + +NOTES: +------- +These rules were generated from the [Unicode CLDR Project](http://cldr.unicode.org/), if you encounter any issues +I strongly encourage contributing to the CLDR project to get the locale information corrected and the next time +these locales are regenerated the fix will come with. + +I do however realize that time constraints are often important and so there are two options: + +1. Create your own locale, copy, paste and modify, and ensure it complies with the `Translator` interface. +2. Add an exception in the locale generation code directly and once regenerated, fix will be in place. + +Please to not make fixes inside the locale files, they WILL get overwritten when the locales are regenerated. + +License +------ +Distributed under MIT License, please see license file in code for more details. 
diff --git a/terraform-server/vendor/github.com/go-playground/locales/currency/currency.go b/terraform-server/vendor/github.com/go-playground/locales/currency/currency.go new file mode 100644 index 00000000..cdaba596 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/currency/currency.go @@ -0,0 +1,308 @@ +package currency + +// Type is the currency type associated with the locales currency enum +type Type int + +// locale currencies +const ( + ADP Type = iota + AED + AFA + AFN + ALK + ALL + AMD + ANG + AOA + AOK + AON + AOR + ARA + ARL + ARM + ARP + ARS + ATS + AUD + AWG + AZM + AZN + BAD + BAM + BAN + BBD + BDT + BEC + BEF + BEL + BGL + BGM + BGN + BGO + BHD + BIF + BMD + BND + BOB + BOL + BOP + BOV + BRB + BRC + BRE + BRL + BRN + BRR + BRZ + BSD + BTN + BUK + BWP + BYB + BYN + BYR + BZD + CAD + CDF + CHE + CHF + CHW + CLE + CLF + CLP + CNH + CNX + CNY + COP + COU + CRC + CSD + CSK + CUC + CUP + CVE + CYP + CZK + DDM + DEM + DJF + DKK + DOP + DZD + ECS + ECV + EEK + EGP + ERN + ESA + ESB + ESP + ETB + EUR + FIM + FJD + FKP + FRF + GBP + GEK + GEL + GHC + GHS + GIP + GMD + GNF + GNS + GQE + GRD + GTQ + GWE + GWP + GYD + HKD + HNL + HRD + HRK + HTG + HUF + IDR + IEP + ILP + ILR + ILS + INR + IQD + IRR + ISJ + ISK + ITL + JMD + JOD + JPY + KES + KGS + KHR + KMF + KPW + KRH + KRO + KRW + KWD + KYD + KZT + LAK + LBP + LKR + LRD + LSL + LTL + LTT + LUC + LUF + LUL + LVL + LVR + LYD + MAD + MAF + MCF + MDC + MDL + MGA + MGF + MKD + MKN + MLF + MMK + MNT + MOP + MRO + MTL + MTP + MUR + MVP + MVR + MWK + MXN + MXP + MXV + MYR + MZE + MZM + MZN + NAD + NGN + NIC + NIO + NLG + NOK + NPR + NZD + OMR + PAB + PEI + PEN + PES + PGK + PHP + PKR + PLN + PLZ + PTE + PYG + QAR + RHD + ROL + RON + RSD + RUB + RUR + RWF + SAR + SBD + SCR + SDD + SDG + SDP + SEK + SGD + SHP + SIT + SKK + SLL + SOS + SRD + SRG + SSP + STD + STN + SUR + SVC + SYP + SZL + THB + TJR + TJS + TMM + TMT + TND + TOP + TPE + TRL + TRY + TTD + TWD + TZS + UAH + UAK + UGS + UGX + USD + 
USN + USS + UYI + UYP + UYU + UZS + VEB + VEF + VND + VNN + VUV + WST + XAF + XAG + XAU + XBA + XBB + XBC + XBD + XCD + XDR + XEU + XFO + XFU + XOF + XPD + XPF + XPT + XRE + XSU + XTS + XUA + XXX + YDD + YER + YUD + YUM + YUN + YUR + ZAL + ZAR + ZMK + ZMW + ZRN + ZRZ + ZWD + ZWL + ZWR +) diff --git a/terraform-server/vendor/github.com/go-playground/locales/go.mod b/terraform-server/vendor/github.com/go-playground/locales/go.mod new file mode 100644 index 00000000..34ab6f23 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/go.mod @@ -0,0 +1,5 @@ +module github.com/go-playground/locales + +go 1.13 + +require golang.org/x/text v0.3.2 diff --git a/terraform-server/vendor/github.com/go-playground/locales/go.sum b/terraform-server/vendor/github.com/go-playground/locales/go.sum new file mode 100644 index 00000000..63c9200f --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/go.sum @@ -0,0 +1,3 @@ +golang.org/x/text v0.3.2 h1:tW2bmiBqwgJj/UpqtC8EpXEZVYOwU0yG4iWbprSVAcs= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/terraform-server/vendor/github.com/go-playground/locales/logo.png b/terraform-server/vendor/github.com/go-playground/locales/logo.png new file mode 100644 index 00000000..3038276e Binary files /dev/null and b/terraform-server/vendor/github.com/go-playground/locales/logo.png differ diff --git a/terraform-server/vendor/github.com/go-playground/locales/rules.go b/terraform-server/vendor/github.com/go-playground/locales/rules.go new file mode 100644 index 00000000..92029001 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/locales/rules.go @@ -0,0 +1,293 @@ +package locales + +import ( + "strconv" + "time" + + "github.com/go-playground/locales/currency" +) + +// // ErrBadNumberValue is returned when the number passed for +// // plural rule 
determination cannot be parsed +// type ErrBadNumberValue struct { +// NumberValue string +// InnerError error +// } + +// // Error returns ErrBadNumberValue error string +// func (e *ErrBadNumberValue) Error() string { +// return fmt.Sprintf("Invalid Number Value '%s' %s", e.NumberValue, e.InnerError) +// } + +// var _ error = new(ErrBadNumberValue) + +// PluralRule denotes the type of plural rules +type PluralRule int + +// PluralRule's +const ( + PluralRuleUnknown PluralRule = iota + PluralRuleZero // zero + PluralRuleOne // one - singular + PluralRuleTwo // two - dual + PluralRuleFew // few - paucal + PluralRuleMany // many - also used for fractions if they have a separate class + PluralRuleOther // other - required—general plural form—also used if the language only has a single form +) + +const ( + pluralsString = "UnknownZeroOneTwoFewManyOther" +) + +// Translator encapsulates an instance of a locale +// NOTE: some values are returned as a []byte just in case the caller +// wishes to add more and can help avoid allocations; otherwise just cast as string +type Translator interface { + + // The following Functions are for overriding, debugging or developing + // with a Translator Locale + + // Locale returns the string value of the translator + Locale() string + + // returns an array of cardinal plural rules associated + // with this translator + PluralsCardinal() []PluralRule + + // returns an array of ordinal plural rules associated + // with this translator + PluralsOrdinal() []PluralRule + + // returns an array of range plural rules associated + // with this translator + PluralsRange() []PluralRule + + // returns the cardinal PluralRule given 'num' and digits/precision of 'v' for locale + CardinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num' and digits/precision of 'v' for locale + OrdinalPluralRule(num float64, v uint64) PluralRule + + // returns the ordinal PluralRule given 'num1', 'num2' and 
digits/precision of 'v1' and 'v2' for locale + RangePluralRule(num1 float64, v1 uint64, num2 float64, v2 uint64) PluralRule + + // returns the locales abbreviated month given the 'month' provided + MonthAbbreviated(month time.Month) string + + // returns the locales abbreviated months + MonthsAbbreviated() []string + + // returns the locales narrow month given the 'month' provided + MonthNarrow(month time.Month) string + + // returns the locales narrow months + MonthsNarrow() []string + + // returns the locales wide month given the 'month' provided + MonthWide(month time.Month) string + + // returns the locales wide months + MonthsWide() []string + + // returns the locales abbreviated weekday given the 'weekday' provided + WeekdayAbbreviated(weekday time.Weekday) string + + // returns the locales abbreviated weekdays + WeekdaysAbbreviated() []string + + // returns the locales narrow weekday given the 'weekday' provided + WeekdayNarrow(weekday time.Weekday) string + + // WeekdaysNarrowreturns the locales narrow weekdays + WeekdaysNarrow() []string + + // returns the locales short weekday given the 'weekday' provided + WeekdayShort(weekday time.Weekday) string + + // returns the locales short weekdays + WeekdaysShort() []string + + // returns the locales wide weekday given the 'weekday' provided + WeekdayWide(weekday time.Weekday) string + + // returns the locales wide weekdays + WeekdaysWide() []string + + // The following Functions are common Formatting functionsfor the Translator's Locale + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + FmtNumber(num float64, v uint64) string + + // returns 'num' with digits/precision of 'v' for locale and handles both Whole and Real numbers based on 'v' + // NOTE: 'num' passed into FmtPercent is assumed to be in percent already + FmtPercent(num float64, v uint64) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + 
FmtCurrency(num float64, v uint64, currency currency.Type) string + + // returns the currency representation of 'num' with digits/precision of 'v' for locale + // in accounting notation. + FmtAccounting(num float64, v uint64, currency currency.Type) string + + // returns the short date representation of 't' for locale + FmtDateShort(t time.Time) string + + // returns the medium date representation of 't' for locale + FmtDateMedium(t time.Time) string + + // returns the long date representation of 't' for locale + FmtDateLong(t time.Time) string + + // returns the full date representation of 't' for locale + FmtDateFull(t time.Time) string + + // returns the short time representation of 't' for locale + FmtTimeShort(t time.Time) string + + // returns the medium time representation of 't' for locale + FmtTimeMedium(t time.Time) string + + // returns the long time representation of 't' for locale + FmtTimeLong(t time.Time) string + + // returns the full time representation of 't' for locale + FmtTimeFull(t time.Time) string +} + +// String returns the string value of PluralRule +func (p PluralRule) String() string { + + switch p { + case PluralRuleZero: + return pluralsString[7:11] + case PluralRuleOne: + return pluralsString[11:14] + case PluralRuleTwo: + return pluralsString[14:17] + case PluralRuleFew: + return pluralsString[17:20] + case PluralRuleMany: + return pluralsString[20:24] + case PluralRuleOther: + return pluralsString[24:] + default: + return pluralsString[:7] + } +} + +// +// Precision Notes: +// +// must specify a precision >= 0, and here is why https://play.golang.org/p/LyL90U0Vyh +// +// v := float64(3.141) +// i := float64(int64(v)) +// +// fmt.Println(v - i) +// +// or +// +// s := strconv.FormatFloat(v-i, 'f', -1, 64) +// fmt.Println(s) +// +// these will not print what you'd expect: 0.14100000000000001 +// and so this library requires a precision to be specified, or +// inaccurate plural rules could be applied. 
+// +// +// +// n - absolute value of the source number (integer and decimals). +// i - integer digits of n. +// v - number of visible fraction digits in n, with trailing zeros. +// w - number of visible fraction digits in n, without trailing zeros. +// f - visible fractional digits in n, with trailing zeros. +// t - visible fractional digits in n, without trailing zeros. +// +// +// Func(num float64, v uint64) // v = digits/precision and prevents -1 as a special case as this can lead to very unexpected behaviour, see precision note's above. +// +// n := math.Abs(num) +// i := int64(n) +// v := v +// +// +// w := strconv.FormatFloat(num-float64(i), 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// f := strconv.FormatFloat(n, 'f', int(v), 64) // then turn everything after decimal into an int64 +// t := strconv.FormatFloat(n, 'f', int(v), 64) // then parse backwards on string until no more zero's.... +// +// +// +// General Inclusion Rules +// - v will always be available inherently +// - all require n +// - w requires i +// + +// W returns the number of visible fraction digits in N, without trailing zeros. +func W(n float64, v uint64) (w int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then w will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + w = int64(len(s[:end])) + } + + return +} + +// F returns the visible fractional digits in N, with trailing zeros. 
+func F(n float64, v uint64) (f int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then f will be zero + // otherwise need to parse + if len(s) != 1 { + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + f, _ = strconv.ParseInt(s[2:], 10, 64) + } + + return +} + +// T returns the visible fractional digits in N, without trailing zeros. +func T(n float64, v uint64) (t int64) { + + s := strconv.FormatFloat(n-float64(int64(n)), 'f', int(v), 64) + + // with either be '0' or '0.xxxx', so if 1 then t will be zero + // otherwise need to parse + if len(s) != 1 { + + s = s[2:] + end := len(s) + 1 + + for i := end; i >= 0; i-- { + if s[i] != '0' { + end = i + 1 + break + } + } + + // ignoring error, because it can't fail as we generated + // the string internally from a real number + t, _ = strconv.ParseInt(s[:end], 10, 64) + } + + return +} diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/.gitignore b/terraform-server/vendor/github.com/go-playground/universal-translator/.gitignore new file mode 100644 index 00000000..bc4e07f3 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/.gitignore @@ -0,0 +1,25 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.coverprofile \ No newline at end of file diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/.travis.yml b/terraform-server/vendor/github.com/go-playground/universal-translator/.travis.yml new file mode 100644 index 00000000..39b8b923 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/.travis.yml @@ -0,0 +1,27 @@ 
+language: go +go: + - 1.13.4 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... + +after_success: | + [ $TRAVIS_GO_VERSION = 1.13.4 ] && + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN \ No newline at end of file diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/LICENSE b/terraform-server/vendor/github.com/go-playground/universal-translator/LICENSE new file mode 100644 index 00000000..8d8aba15 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 Go Playground + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/README.md b/terraform-server/vendor/github.com/go-playground/universal-translator/README.md new file mode 100644 index 00000000..071f33ab --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/README.md @@ -0,0 +1,89 @@ +## universal-translator +![Project status](https://img.shields.io/badge/version-0.17.0-green.svg) +[![Build Status](https://travis-ci.org/go-playground/universal-translator.svg?branch=master)](https://travis-ci.org/go-playground/universal-translator) +[![Coverage Status](https://coveralls.io/repos/github/go-playground/universal-translator/badge.svg)](https://coveralls.io/github/go-playground/universal-translator) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/universal-translator)](https://goreportcard.com/report/github.com/go-playground/universal-translator) +[![GoDoc](https://godoc.org/github.com/go-playground/universal-translator?status.svg)](https://godoc.org/github.com/go-playground/universal-translator) +![License](https://img.shields.io/dub/l/vibe-d.svg) +[![Gitter](https://badges.gitter.im/go-playground/universal-translator.svg)](https://gitter.im/go-playground/universal-translator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge) + +Universal Translator is an i18n Translator for Go/Golang using CLDR data + pluralization rules + +Why another i18n library? 
+-------------------------- +Because none of the plural rules seem to be correct out there, including the previous implementation of this package, +so I took it upon myself to create [locales](https://github.com/go-playground/locales) for everyone to use; this package +is a thin wrapper around [locales](https://github.com/go-playground/locales) in order to store and translate text for +use in your applications. + +Features +-------- +- [x] Rules generated from the [CLDR](http://cldr.unicode.org/index/downloads) data, v30.0.3 +- [x] Contains Cardinal, Ordinal and Range Plural Rules +- [x] Contains Month, Weekday and Timezone translations built in +- [x] Contains Date & Time formatting functions +- [x] Contains Number, Currency, Accounting and Percent formatting functions +- [x] Supports the "Gregorian" calendar only ( my time isn't unlimited, had to draw the line somewhere ) +- [x] Support loading translations from files +- [x] Exporting translations to file(s), mainly for getting them professionally translated +- [ ] Code Generation for translation files -> Go code.. i.e. 
after it has been professionally translated +- [ ] Tests for all languages, I need help with this, please see [here](https://github.com/go-playground/locales/issues/1) + +Installation +----------- + +Use go get + +```shell +go get github.com/go-playground/universal-translator +``` + +Usage & Documentation +------- + +Please see https://godoc.org/github.com/go-playground/universal-translator for usage docs + +##### Examples: + +- [Basic](https://github.com/go-playground/universal-translator/tree/master/_examples/basic) +- [Full - no files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-no-files) +- [Full - with files](https://github.com/go-playground/universal-translator/tree/master/_examples/full-with-files) + +File formatting +-------------- +All types, Plain substitution, Cardinal, Ordinal and Range translations can all be contained withing the same file(s); +they are only separated for easy viewing. + +##### Examples: + +- [Formats](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) + +##### Basic Makeup +NOTE: not all fields are needed for all translation types, see [examples](https://github.com/go-playground/universal-translator/tree/master/_examples/file-formats) +```json +{ + "locale": "en", + "key": "days-left", + "trans": "You have {0} day left.", + "type": "Cardinal", + "rule": "One", + "override": false +} +``` +|Field|Description| +|---|---| +|locale|The locale for which the translation is for.| +|key|The translation key that will be used to store and lookup each translation; normally it is a string or integer.| +|trans|The actual translation text.| +|type|The type of translation Cardinal, Ordinal, Range or "" for a plain substitution(not required to be defined if plain used)| +|rule|The plural rule for which the translation is for eg. 
One, Two, Few, Many or Other.(not required to be defined if plain used)| +|override|If you wish to override an existing translation that has already been registered, set this to 'true'. 99% of the time there is no need to define it.| + +Help With Tests +--------------- +To anyone interesting in helping or contributing, I sure could use some help creating tests for each language. +Please see issue [here](https://github.com/go-playground/locales/issues/1) for details. + +License +------ +Distributed under MIT License, please see license file in code for more details. diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/errors.go b/terraform-server/vendor/github.com/go-playground/universal-translator/errors.go new file mode 100644 index 00000000..38b163b6 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/errors.go @@ -0,0 +1,148 @@ +package ut + +import ( + "errors" + "fmt" + + "github.com/go-playground/locales" +) + +var ( + // ErrUnknowTranslation indicates the translation could not be found + ErrUnknowTranslation = errors.New("Unknown Translation") +) + +var _ error = new(ErrConflictingTranslation) +var _ error = new(ErrRangeTranslation) +var _ error = new(ErrOrdinalTranslation) +var _ error = new(ErrCardinalTranslation) +var _ error = new(ErrMissingPluralTranslation) +var _ error = new(ErrExistingTranslator) + +// ErrExistingTranslator is the error representing a conflicting translator +type ErrExistingTranslator struct { + locale string +} + +// Error returns ErrExistingTranslator's internal error text +func (e *ErrExistingTranslator) Error() string { + return fmt.Sprintf("error: conflicting translator for locale '%s'", e.locale) +} + +// ErrConflictingTranslation is the error representing a conflicting translation +type ErrConflictingTranslation struct { + locale string + key interface{} + rule locales.PluralRule + text string +} + +// Error returns ErrConflictingTranslation's internal error 
text +func (e *ErrConflictingTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: conflicting key '%#v' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) + } + + return fmt.Sprintf("error: conflicting key '%s' rule '%s' with text '%s' for locale '%s', value being ignored", e.key, e.rule, e.text, e.locale) +} + +// ErrRangeTranslation is the error representing a range translation error +type ErrRangeTranslation struct { + text string +} + +// Error returns ErrRangeTranslation's internal error text +func (e *ErrRangeTranslation) Error() string { + return e.text +} + +// ErrOrdinalTranslation is the error representing an ordinal translation error +type ErrOrdinalTranslation struct { + text string +} + +// Error returns ErrOrdinalTranslation's internal error text +func (e *ErrOrdinalTranslation) Error() string { + return e.text +} + +// ErrCardinalTranslation is the error representing a cardinal translation error +type ErrCardinalTranslation struct { + text string +} + +// Error returns ErrCardinalTranslation's internal error text +func (e *ErrCardinalTranslation) Error() string { + return e.text +} + +// ErrMissingPluralTranslation is the error signifying a missing translation given +// the locales plural rules. 
+type ErrMissingPluralTranslation struct { + locale string + key interface{} + rule locales.PluralRule + translationType string +} + +// Error returns ErrMissingPluralTranslation's internal error text +func (e *ErrMissingPluralTranslation) Error() string { + + if _, ok := e.key.(string); !ok { + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%#v' and locale '%s'", e.translationType, e.rule, e.key, e.locale) + } + + return fmt.Sprintf("error: missing '%s' plural rule '%s' for translation with key '%s' and locale '%s'", e.translationType, e.rule, e.key, e.locale) +} + +// ErrMissingBracket is the error representing a missing bracket in a translation +// eg. This is a {0 <-- missing ending '}' +type ErrMissingBracket struct { + locale string + key interface{} + text string +} + +// Error returns ErrMissingBracket error message +func (e *ErrMissingBracket) Error() string { + return fmt.Sprintf("error: missing bracket '{}', in translation. locale: '%s' key: '%v' text: '%s'", e.locale, e.key, e.text) +} + +// ErrBadParamSyntax is the error representing a bad parameter definition in a translation +// eg. This is a {must-be-int} +type ErrBadParamSyntax struct { + locale string + param string + key interface{} + text string +} + +// Error returns ErrBadParamSyntax error message +func (e *ErrBadParamSyntax) Error() string { + return fmt.Sprintf("error: bad parameter syntax, missing parameter '%s' in translation. 
locale: '%s' key: '%v' text: '%s'", e.param, e.locale, e.key, e.text) +} + +// import/export errors + +// ErrMissingLocale is the error representing an expected locale that could +// not be found aka locale not registered with the UniversalTranslator Instance +type ErrMissingLocale struct { + locale string +} + +// Error returns ErrMissingLocale's internal error text +func (e *ErrMissingLocale) Error() string { + return fmt.Sprintf("error: locale '%s' not registered.", e.locale) +} + +// ErrBadPluralDefinition is the error representing an incorrect plural definition +// usually found within translations defined within files during the import process. +type ErrBadPluralDefinition struct { + tl translation +} + +// Error returns ErrBadPluralDefinition's internal error text +func (e *ErrBadPluralDefinition) Error() string { + return fmt.Sprintf("error: bad plural definition '%#v'", e.tl) +} diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/go.mod b/terraform-server/vendor/github.com/go-playground/universal-translator/go.mod new file mode 100644 index 00000000..8079590f --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/go.mod @@ -0,0 +1,5 @@ +module github.com/go-playground/universal-translator + +go 1.13 + +require github.com/go-playground/locales v0.13.0 diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/go.sum b/terraform-server/vendor/github.com/go-playground/universal-translator/go.sum new file mode 100644 index 00000000..cbbf3241 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/go.sum @@ -0,0 +1,4 @@ +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools 
v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/import_export.go b/terraform-server/vendor/github.com/go-playground/universal-translator/import_export.go new file mode 100644 index 00000000..7bd76f26 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/import_export.go @@ -0,0 +1,274 @@ +package ut + +import ( + "encoding/json" + "fmt" + "io/ioutil" + "os" + "path/filepath" + + "io" + + "github.com/go-playground/locales" +) + +type translation struct { + Locale string `json:"locale"` + Key interface{} `json:"key"` // either string or integer + Translation string `json:"trans"` + PluralType string `json:"type,omitempty"` + PluralRule string `json:"rule,omitempty"` + OverrideExisting bool `json:"override,omitempty"` +} + +const ( + cardinalType = "Cardinal" + ordinalType = "Ordinal" + rangeType = "Range" +) + +// ImportExportFormat is the format of the file import or export +type ImportExportFormat uint8 + +// supported Export Formats +const ( + FormatJSON ImportExportFormat = iota +) + +// Export writes the translations out to a file on disk. +// +// NOTE: this currently only works with string or int translations keys. 
+func (t *UniversalTranslator) Export(format ImportExportFormat, dirname string) error { + + _, err := os.Stat(dirname) + fmt.Println(dirname, err, os.IsNotExist(err)) + if err != nil { + + if !os.IsNotExist(err) { + return err + } + + if err = os.MkdirAll(dirname, 0744); err != nil { + return err + } + } + + // build up translations + var trans []translation + var b []byte + var ext string + + for _, locale := range t.translators { + + for k, v := range locale.(*translator).translations { + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k, + Translation: v.text, + }) + } + + for k, pluralTrans := range locale.(*translator).cardinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: cardinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).ordinalTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. + if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: ordinalType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + for k, pluralTrans := range locale.(*translator).rangeTanslations { + + for i, plural := range pluralTrans { + + // leave enough for all plural rules + // but not all are set for all languages. 
+ if plural == nil { + continue + } + + trans = append(trans, translation{ + Locale: locale.Locale(), + Key: k.(string), + Translation: plural.text, + PluralType: rangeType, + PluralRule: locales.PluralRule(i).String(), + }) + } + } + + switch format { + case FormatJSON: + b, err = json.MarshalIndent(trans, "", " ") + ext = ".json" + } + + if err != nil { + return err + } + + err = ioutil.WriteFile(filepath.Join(dirname, fmt.Sprintf("%s%s", locale.Locale(), ext)), b, 0644) + if err != nil { + return err + } + + trans = trans[0:0] + } + + return nil +} + +// Import reads the translations out of a file or directory on disk. +// +// NOTE: this currently only works with string or int translations keys. +func (t *UniversalTranslator) Import(format ImportExportFormat, dirnameOrFilename string) error { + + fi, err := os.Stat(dirnameOrFilename) + if err != nil { + return err + } + + processFn := func(filename string) error { + + f, err := os.Open(filename) + if err != nil { + return err + } + defer f.Close() + + return t.ImportByReader(format, f) + } + + if !fi.IsDir() { + return processFn(dirnameOrFilename) + } + + // recursively go through directory + walker := func(path string, info os.FileInfo, err error) error { + + if info.IsDir() { + return nil + } + + switch format { + case FormatJSON: + // skip non JSON files + if filepath.Ext(info.Name()) != ".json" { + return nil + } + } + + return processFn(path) + } + + return filepath.Walk(dirnameOrFilename, walker) +} + +// ImportByReader imports the the translations found within the contents read from the supplied reader. +// +// NOTE: generally used when assets have been embedded into the binary and are already in memory. 
+func (t *UniversalTranslator) ImportByReader(format ImportExportFormat, reader io.Reader) error { + + b, err := ioutil.ReadAll(reader) + if err != nil { + return err + } + + var trans []translation + + switch format { + case FormatJSON: + err = json.Unmarshal(b, &trans) + } + + if err != nil { + return err + } + + for _, tl := range trans { + + locale, found := t.FindTranslator(tl.Locale) + if !found { + return &ErrMissingLocale{locale: tl.Locale} + } + + pr := stringToPR(tl.PluralRule) + + if pr == locales.PluralRuleUnknown { + + err = locale.Add(tl.Key, tl.Translation, tl.OverrideExisting) + if err != nil { + return err + } + + continue + } + + switch tl.PluralType { + case cardinalType: + err = locale.AddCardinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case ordinalType: + err = locale.AddOrdinal(tl.Key, tl.Translation, pr, tl.OverrideExisting) + case rangeType: + err = locale.AddRange(tl.Key, tl.Translation, pr, tl.OverrideExisting) + default: + return &ErrBadPluralDefinition{tl: tl} + } + + if err != nil { + return err + } + } + + return nil +} + +func stringToPR(s string) locales.PluralRule { + + switch s { + case "One": + return locales.PluralRuleOne + case "Two": + return locales.PluralRuleTwo + case "Few": + return locales.PluralRuleFew + case "Many": + return locales.PluralRuleMany + case "Other": + return locales.PluralRuleOther + default: + return locales.PluralRuleUnknown + } + +} diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/logo.png b/terraform-server/vendor/github.com/go-playground/universal-translator/logo.png new file mode 100644 index 00000000..a37aa8c0 Binary files /dev/null and b/terraform-server/vendor/github.com/go-playground/universal-translator/logo.png differ diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/translator.go b/terraform-server/vendor/github.com/go-playground/universal-translator/translator.go new file mode 100644 index 00000000..cfafce8a --- 
/dev/null +++ b/terraform-server/vendor/github.com/go-playground/universal-translator/translator.go @@ -0,0 +1,420 @@ +package ut + +import ( + "fmt" + "strconv" + "strings" + + "github.com/go-playground/locales" +) + +const ( + paramZero = "{0}" + paramOne = "{1}" + unknownTranslation = "" +) + +// Translator is universal translators +// translator instance which is a thin wrapper +// around locales.Translator instance providing +// some extra functionality +type Translator interface { + locales.Translator + + // adds a normal translation for a particular language/locale + // {#} is the only replacement type accepted and are ad infinitum + // eg. one: '{0} day left' other: '{0} days left' + Add(key interface{}, text string, override bool) error + + // adds a cardinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0} day left' other: '{0} days left' + AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds an ordinal plural translation for a particular language/locale + // {0} is the only replacement type accepted and only one variable is accepted as + // multiple cannot be used for a plural rule determination, unless it is a range; + // see AddRange below. + // eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' + // - 1st, 2nd, 3rd... + AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error + + // adds a range plural translation for a particular language/locale + // {0} and {1} are the only replacement types accepted and only these are accepted. + // eg. 
in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' + AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error + + // creates the translation for the locale given the 'key' and params passed in + T(key interface{}, params ...string) (string, error) + + // creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + C(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments + // and param passed in + O(key interface{}, num float64, digits uint64, param string) (string, error) + + // creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and + // 'digit2' arguments and 'param1' and 'param2' passed in + R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) + + // VerifyTranslations checks to ensures that no plural rules have been + // missed within the translations. 
+ VerifyTranslations() error +} + +var _ Translator = new(translator) +var _ locales.Translator = new(translator) + +type translator struct { + locales.Translator + translations map[interface{}]*transText + cardinalTanslations map[interface{}][]*transText // array index is mapped to locales.PluralRule index + the locales.PluralRuleUnknown + ordinalTanslations map[interface{}][]*transText + rangeTanslations map[interface{}][]*transText +} + +type transText struct { + text string + indexes []int +} + +func newTranslator(trans locales.Translator) Translator { + return &translator{ + Translator: trans, + translations: make(map[interface{}]*transText), // translation text broken up by byte index + cardinalTanslations: make(map[interface{}][]*transText), + ordinalTanslations: make(map[interface{}][]*transText), + rangeTanslations: make(map[interface{}][]*transText), + } +} + +// Add adds a normal translation for a particular language/locale +// {#} is the only replacement type accepted and are ad infinitum +// eg. 
one: '{0} day left' other: '{0} days left' +func (t *translator) Add(key interface{}, text string, override bool) error { + + if _, ok := t.translations[key]; ok && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, text: text} + } + + lb := strings.Count(text, "{") + rb := strings.Count(text, "}") + + if lb != rb { + return &ErrMissingBracket{locale: t.Locale(), key: key, text: text} + } + + trans := &transText{ + text: text, + } + + var idx int + + for i := 0; i < lb; i++ { + s := "{" + strconv.Itoa(i) + "}" + idx = strings.Index(text, s) + if idx == -1 { + return &ErrBadParamSyntax{locale: t.Locale(), param: s, key: key, text: text} + } + + trans.indexes = append(trans.indexes, idx) + trans.indexes = append(trans.indexes, idx+len(s)) + } + + t.translations[key] = trans + + return nil +} + +// AddCardinal adds a cardinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. 
in locale 'en' one: '{0} day left' other: '{0} days left' +func (t *translator) AddCardinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsCardinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrCardinalTranslation{text: fmt.Sprintf("error: cardinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.cardinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.cardinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrCardinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddCardinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddOrdinal adds an ordinal plural translation for a particular language/locale +// {0} is the only replacement type accepted and only one variable is accepted as +// multiple cannot be used for a plural rule determination, unless it is a range; +// see AddRange below. +// eg. in locale 'en' one: '{0}st day of spring' other: '{0}nd day of spring' - 1st, 2nd, 3rd... 
+func (t *translator) AddOrdinal(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsOrdinal() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: ordinal plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.ordinalTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.ordinalTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 2, 2), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrOrdinalTranslation{text: fmt.Sprintf("error: parameter '%s' not found, may want to use 'Add' instead of 'AddOrdinal'. locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + return nil +} + +// AddRange adds a range plural translation for a particular language/locale +// {0} and {1} are the only replacement types accepted and only these are accepted. +// eg. 
in locale 'nl' one: '{0}-{1} day left' other: '{0}-{1} days left' +func (t *translator) AddRange(key interface{}, text string, rule locales.PluralRule, override bool) error { + + var verified bool + + // verify plural rule exists for locale + for _, pr := range t.PluralsRange() { + if pr == rule { + verified = true + break + } + } + + if !verified { + return &ErrRangeTranslation{text: fmt.Sprintf("error: range plural rule '%s' does not exist for locale '%s' key: '%v' text: '%s'", rule, t.Locale(), key, text)} + } + + tarr, ok := t.rangeTanslations[key] + if ok { + // verify not adding a conflicting record + if len(tarr) > 0 && tarr[rule] != nil && !override { + return &ErrConflictingTranslation{locale: t.Locale(), key: key, rule: rule, text: text} + } + + } else { + tarr = make([]*transText, 7, 7) + t.rangeTanslations[key] = tarr + } + + trans := &transText{ + text: text, + indexes: make([]int, 4, 4), + } + + tarr[rule] = trans + + idx := strings.Index(text, paramZero) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, are you sure you're adding a Range Translation? locale: '%s' key: '%v' text: '%s'", paramZero, t.Locale(), key, text)} + } + + trans.indexes[0] = idx + trans.indexes[1] = idx + len(paramZero) + + idx = strings.Index(text, paramOne) + if idx == -1 { + tarr[rule] = nil + return &ErrRangeTranslation{text: fmt.Sprintf("error: parameter '%s' not found, a Range Translation requires two parameters. 
locale: '%s' key: '%v' text: '%s'", paramOne, t.Locale(), key, text)} + } + + trans.indexes[2] = idx + trans.indexes[3] = idx + len(paramOne) + + return nil +} + +// T creates the translation for the locale given the 'key' and params passed in +func (t *translator) T(key interface{}, params ...string) (string, error) { + + trans, ok := t.translations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + b := make([]byte, 0, 64) + + var start, end, count int + + for i := 0; i < len(trans.indexes); i++ { + end = trans.indexes[i] + b = append(b, trans.text[start:end]...) + b = append(b, params[count]...) + i++ + start = trans.indexes[i] + count++ + } + + b = append(b, trans.text[start:]...) + + return string(b), nil +} + +// C creates the cardinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) C(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.cardinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.CardinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) + + return string(b), nil +} + +// O creates the ordinal translation for the locale given the 'key', 'num' and 'digit' arguments and param passed in +func (t *translator) O(key interface{}, num float64, digits uint64, param string) (string, error) { + + tarr, ok := t.ordinalTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.OrdinalPluralRule(num, digits) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param...) + b = append(b, trans.text[trans.indexes[1]:]...) 
+ + return string(b), nil +} + +// R creates the range translation for the locale given the 'key', 'num1', 'digit1', 'num2' and 'digit2' arguments +// and 'param1' and 'param2' passed in +func (t *translator) R(key interface{}, num1 float64, digits1 uint64, num2 float64, digits2 uint64, param1, param2 string) (string, error) { + + tarr, ok := t.rangeTanslations[key] + if !ok { + return unknownTranslation, ErrUnknowTranslation + } + + rule := t.RangePluralRule(num1, digits1, num2, digits2) + + trans := tarr[rule] + + b := make([]byte, 0, 64) + b = append(b, trans.text[:trans.indexes[0]]...) + b = append(b, param1...) + b = append(b, trans.text[trans.indexes[1]:trans.indexes[2]]...) + b = append(b, param2...) + b = append(b, trans.text[trans.indexes[3]:]...) + + return string(b), nil +} + +// VerifyTranslations checks to ensures that no plural rules have been +// missed within the translations. +func (t *translator) VerifyTranslations() error { + + for k, v := range t.cardinalTanslations { + + for _, rule := range t.PluralsCardinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "plural", rule: rule, key: k} + } + } + } + + for k, v := range t.ordinalTanslations { + + for _, rule := range t.PluralsOrdinal() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "ordinal", rule: rule, key: k} + } + } + } + + for k, v := range t.rangeTanslations { + + for _, rule := range t.PluralsRange() { + + if v[rule] == nil { + return &ErrMissingPluralTranslation{locale: t.Locale(), translationType: "range", rule: rule, key: k} + } + } + } + + return nil +} diff --git a/terraform-server/vendor/github.com/go-playground/universal-translator/universal_translator.go b/terraform-server/vendor/github.com/go-playground/universal-translator/universal_translator.go new file mode 100644 index 00000000..dbf707f5 --- /dev/null +++ 
b/terraform-server/vendor/github.com/go-playground/universal-translator/universal_translator.go @@ -0,0 +1,113 @@ +package ut + +import ( + "strings" + + "github.com/go-playground/locales" +) + +// UniversalTranslator holds all locale & translation data +type UniversalTranslator struct { + translators map[string]Translator + fallback Translator +} + +// New returns a new UniversalTranslator instance set with +// the fallback locale and locales it should support +func New(fallback locales.Translator, supportedLocales ...locales.Translator) *UniversalTranslator { + + t := &UniversalTranslator{ + translators: make(map[string]Translator), + } + + for _, v := range supportedLocales { + + trans := newTranslator(v) + t.translators[strings.ToLower(trans.Locale())] = trans + + if fallback.Locale() == v.Locale() { + t.fallback = trans + } + } + + if t.fallback == nil && fallback != nil { + t.fallback = newTranslator(fallback) + } + + return t +} + +// FindTranslator trys to find a Translator based on an array of locales +// and returns the first one it can find, otherwise returns the +// fallback translator. 
+func (t *UniversalTranslator) FindTranslator(locales ...string) (trans Translator, found bool) { + + for _, locale := range locales { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + } + + return t.fallback, false +} + +// GetTranslator returns the specified translator for the given locale, +// or fallback if not found +func (t *UniversalTranslator) GetTranslator(locale string) (trans Translator, found bool) { + + if trans, found = t.translators[strings.ToLower(locale)]; found { + return + } + + return t.fallback, false +} + +// GetFallback returns the fallback locale +func (t *UniversalTranslator) GetFallback() Translator { + return t.fallback +} + +// AddTranslator adds the supplied translator, if it already exists the override param +// will be checked and if false an error will be returned, otherwise the translator will be +// overridden; if the fallback matches the supplied translator it will be overridden as well +// NOTE: this is normally only used when translator is embedded within a library +func (t *UniversalTranslator) AddTranslator(translator locales.Translator, override bool) error { + + lc := strings.ToLower(translator.Locale()) + _, ok := t.translators[lc] + if ok && !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + trans := newTranslator(translator) + + if t.fallback.Locale() == translator.Locale() { + + // because it's optional to have a fallback, I don't impose that limitation + // don't know why you wouldn't but... + if !override { + return &ErrExistingTranslator{locale: translator.Locale()} + } + + t.fallback = trans + } + + t.translators[lc] = trans + + return nil +} + +// VerifyTranslations runs through all locales and identifies any issues +// eg. 
missing plural rules for a locale +func (t *UniversalTranslator) VerifyTranslations() (err error) { + + for _, trans := range t.translators { + err = trans.VerifyTranslations() + if err != nil { + return + } + } + + return +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/.gitignore b/terraform-server/vendor/github.com/go-playground/validator/v10/.gitignore new file mode 100644 index 00000000..6e43fac0 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/.gitignore @@ -0,0 +1,30 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test +bin + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test +*.prof +*.test +*.out +*.txt +cover.html +README.html diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/.travis.yml b/terraform-server/vendor/github.com/go-playground/validator/v10/.travis.yml new file mode 100644 index 00000000..85a7be34 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/.travis.yml @@ -0,0 +1,29 @@ +language: go +go: + - 1.15.2 + - tip +matrix: + allow_failures: + - go: tip + +notifications: + email: + recipients: dean.karn@gmail.com + on_success: change + on_failure: always + +before_install: + - go install github.com/mattn/goveralls + - mkdir -p $GOPATH/src/gopkg.in + - ln -s $GOPATH/src/github.com/$TRAVIS_REPO_SLUG $GOPATH/src/gopkg.in/validator.v9 + +# Only clone the most recent commit. +git: + depth: 1 + +script: + - go test -v -race -covermode=atomic -coverprofile=coverage.coverprofile ./... 
+ +after_success: | + [ $TRAVIS_GO_VERSION = 1.15.2 ] && + goveralls -coverprofile=coverage.coverprofile -service travis-ci -repotoken $COVERALLS_TOKEN diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/LICENSE b/terraform-server/vendor/github.com/go-playground/validator/v10/LICENSE new file mode 100644 index 00000000..6a2ae9aa --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/LICENSE @@ -0,0 +1,22 @@ +The MIT License (MIT) + +Copyright (c) 2015 Dean Karn + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
+ diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/Makefile b/terraform-server/vendor/github.com/go-playground/validator/v10/Makefile new file mode 100644 index 00000000..19c91ed7 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/Makefile @@ -0,0 +1,18 @@ +GOCMD=GO111MODULE=on go + +linters-install: + @golangci-lint --version >/dev/null 2>&1 || { \ + echo "installing linting tools..."; \ + curl -sfL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh| sh -s v1.21.0; \ + } + +lint: linters-install + $(PWD)/bin/golangci-lint run + +test: + $(GOCMD) test -cover -race ./... + +bench: + $(GOCMD) test -bench=. -benchmem ./... + +.PHONY: test lint linters-install \ No newline at end of file diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/README.md b/terraform-server/vendor/github.com/go-playground/validator/v10/README.md new file mode 100644 index 00000000..04fbb3c8 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/README.md @@ -0,0 +1,299 @@ +Package validator +================ +[![Join the chat at https://gitter.im/go-playground/validator](https://badges.gitter.im/Join%20Chat.svg)](https://gitter.im/go-playground/validator?utm_source=badge&utm_medium=badge&utm_campaign=pr-badge&utm_content=badge) +![Project status](https://img.shields.io/badge/version-10.4.1-green.svg) +[![Build Status](https://travis-ci.org/go-playground/validator.svg?branch=master)](https://travis-ci.org/go-playground/validator) +[![Coverage Status](https://coveralls.io/repos/go-playground/validator/badge.svg?branch=master&service=github)](https://coveralls.io/github/go-playground/validator?branch=master) +[![Go Report Card](https://goreportcard.com/badge/github.com/go-playground/validator)](https://goreportcard.com/report/github.com/go-playground/validator) 
+[![GoDoc](https://godoc.org/github.com/go-playground/validator?status.svg)](https://pkg.go.dev/github.com/go-playground/validator/v10) +![License](https://img.shields.io/dub/l/vibe-d.svg) + +Package validator implements value validations for structs and individual fields based on tags. + +It has the following **unique** features: + +- Cross Field and Cross Struct validations by using validation tags or custom validators. +- Slice, Array and Map diving, which allows any or all levels of a multidimensional field to be validated. +- Ability to dive into both map keys and values for validation +- Handles type interface by determining it's underlying type prior to validation. +- Handles custom field types such as sql driver Valuer see [Valuer](https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29) +- Alias validation tags, which allows for mapping of several validations to a single tag for easier defining of validations on structs +- Extraction of custom defined Field Name e.g. can specify to extract the JSON name while validating and have it available in the resulting FieldError +- Customizable i18n aware error messages. +- Default validator for the [gin](https://github.com/gin-gonic/gin) web framework; upgrading from v8 to v9 in gin see [here](https://github.com/go-playground/validator/tree/master/_examples/gin-upgrading-overriding) + +Installation +------------ + +Use go get. + + go get github.com/go-playground/validator/v10 + +Then import the validator package into your own code. 
+ + import "github.com/go-playground/validator/v10" + +Error Return Value +------- + +Validation functions return type error + +They return type error to avoid the issue discussed in the following, where err is always != nil: + +* http://stackoverflow.com/a/29138676/3158232 +* https://github.com/go-playground/validator/issues/134 + +Validator only InvalidValidationError for bad validation input, nil or ValidationErrors as type error; so, in your code all you need to do is check if the error returned is not nil, and if it's not check if error is InvalidValidationError ( if necessary, most of the time it isn't ) type cast it to type ValidationErrors like so: + +```go +err := validate.Struct(mystruct) +validationErrors := err.(validator.ValidationErrors) + ``` + +Usage and documentation +------ + +Please see https://godoc.org/github.com/go-playground/validator for detailed usage docs. + +##### Examples: + +- [Simple](https://github.com/go-playground/validator/blob/master/_examples/simple/main.go) +- [Custom Field Types](https://github.com/go-playground/validator/blob/master/_examples/custom/main.go) +- [Struct Level](https://github.com/go-playground/validator/blob/master/_examples/struct-level/main.go) +- [Translations & Custom Errors](https://github.com/go-playground/validator/blob/master/_examples/translations/main.go) +- [Gin upgrade and/or override validator](https://github.com/go-playground/validator/tree/v9/_examples/gin-upgrading-overriding) +- [wash - an example application putting it all together](https://github.com/bluesuncorp/wash) + +Baked-in Validations +------ + +### Fields: + +| Tag | Description | +| - | - | +| eqcsfield | Field Equals Another Field (relative)| +| eqfield | Field Equals Another Field | +| fieldcontains | NOT DOCUMENTED IN doc.go | +| fieldexcludes | NOT DOCUMENTED IN doc.go | +| gtcsfield | Field Greater Than Another Relative Field | +| gtecsfield | Field Greater Than or Equal To Another Relative Field | +| gtefield | Field Greater 
Than or Equal To Another Field | +| gtfield | Field Greater Than Another Field | +| ltcsfield | Less Than Another Relative Field | +| ltecsfield | Less Than or Equal To Another Relative Field | +| ltefield | Less Than or Equal To Another Field | +| ltfield | Less Than Another Field | +| necsfield | Field Does Not Equal Another Field (relative) | +| nefield | Field Does Not Equal Another Field | + +### Network: + +| Tag | Description | +| - | - | +| cidr | Classless Inter-Domain Routing CIDR | +| cidrv4 | Classless Inter-Domain Routing CIDRv4 | +| cidrv6 | Classless Inter-Domain Routing CIDRv6 | +| datauri | Data URL | +| fqdn | Full Qualified Domain Name (FQDN) | +| hostname | Hostname RFC 952 | +| hostname_port | HostPort | +| hostname_rfc1123 | Hostname RFC 1123 | +| ip | Internet Protocol Address IP | +| ip4_addr | Internet Protocol Address IPv4 | +| ip6_addr |Internet Protocol Address IPv6 | +| ip_addr | Internet Protocol Address IP | +| ipv4 | Internet Protocol Address IPv4 | +| ipv6 | Internet Protocol Address IPv6 | +| mac | Media Access Control Address MAC | +| tcp4_addr | Transmission Control Protocol Address TCPv4 | +| tcp6_addr | Transmission Control Protocol Address TCPv6 | +| tcp_addr | Transmission Control Protocol Address TCP | +| udp4_addr | User Datagram Protocol Address UDPv4 | +| udp6_addr | User Datagram Protocol Address UDPv6 | +| udp_addr | User Datagram Protocol Address UDP | +| unix_addr | Unix domain socket end point Address | +| uri | URI String | +| url | URL String | +| url_encoded | URL Encoded | +| urn_rfc2141 | Urn RFC 2141 String | + +### Strings: + +| Tag | Description | +| - | - | +| alpha | Alpha Only | +| alphanum | Alphanumeric | +| alphanumunicode | Alphanumeric Unicode | +| alphaunicode | Alpha Unicode | +| ascii | ASCII | +| contains | Contains | +| containsany | Contains Any | +| containsrune | Contains Rune | +| endswith | Ends With | +| lowercase | Lowercase | +| multibyte | Multi-Byte Characters | +| number | NOT 
DOCUMENTED IN doc.go | +| numeric | Numeric | +| printascii | Printable ASCII | +| startswith | Starts With | +| uppercase | Uppercase | + +### Format: +| Tag | Description | +| - | - | +| base64 | Base64 String | +| base64url | Base64URL String | +| btc_addr | Bitcoin Address | +| btc_addr_bech32 | Bitcoin Bech32 Address (segwit) | +| datetime | Datetime | +| e164 | e164 formatted phone number | +| email | E-mail String +| eth_addr | Ethereum Address | +| hexadecimal | Hexadecimal String | +| hexcolor | Hexcolor String | +| hsl | HSL String | +| hsla | HSLA String | +| html | HTML Tags | +| html_encoded | HTML Encoded | +| isbn | International Standard Book Number | +| isbn10 | International Standard Book Number 10 | +| isbn13 | International Standard Book Number 13 | +| json | JSON | +| latitude | Latitude | +| longitude | Longitude | +| rgb | RGB String | +| rgba | RGBA String | +| ssn | Social Security Number SSN | +| uuid | Universally Unique Identifier UUID | +| uuid3 | Universally Unique Identifier UUID v3 | +| uuid3_rfc4122 | Universally Unique Identifier UUID v3 RFC4122 | +| uuid4 | Universally Unique Identifier UUID v4 | +| uuid4_rfc4122 | Universally Unique Identifier UUID v4 RFC4122 | +| uuid5 | Universally Unique Identifier UUID v5 | +| uuid5_rfc4122 | Universally Unique Identifier UUID v5 RFC4122 | +| uuid_rfc4122 | Universally Unique Identifier UUID RFC4122 | + +### Comparisons: +| Tag | Description | +| - | - | +| eq | Equals | +| gt | Greater than| +| gte |Greater than or equal | +| lt | Less Than | +| lte | Less Than or Equal | +| ne | Not Equal | + +### Other: +| Tag | Description | +| - | - | +| dir | Directory | +| endswith | Ends With | +| excludes | Excludes | +| excludesall | Excludes All | +| excludesrune | Excludes Rune | +| file | File path | +| isdefault | Is Default | +| len | Length | +| max | Maximum | +| min | Minimum | +| oneof | One Of | +| required | Required | +| required_if | Required If | +| required_unless | Required Unless | 
+| required_with | Required With | +| required_with_all | Required With All | +| required_without | Required Without | +| required_without_all | Required Without All | +| excluded_with | Excluded With | +| excluded_with_all | Excluded With All | +| excluded_without | Excluded Without | +| excluded_without_all | Excluded Without All | +| unique | Unique | + +Benchmarks +------ +###### Run on MacBook Pro (15-inch, 2017) go version go1.10.2 darwin/amd64 +```go +goos: darwin +goarch: amd64 +pkg: github.com/go-playground/validator +BenchmarkFieldSuccess-8 20000000 83.6 ns/op 0 B/op 0 allocs/op +BenchmarkFieldSuccessParallel-8 50000000 26.8 ns/op 0 B/op 0 allocs/op +BenchmarkFieldFailure-8 5000000 291 ns/op 208 B/op 4 allocs/op +BenchmarkFieldFailureParallel-8 20000000 107 ns/op 208 B/op 4 allocs/op +BenchmarkFieldArrayDiveSuccess-8 2000000 623 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveSuccessParallel-8 10000000 237 ns/op 201 B/op 11 allocs/op +BenchmarkFieldArrayDiveFailure-8 2000000 859 ns/op 412 B/op 16 allocs/op +BenchmarkFieldArrayDiveFailureParallel-8 5000000 335 ns/op 413 B/op 16 allocs/op +BenchmarkFieldMapDiveSuccess-8 1000000 1292 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveSuccessParallel-8 3000000 467 ns/op 432 B/op 18 allocs/op +BenchmarkFieldMapDiveFailure-8 1000000 1082 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveFailureParallel-8 5000000 425 ns/op 512 B/op 16 allocs/op +BenchmarkFieldMapDiveWithKeysSuccess-8 1000000 1539 ns/op 480 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysSuccessParallel-8 3000000 613 ns/op 480 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailure-8 1000000 1413 ns/op 721 B/op 21 allocs/op +BenchmarkFieldMapDiveWithKeysFailureParallel-8 3000000 575 ns/op 721 B/op 21 allocs/op +BenchmarkFieldCustomTypeSuccess-8 10000000 216 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeSuccessParallel-8 20000000 82.2 ns/op 32 B/op 2 allocs/op +BenchmarkFieldCustomTypeFailure-8 5000000 274 ns/op 208 B/op 4 allocs/op 
+BenchmarkFieldCustomTypeFailureParallel-8 20000000 116 ns/op 208 B/op 4 allocs/op +BenchmarkFieldOrTagSuccess-8 2000000 740 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagSuccessParallel-8 3000000 474 ns/op 16 B/op 1 allocs/op +BenchmarkFieldOrTagFailure-8 3000000 471 ns/op 224 B/op 5 allocs/op +BenchmarkFieldOrTagFailureParallel-8 3000000 414 ns/op 224 B/op 5 allocs/op +BenchmarkStructLevelValidationSuccess-8 10000000 213 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationSuccessParallel-8 20000000 91.8 ns/op 32 B/op 2 allocs/op +BenchmarkStructLevelValidationFailure-8 3000000 473 ns/op 304 B/op 8 allocs/op +BenchmarkStructLevelValidationFailureParallel-8 10000000 234 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCustomTypeSuccess-8 5000000 385 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeSuccessParallel-8 10000000 161 ns/op 32 B/op 2 allocs/op +BenchmarkStructSimpleCustomTypeFailure-8 2000000 640 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleCustomTypeFailureParallel-8 5000000 318 ns/op 440 B/op 10 allocs/op +BenchmarkStructFilteredSuccess-8 2000000 597 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredSuccessParallel-8 10000000 266 ns/op 288 B/op 9 allocs/op +BenchmarkStructFilteredFailure-8 3000000 454 ns/op 256 B/op 7 allocs/op +BenchmarkStructFilteredFailureParallel-8 10000000 214 ns/op 256 B/op 7 allocs/op +BenchmarkStructPartialSuccess-8 3000000 502 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialSuccessParallel-8 10000000 225 ns/op 256 B/op 6 allocs/op +BenchmarkStructPartialFailure-8 2000000 702 ns/op 480 B/op 11 allocs/op +BenchmarkStructPartialFailureParallel-8 5000000 329 ns/op 480 B/op 11 allocs/op +BenchmarkStructExceptSuccess-8 2000000 793 ns/op 496 B/op 12 allocs/op +BenchmarkStructExceptSuccessParallel-8 10000000 193 ns/op 240 B/op 5 allocs/op +BenchmarkStructExceptFailure-8 2000000 639 ns/op 464 B/op 10 allocs/op +BenchmarkStructExceptFailureParallel-8 5000000 300 ns/op 464 B/op 10 allocs/op 
+BenchmarkStructSimpleCrossFieldSuccess-8 3000000 417 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldSuccessParallel-8 10000000 163 ns/op 72 B/op 3 allocs/op +BenchmarkStructSimpleCrossFieldFailure-8 2000000 645 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCrossFieldFailureParallel-8 5000000 285 ns/op 304 B/op 8 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccess-8 3000000 588 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldSuccessParallel-8 10000000 221 ns/op 80 B/op 4 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailure-8 2000000 868 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleCrossStructCrossFieldFailureParallel-8 5000000 337 ns/op 320 B/op 9 allocs/op +BenchmarkStructSimpleSuccess-8 5000000 260 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleSuccessParallel-8 20000000 90.6 ns/op 0 B/op 0 allocs/op +BenchmarkStructSimpleFailure-8 2000000 619 ns/op 424 B/op 9 allocs/op +BenchmarkStructSimpleFailureParallel-8 5000000 296 ns/op 424 B/op 9 allocs/op +BenchmarkStructComplexSuccess-8 1000000 1454 ns/op 128 B/op 8 allocs/op +BenchmarkStructComplexSuccessParallel-8 3000000 579 ns/op 128 B/op 8 allocs/op +BenchmarkStructComplexFailure-8 300000 4140 ns/op 3041 B/op 53 allocs/op +BenchmarkStructComplexFailureParallel-8 1000000 2127 ns/op 3041 B/op 53 allocs/op +BenchmarkOneof-8 10000000 140 ns/op 0 B/op 0 allocs/op +BenchmarkOneofParallel-8 20000000 70.1 ns/op 0 B/op 0 allocs/op +``` + +Complementary Software +---------------------- + +Here is a list of software that complements using this library either pre or post validation. + +* [form](https://github.com/go-playground/form) - Decodes url.Values into Go value(s) and Encodes Go value(s) into url.Values. Dual Array and Full map support. +* [mold](https://github.com/go-playground/mold) - A general library to help modify or set data within data structures and other objects + +How to Contribute +------ + +Make a pull request... 
+ +License +------ +Distributed under MIT License, please see license file within the code for more details. diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/baked_in.go b/terraform-server/vendor/github.com/go-playground/validator/v10/baked_in.go new file mode 100644 index 00000000..6ce762d1 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/baked_in.go @@ -0,0 +1,2285 @@ +package validator + +import ( + "bytes" + "context" + "crypto/sha256" + "encoding/hex" + "encoding/json" + "fmt" + "net" + "net/url" + "os" + "reflect" + "strconv" + "strings" + "sync" + "time" + "unicode/utf8" + + "golang.org/x/crypto/sha3" + + urn "github.com/leodido/go-urn" +) + +// Func accepts a FieldLevel interface for all validation needs. The return +// value should be true when validation succeeds. +type Func func(fl FieldLevel) bool + +// FuncCtx accepts a context.Context and FieldLevel interface for all +// validation needs. The return value should be true when validation succeeds. +type FuncCtx func(ctx context.Context, fl FieldLevel) bool + +// wrapFunc wraps noramal Func makes it compatible with FuncCtx +func wrapFunc(fn Func) FuncCtx { + if fn == nil { + return nil // be sure not to wrap a bad function. + } + return func(ctx context.Context, fl FieldLevel) bool { + return fn(fl) + } +} + +var ( + restrictedTags = map[string]struct{}{ + diveTag: {}, + keysTag: {}, + endKeysTag: {}, + structOnlyTag: {}, + omitempty: {}, + skipValidationTag: {}, + utf8HexComma: {}, + utf8Pipe: {}, + noStructLevelTag: {}, + requiredTag: {}, + isdefault: {}, + } + + // BakedInAliasValidators is a default mapping of a single validation tag that + // defines a common or complex set of validation(s) to simplify + // adding validation to structs. 
	// bakedInAliases maps a single alias tag to the pipe-separated set of
	// validations it expands to.
	bakedInAliases = map[string]string{
		"iscolor":      "hexcolor|rgb|rgba|hsl|hsla",
		"country_code": "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric",
	}

	// BakedInValidators is the default map of ValidationFunc
	// you can add, remove or even replace items to suite your needs,
	// or even disregard and use your own map if so desired.
	bakedInValidators = map[string]Func{
		"required":                hasValue,
		"required_if":             requiredIf,
		"required_unless":         requiredUnless,
		"required_with":           requiredWith,
		"required_with_all":       requiredWithAll,
		"required_without":        requiredWithout,
		"required_without_all":    requiredWithoutAll,
		"excluded_with":           excludedWith,
		"excluded_with_all":       excludedWithAll,
		"excluded_without":        excludedWithout,
		"excluded_without_all":    excludedWithoutAll,
		"isdefault":               isDefault,
		"len":                     hasLengthOf,
		"min":                     hasMinOf,
		"max":                     hasMaxOf,
		"eq":                      isEq,
		"ne":                      isNe,
		"lt":                      isLt,
		"lte":                     isLte,
		"gt":                      isGt,
		"gte":                     isGte,
		"eqfield":                 isEqField,
		"eqcsfield":               isEqCrossStructField,
		"necsfield":               isNeCrossStructField,
		"gtcsfield":               isGtCrossStructField,
		"gtecsfield":              isGteCrossStructField,
		"ltcsfield":               isLtCrossStructField,
		"ltecsfield":              isLteCrossStructField,
		"nefield":                 isNeField,
		"gtefield":                isGteField,
		"gtfield":                 isGtField,
		"ltefield":                isLteField,
		"ltfield":                 isLtField,
		"fieldcontains":           fieldContains,
		"fieldexcludes":           fieldExcludes,
		"alpha":                   isAlpha,
		"alphanum":                isAlphanum,
		"alphaunicode":            isAlphaUnicode,
		"alphanumunicode":         isAlphanumUnicode,
		"numeric":                 isNumeric,
		"number":                  isNumber,
		"hexadecimal":             isHexadecimal,
		"hexcolor":                isHEXColor,
		"rgb":                     isRGB,
		"rgba":                    isRGBA,
		"hsl":                     isHSL,
		"hsla":                    isHSLA,
		"e164":                    isE164,
		"email":                   isEmail,
		"url":                     isURL,
		"uri":                     isURI,
		"urn_rfc2141":             isUrnRFC2141, // RFC 2141
		"file":                    isFile,
		"base64":                  isBase64,
		"base64url":               isBase64URL,
		"contains":                contains,
		"containsany":             containsAny,
		"containsrune":            containsRune,
		"excludes":                excludes,
		"excludesall":             excludesAll,
		"excludesrune":            excludesRune,
		"startswith":              startsWith,
		"endswith":                endsWith,
		"startsnotwith":           startsNotWith,
		"endsnotwith":             endsNotWith,
		"isbn":                    isISBN,
		"isbn10":                  isISBN10,
		"isbn13":                  isISBN13,
		"eth_addr":                isEthereumAddress,
		"btc_addr":                isBitcoinAddress,
		"btc_addr_bech32":         isBitcoinBech32Address,
		"uuid":                    isUUID,
		"uuid3":                   isUUID3,
		"uuid4":                   isUUID4,
		"uuid5":                   isUUID5,
		"uuid_rfc4122":            isUUIDRFC4122,
		"uuid3_rfc4122":           isUUID3RFC4122,
		"uuid4_rfc4122":           isUUID4RFC4122,
		"uuid5_rfc4122":           isUUID5RFC4122,
		"ascii":                   isASCII,
		"printascii":              isPrintableASCII,
		"multibyte":               hasMultiByteCharacter,
		"datauri":                 isDataURI,
		"latitude":                isLatitude,
		"longitude":               isLongitude,
		"ssn":                     isSSN,
		"ipv4":                    isIPv4,
		"ipv6":                    isIPv6,
		"ip":                      isIP,
		"cidrv4":                  isCIDRv4,
		"cidrv6":                  isCIDRv6,
		"cidr":                    isCIDR,
		"tcp4_addr":               isTCP4AddrResolvable,
		"tcp6_addr":               isTCP6AddrResolvable,
		"tcp_addr":                isTCPAddrResolvable,
		"udp4_addr":               isUDP4AddrResolvable,
		"udp6_addr":               isUDP6AddrResolvable,
		"udp_addr":                isUDPAddrResolvable,
		"ip4_addr":                isIP4AddrResolvable,
		"ip6_addr":                isIP6AddrResolvable,
		"ip_addr":                 isIPAddrResolvable,
		"unix_addr":               isUnixAddrResolvable,
		"mac":                     isMAC,
		"hostname":                isHostnameRFC952,  // RFC 952
		"hostname_rfc1123":        isHostnameRFC1123, // RFC 1123
		"fqdn":                    isFQDN,
		"unique":                  isUnique,
		"oneof":                   isOneOf,
		"html":                    isHTML,
		"html_encoded":            isHTMLEncoded,
		"url_encoded":             isURLEncoded,
		"dir":                     isDir,
		"json":                    isJSON,
		"hostname_port":           isHostnamePort,
		"lowercase":               isLowercase,
		"uppercase":               isUppercase,
		"datetime":                isDatetime,
		"timezone":                isTimeZone,
		"iso3166_1_alpha2":        isIso3166Alpha2,
		"iso3166_1_alpha3":        isIso3166Alpha3,
		"iso3166_1_alpha_numeric": isIso3166AlphaNumeric,
	}
)

// oneofValsCache memoizes the parsed value lists for "oneof" tag params so
// the regex split and quote-stripping run once per distinct param string.
var oneofValsCache = map[string][]string{}

// oneofValsCacheRWLock guards oneofValsCache for concurrent validators.
var oneofValsCacheRWLock = sync.RWMutex{}

// parseOneOfParam2 splits a "oneof" tag parameter into its individual values
// (single quotes stripped) and caches the result keyed by the raw param.
func parseOneOfParam2(s string) []string {
	oneofValsCacheRWLock.RLock()
	vals, ok := oneofValsCache[s]
	oneofValsCacheRWLock.RUnlock()
	if !ok {
		// Cache miss: parse under the write lock. Note a concurrent miss may
		// recompute and overwrite with the same value, which is harmless.
		oneofValsCacheRWLock.Lock()
		vals = splitParamsRegex.FindAllString(s, -1)
		for i := 0; i < len(vals); i++ {
			vals[i] = strings.Replace(vals[i], "'", "", -1)
		}
		oneofValsCache[s] = vals
		oneofValsCacheRWLock.Unlock()
	}
	return vals
}

// isURLEncoded is the validation function for validating if the field's value is percent-encoded (URL encoded).
func isURLEncoded(fl FieldLevel) bool {
	return uRLEncodedRegex.MatchString(fl.Field().String())
}

// isHTMLEncoded is the validation function for validating if the field's value contains HTML entity encoding.
func isHTMLEncoded(fl FieldLevel) bool {
	return hTMLEncodedRegex.MatchString(fl.Field().String())
}

// isHTML is the validation function for validating if the field's value contains HTML markup.
func isHTML(fl FieldLevel) bool {
	return hTMLRegex.MatchString(fl.Field().String())
}

// isOneOf is the validation function for validating that the field's value is one of the values listed in the tag param.
// Supports string, signed and unsigned integer fields; any other kind panics.
func isOneOf(fl FieldLevel) bool {
	vals := parseOneOfParam2(fl.Param())

	field := fl.Field()

	var v string
	switch field.Kind() {
	case reflect.String:
		v = field.String()
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		v = strconv.FormatInt(field.Int(), 10)
	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
		v = strconv.FormatUint(field.Uint(), 10)
	default:
		panic(fmt.Sprintf("Bad field type %T", field.Interface()))
	}
	for i := 0; i < len(vals); i++ {
		if vals[i] == v {
			return true
		}
	}
	return false
}

// isUnique is the validation function for validating if each array|slice|map value is unique.
// With a param, slice elements must be structs and uniqueness is checked on
// the named struct field instead of the whole element.
func isUnique(fl FieldLevel) bool {

	field := fl.Field()
	param := fl.Param()
	v := reflect.ValueOf(struct{}{})

	switch field.Kind() {
	case reflect.Slice, reflect.Array:
		elem := field.Type().Elem()
		if elem.Kind() == reflect.Ptr {
			elem = elem.Elem()
		}

		if param == "" {
			// No param: dedupe on the element values themselves by using
			// them as map keys; equal lengths means all were distinct.
			m := reflect.MakeMap(reflect.MapOf(elem, v.Type()))

			for i := 0; i < field.Len(); i++ {
				m.SetMapIndex(reflect.Indirect(field.Index(i)), v)
			}
			return field.Len() == m.Len()
		}

		sf, ok := elem.FieldByName(param)
		if !ok {
			panic(fmt.Sprintf("Bad field name %s", param))
		}

		sfTyp := sf.Type
		if sfTyp.Kind() == reflect.Ptr {
			sfTyp = sfTyp.Elem()
		}

		// Dedupe on the named struct field of each element.
		m := reflect.MakeMap(reflect.MapOf(sfTyp, v.Type()))
		for i := 0; i < field.Len(); i++ {
			m.SetMapIndex(reflect.Indirect(reflect.Indirect(field.Index(i)).FieldByName(param)), v)
		}
		return field.Len() == m.Len()
	case reflect.Map:
		// For maps, uniqueness is over the values, not the keys.
		m := reflect.MakeMap(reflect.MapOf(field.Type().Elem(), v.Type()))

		for _, k := range field.MapKeys() {
			m.SetMapIndex(field.MapIndex(k), v)
		}
		return field.Len() == m.Len()
	default:
		panic(fmt.Sprintf("Bad field type %T", field.Interface()))
	}
}

// IsMAC is the validation function for validating if the field's value is a valid MAC address.
func isMAC(fl FieldLevel) bool {

	_, err := net.ParseMAC(fl.Field().String())

	return err == nil
}

// IsCIDRv4 is the validation function for validating if the field's value is a valid v4 CIDR address.
func isCIDRv4(fl FieldLevel) bool {

	ip, _, err := net.ParseCIDR(fl.Field().String())

	return err == nil && ip.To4() != nil
}

// IsCIDRv6 is the validation function for validating if the field's value is a valid v6 CIDR address.
func isCIDRv6(fl FieldLevel) bool {

	ip, _, err := net.ParseCIDR(fl.Field().String())

	return err == nil && ip.To4() == nil
}

// IsCIDR is the validation function for validating if the field's value is a valid v4 or v6 CIDR address.
func isCIDR(fl FieldLevel) bool {

	_, _, err := net.ParseCIDR(fl.Field().String())

	return err == nil
}

// IsIPv4 is the validation function for validating if a value is a valid v4 IP address.
func isIPv4(fl FieldLevel) bool {

	ip := net.ParseIP(fl.Field().String())

	return ip != nil && ip.To4() != nil
}

// IsIPv6 is the validation function for validating if the field's value is a valid v6 IP address.
func isIPv6(fl FieldLevel) bool {

	ip := net.ParseIP(fl.Field().String())

	return ip != nil && ip.To4() == nil
}

// IsIP is the validation function for validating if the field's value is a valid v4 or v6 IP address.
+func isIP(fl FieldLevel) bool { + + ip := net.ParseIP(fl.Field().String()) + + return ip != nil +} + +// IsSSN is the validation function for validating if the field's value is a valid SSN. +func isSSN(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() != 11 { + return false + } + + return sSNRegex.MatchString(field.String()) +} + +// IsLongitude is the validation function for validating if the field's value is a valid longitude coordinate. +func isLongitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return longitudeRegex.MatchString(v) +} + +// IsLatitude is the validation function for validating if the field's value is a valid latitude coordinate. 
+func isLatitude(fl FieldLevel) bool { + field := fl.Field() + + var v string + switch field.Kind() { + case reflect.String: + v = field.String() + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + v = strconv.FormatInt(field.Int(), 10) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + v = strconv.FormatUint(field.Uint(), 10) + case reflect.Float32: + v = strconv.FormatFloat(field.Float(), 'f', -1, 32) + case reflect.Float64: + v = strconv.FormatFloat(field.Float(), 'f', -1, 64) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + + return latitudeRegex.MatchString(v) +} + +// IsDataURI is the validation function for validating if the field's value is a valid data URI. +func isDataURI(fl FieldLevel) bool { + + uri := strings.SplitN(fl.Field().String(), ",", 2) + + if len(uri) != 2 { + return false + } + + if !dataURIRegex.MatchString(uri[0]) { + return false + } + + return base64Regex.MatchString(uri[1]) +} + +// HasMultiByteCharacter is the validation function for validating if the field's value has a multi byte character. +func hasMultiByteCharacter(fl FieldLevel) bool { + + field := fl.Field() + + if field.Len() == 0 { + return true + } + + return multibyteRegex.MatchString(field.String()) +} + +// IsPrintableASCII is the validation function for validating if the field's value is a valid printable ASCII character. +func isPrintableASCII(fl FieldLevel) bool { + return printableASCIIRegex.MatchString(fl.Field().String()) +} + +// IsASCII is the validation function for validating if the field's value is a valid ASCII character. +func isASCII(fl FieldLevel) bool { + return aSCIIRegex.MatchString(fl.Field().String()) +} + +// IsUUID5 is the validation function for validating if the field's value is a valid v5 UUID. 
+func isUUID5(fl FieldLevel) bool { + return uUID5Regex.MatchString(fl.Field().String()) +} + +// IsUUID4 is the validation function for validating if the field's value is a valid v4 UUID. +func isUUID4(fl FieldLevel) bool { + return uUID4Regex.MatchString(fl.Field().String()) +} + +// IsUUID3 is the validation function for validating if the field's value is a valid v3 UUID. +func isUUID3(fl FieldLevel) bool { + return uUID3Regex.MatchString(fl.Field().String()) +} + +// IsUUID is the validation function for validating if the field's value is a valid UUID of any version. +func isUUID(fl FieldLevel) bool { + return uUIDRegex.MatchString(fl.Field().String()) +} + +// IsUUID5RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v5 UUID. +func isUUID5RFC4122(fl FieldLevel) bool { + return uUID5RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUID4RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v4 UUID. +func isUUID4RFC4122(fl FieldLevel) bool { + return uUID4RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUID3RFC4122 is the validation function for validating if the field's value is a valid RFC4122 v3 UUID. +func isUUID3RFC4122(fl FieldLevel) bool { + return uUID3RFC4122Regex.MatchString(fl.Field().String()) +} + +// IsUUIDRFC4122 is the validation function for validating if the field's value is a valid RFC4122 UUID of any version. +func isUUIDRFC4122(fl FieldLevel) bool { + return uUIDRFC4122Regex.MatchString(fl.Field().String()) +} + +// IsISBN is the validation function for validating if the field's value is a valid v10 or v13 ISBN. +func isISBN(fl FieldLevel) bool { + return isISBN10(fl) || isISBN13(fl) +} + +// IsISBN13 is the validation function for validating if the field's value is a valid v13 ISBN. 
func isISBN13(fl FieldLevel) bool {

	// Strip separators: an ISBN-13 has at most 4 hyphens/spaces.
	s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 4), " ", "", 4)

	if !iSBN13Regex.MatchString(s) {
		return false
	}

	var checksum int32
	var i int32

	// ISBN-13 check digit: digits are weighted alternately 1 and 3.
	factor := []int32{1, 3}

	for i = 0; i < 12; i++ {
		checksum += factor[i%2] * int32(s[i]-'0')
	}

	return (int32(s[12]-'0'))-((10-(checksum%10))%10) == 0
}

// IsISBN10 is the validation function for validating if the field's value is a valid v10 ISBN.
func isISBN10(fl FieldLevel) bool {

	// Strip separators: an ISBN-10 has at most 3 hyphens/spaces.
	s := strings.Replace(strings.Replace(fl.Field().String(), "-", "", 3), " ", "", 3)

	if !iSBN10Regex.MatchString(s) {
		return false
	}

	var checksum int32
	var i int32

	// ISBN-10 check digit: position-weighted sum must be divisible by 11;
	// an 'X' check digit stands for the value 10.
	for i = 0; i < 9; i++ {
		checksum += (i + 1) * int32(s[i]-'0')
	}

	if s[9] == 'X' {
		checksum += 10 * 10
	} else {
		checksum += 10 * int32(s[9]-'0')
	}

	return checksum%11 == 0
}

// IsEthereumAddress is the validation function for validating if the field's value is a valid Ethereum address.
func isEthereumAddress(fl FieldLevel) bool {
	address := fl.Field().String()

	if !ethAddressRegex.MatchString(address) {
		return false
	}

	// All-upper or all-lower hex carries no checksum and is accepted as-is.
	if ethaddressRegexUpper.MatchString(address) || ethAddressRegexLower.MatchString(address) {
		return true
	}

	// Checksum validation. Reference: https://github.com/ethereum/EIPs/blob/master/EIPS/eip-55.md
	address = address[2:] // Skip "0x" prefix.
	h := sha3.NewLegacyKeccak256()
	// hash.Hash's io.Writer implementation says it never returns an error. https://golang.org/pkg/hash/#Hash
	_, _ = h.Write([]byte(strings.ToLower(address)))
	hash := hex.EncodeToString(h.Sum(nil))

	for i := 0; i < len(address); i++ {
		if address[i] <= '9' { // Skip 0-9 digits: they don't have upper/lower-case.
			continue
		}
		// Per EIP-55: hash nibble >= 8 requires uppercase, < 8 requires lowercase.
		if hash[i] > '7' && address[i] >= 'a' || hash[i] <= '7' && address[i] <= 'F' {
			return false
		}
	}

	return true
}

// IsBitcoinAddress is the validation function for validating if the field's value is a valid btc address
func isBitcoinAddress(fl FieldLevel) bool {
	address := fl.Field().String()

	if !btcAddressRegex.MatchString(address) {
		return false
	}

	// Base58 decode into a fixed 25-byte buffer (version + payload + checksum).
	alphabet := []byte("123456789ABCDEFGHJKLMNPQRSTUVWXYZabcdefghijkmnopqrstuvwxyz")

	decode := [25]byte{}

	for _, n := range []byte(address) {
		d := bytes.IndexByte(alphabet, n)

		for i := 24; i >= 0; i-- {
			d += 58 * int(decode[i])
			decode[i] = byte(d % 256)
			d /= 256
		}
	}

	// The last 4 bytes must equal the first 4 bytes of
	// SHA256(SHA256(version+payload)).
	h := sha256.New()
	_, _ = h.Write(decode[:21])
	d := h.Sum([]byte{})
	h = sha256.New()
	_, _ = h.Write(d)

	validchecksum := [4]byte{}
	computedchecksum := [4]byte{}

	copy(computedchecksum[:], h.Sum(d[:0]))
	copy(validchecksum[:], decode[21:])

	return validchecksum == computedchecksum
}

// IsBitcoinBech32Address is the validation function for validating if the field's value is a valid bech32 btc address
func isBitcoinBech32Address(fl FieldLevel) bool {
	address := fl.Field().String()

	// Bech32 forbids mixed case, so match against either the all-lower or
	// all-upper pattern.
	if !btcLowerAddressRegexBech32.MatchString(address) && !btcUpperAddressRegexBech32.MatchString(address) {
		return false
	}

	am := len(address) % 8

	if am == 0 || am == 3 || am == 5 {
		return false
	}

	address = strings.ToLower(address)

	alphabet := "qpzry9x8gf2tvdw0s3jn54khce6mua7l"

	hr := []int{3, 3, 0, 2, 3} // the human readable part will always be bc
	addr := address[3:]
	dp := make([]int, 0, len(addr))

	for _, c := range addr {
		dp = append(dp, strings.IndexRune(alphabet, c))
	}

	// First data value is the witness version (0..16).
	ver := dp[0]

	if ver < 0 || ver > 16 {
		return false
	}

	if ver == 0 {
		if len(address) != 42 && len(address) != 62 {
			return false
		}
	}

	values := append(hr, dp...)

	// Checksum verification: GEN holds the BCH generator constants used by
	// the bech32 polymod (see BIP-173).
	GEN := []int{0x3b6a57b2, 0x26508e6d, 0x1ea119fa, 0x3d4233dd, 0x2a1462b3}

	p := 1

	for _, v := range values {
		b := p >> 25
		p = (p&0x1ffffff)<<5 ^ v

		for i := 0; i < 5; i++ {
			if (b>>uint(i))&1 == 1 {
				p ^= GEN[i]
			}
		}
	}

	if p != 1 {
		return false
	}

	// Repack the 5-bit data groups (minus version and 6 checksum values)
	// into 8-bit bytes and validate the witness program length.
	b := uint(0)
	acc := 0
	mv := (1 << 5) - 1
	var sw []int

	for _, v := range dp[1 : len(dp)-6] {
		acc = (acc << 5) | v
		b += 5
		for b >= 8 {
			b -= 8
			sw = append(sw, (acc>>b)&mv)
		}
	}

	if len(sw) < 2 || len(sw) > 40 {
		return false
	}

	return true
}

// ExcludesRune is the validation function for validating that the field's value does not contain the rune specified within the param.
func excludesRune(fl FieldLevel) bool {
	return !containsRune(fl)
}

// ExcludesAll is the validation function for validating that the field's value does not contain any of the characters specified within the param.
func excludesAll(fl FieldLevel) bool {
	return !containsAny(fl)
}

// Excludes is the validation function for validating that the field's value does not contain the text specified within the param.
func excludes(fl FieldLevel) bool {
	return !contains(fl)
}

// ContainsRune is the validation function for validating that the field's value contains the rune specified within the param.
func containsRune(fl FieldLevel) bool {

	// Only the first rune of the param is considered.
	r, _ := utf8.DecodeRuneInString(fl.Param())

	return strings.ContainsRune(fl.Field().String(), r)
}

// ContainsAny is the validation function for validating that the field's value contains any of the characters specified within the param.
func containsAny(fl FieldLevel) bool {
	return strings.ContainsAny(fl.Field().String(), fl.Param())
}

// Contains is the validation function for validating that the field's value contains the text specified within the param.
+func contains(fl FieldLevel) bool { + return strings.Contains(fl.Field().String(), fl.Param()) +} + +// StartsWith is the validation function for validating that the field's value starts with the text specified within the param. +func startsWith(fl FieldLevel) bool { + return strings.HasPrefix(fl.Field().String(), fl.Param()) +} + +// EndsWith is the validation function for validating that the field's value ends with the text specified within the param. +func endsWith(fl FieldLevel) bool { + return strings.HasSuffix(fl.Field().String(), fl.Param()) +} + +// StartsNotWith is the validation function for validating that the field's value does not start with the text specified within the param. +func startsNotWith(fl FieldLevel) bool { + return !startsWith(fl) +} + +// EndsNotWith is the validation function for validating that the field's value does not end with the text specified within the param. +func endsNotWith(fl FieldLevel) bool { + return !endsWith(fl) +} + +// FieldContains is the validation function for validating if the current field's value contains the field specified by the param's value. +func fieldContains(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + + if !ok { + return false + } + + return strings.Contains(field.String(), currentField.String()) +} + +// FieldExcludes is the validation function for validating if the current field's value excludes the field specified by the param's value. +func fieldExcludes(fl FieldLevel) bool { + field := fl.Field() + + currentField, _, ok := fl.GetStructFieldOK() + if !ok { + return true + } + + return !strings.Contains(field.String(), currentField.String()) +} + +// IsNeField is the validation function for validating if the current field's value is not equal to the field specified by the param's value. 
+func isNeField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() != currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() != currentField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() != currentField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) != int64(currentField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return true + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + + } + + // default reflect.String: + return field.String() != currentField.String() +} + +// IsNe is the validation function for validating that the field's value does not equal the provided param value. +func isNe(fl FieldLevel) bool { + return !isEq(fl) +} + +// IsLteCrossStructField is the validation function for validating if the current field's value is less than or equal to the field, within a separate struct, specified by the param's value. 
+func isLteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() <= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() <= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() <= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) <= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() <= topField.String() +} + +// IsLtCrossStructField is the validation function for validating if the current field's value is less than the field, within a separate struct, specified by the param's value. +// NOTE: This is exposed for use within your own custom functions and not intended to be called directly. 
+func isLtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() < topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() < topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() < topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) < int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.Before(topTime) + } + } + + // default reflect.String: + return field.String() < topField.String() +} + +// IsGteCrossStructField is the validation function for validating if the current field's value is greater than or equal to the field, within a separate struct, specified by the param's value. 
+func isGteCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() >= topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() >= topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() >= topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) >= int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) || fieldTime.Equal(topTime) + } + } + + // default reflect.String: + return field.String() >= topField.String() +} + +// IsGtCrossStructField is the validation function for validating if the current field's value is greater than the field, within a separate struct, specified by the param's value. 
+func isGtCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, topKind, ok := fl.GetStructFieldOK() + if !ok || topKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() > topField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return field.Uint() > topField.Uint() + + case reflect.Float32, reflect.Float64: + return field.Float() > topField.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) > int64(topField.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return false + } + + if fieldType == timeType { + + fieldTime := field.Interface().(time.Time) + topTime := topField.Interface().(time.Time) + + return fieldTime.After(topTime) + } + } + + // default reflect.String: + return field.String() > topField.String() +} + +// IsNeCrossStructField is the validation function for validating that the current field's value is not equal to the field, within a separate struct, specified by the param's value. 
+func isNeCrossStructField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + topField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return true + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return topField.Int() != field.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + return topField.Uint() != field.Uint() + + case reflect.Float32, reflect.Float64: + return topField.Float() != field.Float() + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(topField.Len()) != int64(field.Len()) + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != topField.Type() { + return true + } + + if fieldType == timeType { + + t := field.Interface().(time.Time) + fieldTime := topField.Interface().(time.Time) + + return !fieldTime.Equal(t) + } + } + + // default reflect.String: + return topField.String() != field.String() +} + +// IsEqCrossStructField is the validation function for validating that the current field's value is equal to the field, within a separate struct, specified by the param's value. 
func isEqCrossStructField(fl FieldLevel) bool {

	field := fl.Field()
	kind := field.Kind()

	// The comparison field must exist and share the same kind.
	topField, topKind, ok := fl.GetStructFieldOK()
	if !ok || topKind != kind {
		return false
	}

	switch kind {

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return topField.Int() == field.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return topField.Uint() == field.Uint()

	case reflect.Float32, reflect.Float64:
		return topField.Float() == field.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections are compared by length only.
		return int64(topField.Len()) == int64(field.Len())

	case reflect.Struct:

		fieldType := field.Type()

		// Not Same underlying type i.e. struct and time
		if fieldType != topField.Type() {
			return false
		}

		// time.Time is compared chronologically; other structs fall through
		// to the String() comparison below.
		if fieldType == timeType {

			t := field.Interface().(time.Time)
			fieldTime := topField.Interface().(time.Time)

			return fieldTime.Equal(t)
		}
	}

	// default reflect.String:
	return topField.String() == field.String()
}

// IsEqField is the validation function for validating if the current field's value is equal to the field specified by the param's value.
func isEqField(fl FieldLevel) bool {

	field := fl.Field()
	kind := field.Kind()

	// The comparison field must exist and share the same kind.
	currentField, currentKind, ok := fl.GetStructFieldOK()
	if !ok || currentKind != kind {
		return false
	}

	switch kind {

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return field.Int() == currentField.Int()

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		return field.Uint() == currentField.Uint()

	case reflect.Float32, reflect.Float64:
		return field.Float() == currentField.Float()

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections are compared by length only.
		return int64(field.Len()) == int64(currentField.Len())

	case reflect.Struct:

		fieldType := field.Type()

		// Not Same underlying type i.e. struct and time
		if fieldType != currentField.Type() {
			return false
		}

		// time.Time is compared chronologically; other structs fall through
		// to the String() comparison below.
		if fieldType == timeType {

			t := currentField.Interface().(time.Time)
			fieldTime := field.Interface().(time.Time)

			return fieldTime.Equal(t)
		}

	}

	// default reflect.String:
	return field.String() == currentField.String()
}

// IsEq is the validation function for validating if the current field's value is equal to the param's value.
func isEq(fl FieldLevel) bool {

	field := fl.Field()
	param := fl.Param()

	switch field.Kind() {

	case reflect.String:
		return field.String() == param

	case reflect.Slice, reflect.Map, reflect.Array:
		// Collections compare their length against the numeric param.
		p := asInt(param)

		return int64(field.Len()) == p

	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		p := asIntFromType(field.Type(), param)

		return field.Int() == p

	case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr:
		p := asUint(param)

		return field.Uint() == p

	case reflect.Float32, reflect.Float64:
		p := asFloat(param)

		return field.Float() == p

	case reflect.Bool:
		p := asBool(param)

		return field.Bool() == p
	}

	panic(fmt.Sprintf("Bad field type %T", field.Interface()))
}

// IsBase64 is the validation function for validating if the current field's value is a valid base 64.
func isBase64(fl FieldLevel) bool {
	return base64Regex.MatchString(fl.Field().String())
}

// IsBase64URL is the validation function for validating if the current field's value is a valid base64 URL safe string.
func isBase64URL(fl FieldLevel) bool {
	return base64URLRegex.MatchString(fl.Field().String())
}

// IsURI is the validation function for validating if the current field's value is a valid URI.
+func isURI(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i := strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + _, err := url.ParseRequestURI(s) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsURL is the validation function for validating if the current field's value is a valid URL. +func isURL(fl FieldLevel) bool { + + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + var i int + s := field.String() + + // checks needed as of Go 1.6 because of change https://github.com/golang/go/commit/617c93ce740c3c3cc28cdd1a0d712be183d0b328#diff-6c2d018290e298803c0c9419d8739885L195 + // emulate browser and strip the '#' suffix prior to validation. see issue-#237 + if i = strings.Index(s, "#"); i > -1 { + s = s[:i] + } + + if len(s) == 0 { + return false + } + + url, err := url.ParseRequestURI(s) + + if err != nil || url.Scheme == "" { + return false + } + + return true + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isUrnRFC2141 is the validation function for validating if the current field's value is a valid URN as per RFC 2141. +func isUrnRFC2141(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + + case reflect.String: + + str := field.String() + + _, match := urn.Parse([]byte(str)) + + return match + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsFile is the validation function for validating if the current field's value is a valid file path. 
+func isFile(fl FieldLevel) bool { + field := fl.Field() + + switch field.Kind() { + case reflect.String: + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return !fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsE164 is the validation function for validating if the current field's value is a valid e.164 formatted phone number. +func isE164(fl FieldLevel) bool { + return e164Regex.MatchString(fl.Field().String()) +} + +// IsEmail is the validation function for validating if the current field's value is a valid email address. +func isEmail(fl FieldLevel) bool { + return emailRegex.MatchString(fl.Field().String()) +} + +// IsHSLA is the validation function for validating if the current field's value is a valid HSLA color. +func isHSLA(fl FieldLevel) bool { + return hslaRegex.MatchString(fl.Field().String()) +} + +// IsHSL is the validation function for validating if the current field's value is a valid HSL color. +func isHSL(fl FieldLevel) bool { + return hslRegex.MatchString(fl.Field().String()) +} + +// IsRGBA is the validation function for validating if the current field's value is a valid RGBA color. +func isRGBA(fl FieldLevel) bool { + return rgbaRegex.MatchString(fl.Field().String()) +} + +// IsRGB is the validation function for validating if the current field's value is a valid RGB color. +func isRGB(fl FieldLevel) bool { + return rgbRegex.MatchString(fl.Field().String()) +} + +// IsHEXColor is the validation function for validating if the current field's value is a valid HEX color. +func isHEXColor(fl FieldLevel) bool { + return hexcolorRegex.MatchString(fl.Field().String()) +} + +// IsHexadecimal is the validation function for validating if the current field's value is a valid hexadecimal. 
+func isHexadecimal(fl FieldLevel) bool { + return hexadecimalRegex.MatchString(fl.Field().String()) +} + +// IsNumber is the validation function for validating if the current field's value is a valid number. +func isNumber(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numberRegex.MatchString(fl.Field().String()) + } +} + +// IsNumeric is the validation function for validating if the current field's value is a valid numeric value. +func isNumeric(fl FieldLevel) bool { + switch fl.Field().Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr, reflect.Float32, reflect.Float64: + return true + default: + return numericRegex.MatchString(fl.Field().String()) + } +} + +// IsAlphanum is the validation function for validating if the current field's value is a valid alphanumeric value. +func isAlphanum(fl FieldLevel) bool { + return alphaNumericRegex.MatchString(fl.Field().String()) +} + +// IsAlpha is the validation function for validating if the current field's value is a valid alpha value. +func isAlpha(fl FieldLevel) bool { + return alphaRegex.MatchString(fl.Field().String()) +} + +// IsAlphanumUnicode is the validation function for validating if the current field's value is a valid alphanumeric unicode value. +func isAlphanumUnicode(fl FieldLevel) bool { + return alphaUnicodeNumericRegex.MatchString(fl.Field().String()) +} + +// IsAlphaUnicode is the validation function for validating if the current field's value is a valid alpha unicode value. 
+func isAlphaUnicode(fl FieldLevel) bool { + return alphaUnicodeRegex.MatchString(fl.Field().String()) +} + +// isDefault is the opposite of required aka hasValue +func isDefault(fl FieldLevel) bool { + return !hasValue(fl) +} + +// HasValue is the validation function for validating if the current field's value is not the default static value. +func hasValue(fl FieldLevel) bool { + field := fl.Field() + switch field.Kind() { + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return !field.IsNil() + default: + if fl.(*validate).fldIsPointer && field.Interface() != nil { + return true + } + return field.IsValid() && field.Interface() != reflect.Zero(field.Type()).Interface() + } +} + +// requireCheckField is a func for check field kind +func requireCheckFieldKind(fl FieldLevel, param string, defaultNotFoundValue bool) bool { + field := fl.Field() + kind := field.Kind() + var nullable, found bool + if len(param) > 0 { + field, kind, nullable, found = fl.GetStructFieldOKAdvanced2(fl.Parent(), param) + if !found { + return defaultNotFoundValue + } + } + switch kind { + case reflect.Invalid: + return defaultNotFoundValue + case reflect.Slice, reflect.Map, reflect.Ptr, reflect.Interface, reflect.Chan, reflect.Func: + return field.IsNil() + default: + if nullable && field.Interface() != nil { + return false + } + return field.IsValid() && field.Interface() == reflect.Zero(field.Type()).Interface() + } +} + +// requireCheckFieldValue is a func for check field value +func requireCheckFieldValue(fl FieldLevel, param string, value string, defaultNotFoundValue bool) bool { + field, kind, _, found := fl.GetStructFieldOKAdvanced2(fl.Parent(), param) + if !found { + return defaultNotFoundValue + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return field.Int() == asInt(value) + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + 
return field.Uint() == asUint(value) + + case reflect.Float32, reflect.Float64: + return field.Float() == asFloat(value) + + case reflect.Slice, reflect.Map, reflect.Array: + return int64(field.Len()) == asInt(value) + } + + // default reflect.String: + return field.String() == value +} + +// requiredIf is the validation function +// The field under validation must be present and not empty only if all the other specified fields are equal to the value following with the specified field. +func requiredIf(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for required_if %s", fl.FieldName())) + } + for i := 0; i < len(params); i += 2 { + if !requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true + } + } + return hasValue(fl) +} + +// requiredUnless is the validation function +// The field under validation must be present and not empty only unless all the other specified fields are equal to the value following with the specified field. +func requiredUnless(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + if len(params)%2 != 0 { + panic(fmt.Sprintf("Bad param number for required_unless %s", fl.FieldName())) + } + + for i := 0; i < len(params); i += 2 { + if requireCheckFieldValue(fl, params[i], params[i+1], false) { + return true + } + } + return hasValue(fl) +} + +// ExcludedWith is the validation function +// The field under validation must not be present or is empty if any of the other specified fields are present. +func excludedWith(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return !hasValue(fl) + } + } + return true +} + +// RequiredWith is the validation function +// The field under validation must be present and not empty only if any of the other specified fields are present. 
+func requiredWith(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return hasValue(fl) + } + } + return true +} + +// ExcludedWithAll is the validation function +// The field under validation must not be present or is empty if all of the other specified fields are present. +func excludedWithAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if requireCheckFieldKind(fl, param, true) { + return true + } + } + return !hasValue(fl) +} + +// RequiredWithAll is the validation function +// The field under validation must be present and not empty only if all of the other specified fields are present. +func requiredWithAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if requireCheckFieldKind(fl, param, true) { + return true + } + } + return hasValue(fl) +} + +// ExcludedWithout is the validation function +// The field under validation must not be present or is empty when any of the other specified fields are not present. +func excludedWithout(fl FieldLevel) bool { + if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { + return !hasValue(fl) + } + return true +} + +// RequiredWithout is the validation function +// The field under validation must be present and not empty only when any of the other specified fields are not present. +func requiredWithout(fl FieldLevel) bool { + if requireCheckFieldKind(fl, strings.TrimSpace(fl.Param()), true) { + return hasValue(fl) + } + return true +} + +// RequiredWithoutAll is the validation function +// The field under validation must not be present or is empty when all of the other specified fields are not present. 
+func excludedWithoutAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return true + } + } + return !hasValue(fl) +} + +// RequiredWithoutAll is the validation function +// The field under validation must be present and not empty only when all of the other specified fields are not present. +func requiredWithoutAll(fl FieldLevel) bool { + params := parseOneOfParam2(fl.Param()) + for _, param := range params { + if !requireCheckFieldKind(fl, param, true) { + return true + } + } + return hasValue(fl) +} + +// IsGteField is the validation function for validating if the current field's value is greater than or equal to the field specified by the param's value. +func isGteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() >= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() >= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() >= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) >= len(currentField.String()) +} + +// IsGtField is the validation function for validating if the current field's value is greater than the field specified by the param's value. 
+func isGtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() > currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() > currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() > currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.After(t) + } + } + + // default reflect.String + return len(field.String()) > len(currentField.String()) +} + +// IsGte is the validation function for validating if the current field's value is greater than or equal to the param's value. 
+func isGte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) >= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) >= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() >= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() >= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() >= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.After(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsGt is the validation function for validating if the current field's value is greater than the param's value. 
+func isGt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) > p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) > p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() > p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() > p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() > p + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).After(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasLengthOf is the validation function for validating if the current field's value is equal to the param's value. +func hasLengthOf(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) == p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) == p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() == p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() == p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() == p + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMinOf is the validation function for validating if the current field's value is greater than or equal to the param's value. 
+func hasMinOf(fl FieldLevel) bool { + return isGte(fl) +} + +// IsLteField is the validation function for validating if the current field's value is less than or equal to the field specified by the param's value. +func isLteField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() <= currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() <= currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() <= currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) || fieldTime.Equal(t) + } + } + + // default reflect.String + return len(field.String()) <= len(currentField.String()) +} + +// IsLtField is the validation function for validating if the current field's value is less than the field specified by the param's value. 
+func isLtField(fl FieldLevel) bool { + + field := fl.Field() + kind := field.Kind() + + currentField, currentKind, ok := fl.GetStructFieldOK() + if !ok || currentKind != kind { + return false + } + + switch kind { + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + + return field.Int() < currentField.Int() + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + + return field.Uint() < currentField.Uint() + + case reflect.Float32, reflect.Float64: + + return field.Float() < currentField.Float() + + case reflect.Struct: + + fieldType := field.Type() + + // Not Same underlying type i.e. struct and time + if fieldType != currentField.Type() { + return false + } + + if fieldType == timeType { + + t := currentField.Interface().(time.Time) + fieldTime := field.Interface().(time.Time) + + return fieldTime.Before(t) + } + } + + // default reflect.String + return len(field.String()) < len(currentField.String()) +} + +// IsLte is the validation function for validating if the current field's value is less than or equal to the param's value. 
+func isLte(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) <= p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) <= p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() <= p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() <= p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() <= p + + case reflect.Struct: + + if field.Type() == timeType { + + now := time.Now().UTC() + t := field.Interface().(time.Time) + + return t.Before(now) || t.Equal(now) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// IsLt is the validation function for validating if the current field's value is less than the param's value. 
+func isLt(fl FieldLevel) bool { + + field := fl.Field() + param := fl.Param() + + switch field.Kind() { + + case reflect.String: + p := asInt(param) + + return int64(utf8.RuneCountInString(field.String())) < p + + case reflect.Slice, reflect.Map, reflect.Array: + p := asInt(param) + + return int64(field.Len()) < p + + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + p := asIntFromType(field.Type(), param) + + return field.Int() < p + + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uintptr: + p := asUint(param) + + return field.Uint() < p + + case reflect.Float32, reflect.Float64: + p := asFloat(param) + + return field.Float() < p + + case reflect.Struct: + + if field.Type() == timeType { + + return field.Interface().(time.Time).Before(time.Now().UTC()) + } + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// HasMaxOf is the validation function for validating if the current field's value is less than or equal to the param's value. +func hasMaxOf(fl FieldLevel) bool { + return isLte(fl) +} + +// IsTCP4AddrResolvable is the validation function for validating if the field's value is a resolvable tcp4 address. +func isTCP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp4", fl.Field().String()) + return err == nil +} + +// IsTCP6AddrResolvable is the validation function for validating if the field's value is a resolvable tcp6 address. +func isTCP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp6", fl.Field().String()) + + return err == nil +} + +// IsTCPAddrResolvable is the validation function for validating if the field's value is a resolvable tcp address. 
+func isTCPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveTCPAddr("tcp", fl.Field().String()) + + return err == nil +} + +// IsUDP4AddrResolvable is the validation function for validating if the field's value is a resolvable udp4 address. +func isUDP4AddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp4", fl.Field().String()) + + return err == nil +} + +// IsUDP6AddrResolvable is the validation function for validating if the field's value is a resolvable udp6 address. +func isUDP6AddrResolvable(fl FieldLevel) bool { + + if !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp6", fl.Field().String()) + + return err == nil +} + +// IsUDPAddrResolvable is the validation function for validating if the field's value is a resolvable udp address. +func isUDPAddrResolvable(fl FieldLevel) bool { + + if !isIP4Addr(fl) && !isIP6Addr(fl) { + return false + } + + _, err := net.ResolveUDPAddr("udp", fl.Field().String()) + + return err == nil +} + +// IsIP4AddrResolvable is the validation function for validating if the field's value is a resolvable ip4 address. +func isIP4AddrResolvable(fl FieldLevel) bool { + + if !isIPv4(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip4", fl.Field().String()) + + return err == nil +} + +// IsIP6AddrResolvable is the validation function for validating if the field's value is a resolvable ip6 address. +func isIP6AddrResolvable(fl FieldLevel) bool { + + if !isIPv6(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip6", fl.Field().String()) + + return err == nil +} + +// IsIPAddrResolvable is the validation function for validating if the field's value is a resolvable ip address. 
+func isIPAddrResolvable(fl FieldLevel) bool { + + if !isIP(fl) { + return false + } + + _, err := net.ResolveIPAddr("ip", fl.Field().String()) + + return err == nil +} + +// IsUnixAddrResolvable is the validation function for validating if the field's value is a resolvable unix address. +func isUnixAddrResolvable(fl FieldLevel) bool { + + _, err := net.ResolveUnixAddr("unix", fl.Field().String()) + + return err == nil +} + +func isIP4Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + val = val[0:idx] + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() != nil +} + +func isIP6Addr(fl FieldLevel) bool { + + val := fl.Field().String() + + if idx := strings.LastIndex(val, ":"); idx != -1 { + if idx != 0 && val[idx-1:idx] == "]" { + val = val[1 : idx-1] + } + } + + ip := net.ParseIP(val) + + return ip != nil && ip.To4() == nil +} + +func isHostnameRFC952(fl FieldLevel) bool { + return hostnameRegexRFC952.MatchString(fl.Field().String()) +} + +func isHostnameRFC1123(fl FieldLevel) bool { + return hostnameRegexRFC1123.MatchString(fl.Field().String()) +} + +func isFQDN(fl FieldLevel) bool { + val := fl.Field().String() + + if val == "" { + return false + } + + return fqdnRegexRFC1123.MatchString(val) +} + +// IsDir is the validation function for validating if the current field's value is a valid directory. +func isDir(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + fileInfo, err := os.Stat(field.String()) + if err != nil { + return false + } + + return fileInfo.IsDir() + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isJSON is the validation function for validating if the current field's value is a valid json string. 
+func isJSON(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + val := field.String() + return json.Valid([]byte(val)) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isHostnamePort validates a : combination for fields typically used for socket address. +func isHostnamePort(fl FieldLevel) bool { + val := fl.Field().String() + host, port, err := net.SplitHostPort(val) + if err != nil { + return false + } + // Port must be a iny <= 65535. + if portNum, err := strconv.ParseInt(port, 10, 32); err != nil || portNum > 65535 || portNum < 1 { + return false + } + + // If host is specified, it should match a DNS name + if host != "" { + return hostnameRegexRFC1123.MatchString(host) + } + return true +} + +// isLowercase is the validation function for validating if the current field's value is a lowercase string. +func isLowercase(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + if field.String() == "" { + return false + } + return field.String() == strings.ToLower(field.String()) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isUppercase is the validation function for validating if the current field's value is an uppercase string. +func isUppercase(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + if field.String() == "" { + return false + } + return field.String() == strings.ToUpper(field.String()) + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isDatetime is the validation function for validating if the current field's value is a valid datetime string. 
+func isDatetime(fl FieldLevel) bool { + field := fl.Field() + param := fl.Param() + + if field.Kind() == reflect.String { + _, err := time.Parse(param, field.String()) + + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isTimeZone is the validation function for validating if the current field's value is a valid time zone string. +func isTimeZone(fl FieldLevel) bool { + field := fl.Field() + + if field.Kind() == reflect.String { + // empty value is converted to UTC by time.LoadLocation but disallow it as it is not a valid time zone name + if field.String() == "" { + return false + } + + // Local value is converted to the current system time zone by time.LoadLocation but disallow it as it is not a valid time zone name + if strings.ToLower(field.String()) == "local" { + return false + } + + _, err := time.LoadLocation(field.String()) + return err == nil + } + + panic(fmt.Sprintf("Bad field type %T", field.Interface())) +} + +// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-2 country code. +func isIso3166Alpha2(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_1_alpha2[val] +} + +// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-3 country code. +func isIso3166Alpha3(fl FieldLevel) bool { + val := fl.Field().String() + return iso3166_1_alpha3[val] +} + +// isIso3166Alpha2 is the validation function for validating if the current field's value is a valid iso3166-1 alpha-numeric country code. 
+func isIso3166AlphaNumeric(fl FieldLevel) bool { + field := fl.Field() + + var code int + switch field.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + code = int(field.Int() % 1000) + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + code = int(field.Uint() % 1000) + default: + panic(fmt.Sprintf("Bad field type %T", field.Interface())) + } + return iso3166_1_alpha_numeric[code] +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/cache.go b/terraform-server/vendor/github.com/go-playground/validator/v10/cache.go new file mode 100644 index 00000000..0d18d6ec --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/cache.go @@ -0,0 +1,322 @@ +package validator + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type tagType uint8 + +const ( + typeDefault tagType = iota + typeOmitEmpty + typeIsDefault + typeNoStructLevel + typeStructOnly + typeDive + typeOr + typeKeys + typeEndKeys +) + +const ( + invalidValidation = "Invalid validation tag on field '%s'" + undefinedValidation = "Undefined validation function '%s' on field '%s'" + keysTagNotDefined = "'" + endKeysTag + "' tag encountered without a corresponding '" + keysTag + "' tag" +) + +type structCache struct { + lock sync.Mutex + m atomic.Value // map[reflect.Type]*cStruct +} + +func (sc *structCache) Get(key reflect.Type) (c *cStruct, found bool) { + c, found = sc.m.Load().(map[reflect.Type]*cStruct)[key] + return +} + +func (sc *structCache) Set(key reflect.Type, value *cStruct) { + m := sc.m.Load().(map[reflect.Type]*cStruct) + nm := make(map[reflect.Type]*cStruct, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + sc.m.Store(nm) +} + +type tagCache struct { + lock sync.Mutex + m atomic.Value // map[string]*cTag +} + +func (tc *tagCache) Get(key string) (c *cTag, found bool) { + c, found = tc.m.Load().(map[string]*cTag)[key] + return +} + 
+func (tc *tagCache) Set(key string, value *cTag) { + m := tc.m.Load().(map[string]*cTag) + nm := make(map[string]*cTag, len(m)+1) + for k, v := range m { + nm[k] = v + } + nm[key] = value + tc.m.Store(nm) +} + +type cStruct struct { + name string + fields []*cField + fn StructLevelFuncCtx +} + +type cField struct { + idx int + name string + altName string + namesEqual bool + cTags *cTag +} + +type cTag struct { + tag string + aliasTag string + actualAliasTag string + param string + keys *cTag // only populated when using tag's 'keys' and 'endkeys' for map key validation + next *cTag + fn FuncCtx + typeof tagType + hasTag bool + hasAlias bool + hasParam bool // true if parameter used eg. eq= where the equal sign has been set + isBlockEnd bool // indicates the current tag represents the last validation in the block + runValidationWhenNil bool +} + +func (v *Validate) extractStructCache(current reflect.Value, sName string) *cStruct { + v.structCache.lock.Lock() + defer v.structCache.lock.Unlock() // leave as defer! because if inner panics, it will never get unlocked otherwise! + + typ := current.Type() + + // could have been multiple trying to access, but once first is done this ensures struct + // isn't parsed again. 
+ cs, ok := v.structCache.Get(typ) + if ok { + return cs + } + + cs = &cStruct{name: sName, fields: make([]*cField, 0), fn: v.structLevelFuncs[typ]} + + numFields := current.NumField() + + var ctag *cTag + var fld reflect.StructField + var tag string + var customName string + + for i := 0; i < numFields; i++ { + + fld = typ.Field(i) + + if !fld.Anonymous && len(fld.PkgPath) > 0 { + continue + } + + tag = fld.Tag.Get(v.tagName) + + if tag == skipValidationTag { + continue + } + + customName = fld.Name + + if v.hasTagNameFunc { + name := v.tagNameFunc(fld) + if len(name) > 0 { + customName = name + } + } + + // NOTE: cannot use shared tag cache, because tags may be equal, but things like alias may be different + // and so only struct level caching can be used instead of combined with Field tag caching + + if len(tag) > 0 { + ctag, _ = v.parseFieldTagsRecursive(tag, fld.Name, "", false) + } else { + // even if field doesn't have validations need cTag for traversing to potential inner/nested + // elements of the field. 
+ ctag = new(cTag) + } + + cs.fields = append(cs.fields, &cField{ + idx: i, + name: fld.Name, + altName: customName, + cTags: ctag, + namesEqual: fld.Name == customName, + }) + } + v.structCache.Set(typ, cs) + return cs +} + +func (v *Validate) parseFieldTagsRecursive(tag string, fieldName string, alias string, hasAlias bool) (firstCtag *cTag, current *cTag) { + var t string + noAlias := len(alias) == 0 + tags := strings.Split(tag, tagSeparator) + + for i := 0; i < len(tags); i++ { + t = tags[i] + if noAlias { + alias = t + } + + // check map for alias and process new tags, otherwise process as usual + if tagsVal, found := v.aliases[t]; found { + if i == 0 { + firstCtag, current = v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + } else { + next, curr := v.parseFieldTagsRecursive(tagsVal, fieldName, t, true) + current.next, current = next, curr + + } + continue + } + + var prevTag tagType + + if i == 0 { + current = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true, typeof: typeDefault} + firstCtag = current + } else { + prevTag = current.typeof + current.next = &cTag{aliasTag: alias, hasAlias: hasAlias, hasTag: true} + current = current.next + } + + switch t { + case diveTag: + current.typeof = typeDive + continue + + case keysTag: + current.typeof = typeKeys + + if i == 0 || prevTag != typeDive { + panic(fmt.Sprintf("'%s' tag must be immediately preceded by the '%s' tag", keysTag, diveTag)) + } + + current.typeof = typeKeys + + // need to pass along only keys tag + // need to increment i to skip over the keys tags + b := make([]byte, 0, 64) + + i++ + + for ; i < len(tags); i++ { + + b = append(b, tags[i]...) 
+ b = append(b, ',') + + if tags[i] == endKeysTag { + break + } + } + + current.keys, _ = v.parseFieldTagsRecursive(string(b[:len(b)-1]), fieldName, "", false) + continue + + case endKeysTag: + current.typeof = typeEndKeys + + // if there are more in tags then there was no keysTag defined + // and an error should be thrown + if i != len(tags)-1 { + panic(keysTagNotDefined) + } + return + + case omitempty: + current.typeof = typeOmitEmpty + continue + + case structOnlyTag: + current.typeof = typeStructOnly + continue + + case noStructLevelTag: + current.typeof = typeNoStructLevel + continue + + default: + if t == isdefault { + current.typeof = typeIsDefault + } + // if a pipe character is needed within the param you must use the utf8Pipe representation "0x7C" + orVals := strings.Split(t, orSeparator) + + for j := 0; j < len(orVals); j++ { + vals := strings.SplitN(orVals[j], tagKeySeparator, 2) + if noAlias { + alias = vals[0] + current.aliasTag = alias + } else { + current.actualAliasTag = t + } + + if j > 0 { + current.next = &cTag{aliasTag: alias, actualAliasTag: current.actualAliasTag, hasAlias: hasAlias, hasTag: true} + current = current.next + } + current.hasParam = len(vals) > 1 + + current.tag = vals[0] + if len(current.tag) == 0 { + panic(strings.TrimSpace(fmt.Sprintf(invalidValidation, fieldName))) + } + + if wrapper, ok := v.validations[current.tag]; ok { + current.fn = wrapper.fn + current.runValidationWhenNil = wrapper.runValidatinOnNil + } else { + panic(strings.TrimSpace(fmt.Sprintf(undefinedValidation, current.tag, fieldName))) + } + + if len(orVals) > 1 { + current.typeof = typeOr + } + + if len(vals) > 1 { + current.param = strings.Replace(strings.Replace(vals[1], utf8HexComma, ",", -1), utf8Pipe, "|", -1) + } + } + current.isBlockEnd = true + } + } + return +} + +func (v *Validate) fetchCacheTag(tag string) *cTag { + // find cached tag + ctag, found := v.tagCache.Get(tag) + if !found { + v.tagCache.lock.Lock() + defer v.tagCache.lock.Unlock() + + 
// could have been multiple trying to access, but once first is done this ensures tag + // isn't parsed again. + ctag, found = v.tagCache.Get(tag) + if !found { + ctag, _ = v.parseFieldTagsRecursive(tag, "", "", false) + v.tagCache.Set(tag, ctag) + } + } + return ctag +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/country_codes.go b/terraform-server/vendor/github.com/go-playground/validator/v10/country_codes.go new file mode 100644 index 00000000..ef81eada --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/country_codes.go @@ -0,0 +1,162 @@ +package validator + +var iso3166_1_alpha2 = map[string]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AF": true, "AX": true, "AL": true, "DZ": true, "AS": true, + "AD": true, "AO": true, "AI": true, "AQ": true, "AG": true, + "AR": true, "AM": true, "AW": true, "AU": true, "AT": true, + "AZ": true, "BS": true, "BH": true, "BD": true, "BB": true, + "BY": true, "BE": true, "BZ": true, "BJ": true, "BM": true, + "BT": true, "BO": true, "BQ": true, "BA": true, "BW": true, + "BV": true, "BR": true, "IO": true, "BN": true, "BG": true, + "BF": true, "BI": true, "KH": true, "CM": true, "CA": true, + "CV": true, "KY": true, "CF": true, "TD": true, "CL": true, + "CN": true, "CX": true, "CC": true, "CO": true, "KM": true, + "CG": true, "CD": true, "CK": true, "CR": true, "CI": true, + "HR": true, "CU": true, "CW": true, "CY": true, "CZ": true, + "DK": true, "DJ": true, "DM": true, "DO": true, "EC": true, + "EG": true, "SV": true, "GQ": true, "ER": true, "EE": true, + "ET": true, "FK": true, "FO": true, "FJ": true, "FI": true, + "FR": true, "GF": true, "PF": true, "TF": true, "GA": true, + "GM": true, "GE": true, "DE": true, "GH": true, "GI": true, + "GR": true, "GL": true, "GD": true, "GP": true, "GU": true, + "GT": true, "GG": true, "GN": true, "GW": true, "GY": true, + "HT": true, "HM": true, "VA": true, "HN": true, "HK": true, + "HU": true, "IS": true, 
"IN": true, "ID": true, "IR": true, + "IQ": true, "IE": true, "IM": true, "IL": true, "IT": true, + "JM": true, "JP": true, "JE": true, "JO": true, "KZ": true, + "KE": true, "KI": true, "KP": true, "KR": true, "KW": true, + "KG": true, "LA": true, "LV": true, "LB": true, "LS": true, + "LR": true, "LY": true, "LI": true, "LT": true, "LU": true, + "MO": true, "MK": true, "MG": true, "MW": true, "MY": true, + "MV": true, "ML": true, "MT": true, "MH": true, "MQ": true, + "MR": true, "MU": true, "YT": true, "MX": true, "FM": true, + "MD": true, "MC": true, "MN": true, "ME": true, "MS": true, + "MA": true, "MZ": true, "MM": true, "NA": true, "NR": true, + "NP": true, "NL": true, "NC": true, "NZ": true, "NI": true, + "NE": true, "NG": true, "NU": true, "NF": true, "MP": true, + "NO": true, "OM": true, "PK": true, "PW": true, "PS": true, + "PA": true, "PG": true, "PY": true, "PE": true, "PH": true, + "PN": true, "PL": true, "PT": true, "PR": true, "QA": true, + "RE": true, "RO": true, "RU": true, "RW": true, "BL": true, + "SH": true, "KN": true, "LC": true, "MF": true, "PM": true, + "VC": true, "WS": true, "SM": true, "ST": true, "SA": true, + "SN": true, "RS": true, "SC": true, "SL": true, "SG": true, + "SX": true, "SK": true, "SI": true, "SB": true, "SO": true, + "ZA": true, "GS": true, "SS": true, "ES": true, "LK": true, + "SD": true, "SR": true, "SJ": true, "SZ": true, "SE": true, + "CH": true, "SY": true, "TW": true, "TJ": true, "TZ": true, + "TH": true, "TL": true, "TG": true, "TK": true, "TO": true, + "TT": true, "TN": true, "TR": true, "TM": true, "TC": true, + "TV": true, "UG": true, "UA": true, "AE": true, "GB": true, + "US": true, "UM": true, "UY": true, "UZ": true, "VU": true, + "VE": true, "VN": true, "VG": true, "VI": true, "WF": true, + "EH": true, "YE": true, "ZM": true, "ZW": true, +} + +var iso3166_1_alpha3 = map[string]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + "AFG": true, "ALB": true, "DZA": true, "ASM": true, "AND": true, + 
"AGO": true, "AIA": true, "ATA": true, "ATG": true, "ARG": true, + "ARM": true, "ABW": true, "AUS": true, "AUT": true, "AZE": true, + "BHS": true, "BHR": true, "BGD": true, "BRB": true, "BLR": true, + "BEL": true, "BLZ": true, "BEN": true, "BMU": true, "BTN": true, + "BOL": true, "BES": true, "BIH": true, "BWA": true, "BVT": true, + "BRA": true, "IOT": true, "BRN": true, "BGR": true, "BFA": true, + "BDI": true, "CPV": true, "KHM": true, "CMR": true, "CAN": true, + "CYM": true, "CAF": true, "TCD": true, "CHL": true, "CHN": true, + "CXR": true, "CCK": true, "COL": true, "COM": true, "COD": true, + "COG": true, "COK": true, "CRI": true, "HRV": true, "CUB": true, + "CUW": true, "CYP": true, "CZE": true, "CIV": true, "DNK": true, + "DJI": true, "DMA": true, "DOM": true, "ECU": true, "EGY": true, + "SLV": true, "GNQ": true, "ERI": true, "EST": true, "SWZ": true, + "ETH": true, "FLK": true, "FRO": true, "FJI": true, "FIN": true, + "FRA": true, "GUF": true, "PYF": true, "ATF": true, "GAB": true, + "GMB": true, "GEO": true, "DEU": true, "GHA": true, "GIB": true, + "GRC": true, "GRL": true, "GRD": true, "GLP": true, "GUM": true, + "GTM": true, "GGY": true, "GIN": true, "GNB": true, "GUY": true, + "HTI": true, "HMD": true, "VAT": true, "HND": true, "HKG": true, + "HUN": true, "ISL": true, "IND": true, "IDN": true, "IRN": true, + "IRQ": true, "IRL": true, "IMN": true, "ISR": true, "ITA": true, + "JAM": true, "JPN": true, "JEY": true, "JOR": true, "KAZ": true, + "KEN": true, "KIR": true, "PRK": true, "KOR": true, "KWT": true, + "KGZ": true, "LAO": true, "LVA": true, "LBN": true, "LSO": true, + "LBR": true, "LBY": true, "LIE": true, "LTU": true, "LUX": true, + "MAC": true, "MDG": true, "MWI": true, "MYS": true, "MDV": true, + "MLI": true, "MLT": true, "MHL": true, "MTQ": true, "MRT": true, + "MUS": true, "MYT": true, "MEX": true, "FSM": true, "MDA": true, + "MCO": true, "MNG": true, "MNE": true, "MSR": true, "MAR": true, + "MOZ": true, "MMR": true, "NAM": true, "NRU": true, 
"NPL": true, + "NLD": true, "NCL": true, "NZL": true, "NIC": true, "NER": true, + "NGA": true, "NIU": true, "NFK": true, "MKD": true, "MNP": true, + "NOR": true, "OMN": true, "PAK": true, "PLW": true, "PSE": true, + "PAN": true, "PNG": true, "PRY": true, "PER": true, "PHL": true, + "PCN": true, "POL": true, "PRT": true, "PRI": true, "QAT": true, + "ROU": true, "RUS": true, "RWA": true, "REU": true, "BLM": true, + "SHN": true, "KNA": true, "LCA": true, "MAF": true, "SPM": true, + "VCT": true, "WSM": true, "SMR": true, "STP": true, "SAU": true, + "SEN": true, "SRB": true, "SYC": true, "SLE": true, "SGP": true, + "SXM": true, "SVK": true, "SVN": true, "SLB": true, "SOM": true, + "ZAF": true, "SGS": true, "SSD": true, "ESP": true, "LKA": true, + "SDN": true, "SUR": true, "SJM": true, "SWE": true, "CHE": true, + "SYR": true, "TWN": true, "TJK": true, "TZA": true, "THA": true, + "TLS": true, "TGO": true, "TKL": true, "TON": true, "TTO": true, + "TUN": true, "TUR": true, "TKM": true, "TCA": true, "TUV": true, + "UGA": true, "UKR": true, "ARE": true, "GBR": true, "UMI": true, + "USA": true, "URY": true, "UZB": true, "VUT": true, "VEN": true, + "VNM": true, "VGB": true, "VIR": true, "WLF": true, "ESH": true, + "YEM": true, "ZMB": true, "ZWE": true, "ALA": true, +} +var iso3166_1_alpha_numeric = map[int]bool{ + // see: https://www.iso.org/iso-3166-country-codes.html + 4: true, 8: true, 12: true, 16: true, 20: true, + 24: true, 660: true, 10: true, 28: true, 32: true, + 51: true, 533: true, 36: true, 40: true, 31: true, + 44: true, 48: true, 50: true, 52: true, 112: true, + 56: true, 84: true, 204: true, 60: true, 64: true, + 68: true, 535: true, 70: true, 72: true, 74: true, + 76: true, 86: true, 96: true, 100: true, 854: true, + 108: true, 132: true, 116: true, 120: true, 124: true, + 136: true, 140: true, 148: true, 152: true, 156: true, + 162: true, 166: true, 170: true, 174: true, 180: true, + 178: true, 184: true, 188: true, 191: true, 192: true, + 531: true, 196: true, 
203: true, 384: true, 208: true, + 262: true, 212: true, 214: true, 218: true, 818: true, + 222: true, 226: true, 232: true, 233: true, 748: true, + 231: true, 238: true, 234: true, 242: true, 246: true, + 250: true, 254: true, 258: true, 260: true, 266: true, + 270: true, 268: true, 276: true, 288: true, 292: true, + 300: true, 304: true, 308: true, 312: true, 316: true, + 320: true, 831: true, 324: true, 624: true, 328: true, + 332: true, 334: true, 336: true, 340: true, 344: true, + 348: true, 352: true, 356: true, 360: true, 364: true, + 368: true, 372: true, 833: true, 376: true, 380: true, + 388: true, 392: true, 832: true, 400: true, 398: true, + 404: true, 296: true, 408: true, 410: true, 414: true, + 417: true, 418: true, 428: true, 422: true, 426: true, + 430: true, 434: true, 438: true, 440: true, 442: true, + 446: true, 450: true, 454: true, 458: true, 462: true, + 466: true, 470: true, 584: true, 474: true, 478: true, + 480: true, 175: true, 484: true, 583: true, 498: true, + 492: true, 496: true, 499: true, 500: true, 504: true, + 508: true, 104: true, 516: true, 520: true, 524: true, + 528: true, 540: true, 554: true, 558: true, 562: true, + 566: true, 570: true, 574: true, 807: true, 580: true, + 578: true, 512: true, 586: true, 585: true, 275: true, + 591: true, 598: true, 600: true, 604: true, 608: true, + 612: true, 616: true, 620: true, 630: true, 634: true, + 642: true, 643: true, 646: true, 638: true, 652: true, + 654: true, 659: true, 662: true, 663: true, 666: true, + 670: true, 882: true, 674: true, 678: true, 682: true, + 686: true, 688: true, 690: true, 694: true, 702: true, + 534: true, 703: true, 705: true, 90: true, 706: true, + 710: true, 239: true, 728: true, 724: true, 144: true, + 729: true, 740: true, 744: true, 752: true, 756: true, + 760: true, 158: true, 762: true, 834: true, 764: true, + 626: true, 768: true, 772: true, 776: true, 780: true, + 788: true, 792: true, 795: true, 796: true, 798: true, + 800: true, 804: true, 784: 
true, 826: true, 581: true, + 840: true, 858: true, 860: true, 548: true, 862: true, + 704: true, 92: true, 850: true, 876: true, 732: true, + 887: true, 894: true, 716: true, 248: true, +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/doc.go b/terraform-server/vendor/github.com/go-playground/validator/v10/doc.go new file mode 100644 index 00000000..a816c20a --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/doc.go @@ -0,0 +1,1308 @@ +/* +Package validator implements value validations for structs and individual fields +based on tags. + +It can also handle Cross-Field and Cross-Struct validation for nested structs +and has the ability to dive into arrays and maps of any type. + +see more examples https://github.com/go-playground/validator/tree/master/_examples + +Validation Functions Return Type error + +Doing things this way is actually the way the standard library does, see the +file.Open method here: + + https://golang.org/pkg/os/#Open. + +The authors return type "error" to avoid the issue discussed in the following, +where err is always != nil: + + http://stackoverflow.com/a/29138676/3158232 + https://github.com/go-playground/validator/issues/134 + +Validator only InvalidValidationError for bad validation input, nil or +ValidationErrors as type error; so, in your code all you need to do is check +if the error returned is not nil, and if it's not check if error is +InvalidValidationError ( if necessary, most of the time it isn't ) type cast +it to type ValidationErrors like so err.(validator.ValidationErrors). + +Custom Validation Functions + +Custom Validation functions can be added. 
Example: + + // Structure + func customFunc(fl validator.FieldLevel) bool { + + if fl.Field().String() == "invalid" { + return false + } + + return true + } + + validate.RegisterValidation("custom tag name", customFunc) + // NOTES: using the same tag name as an existing function + // will overwrite the existing one + +Cross-Field Validation + +Cross-Field Validation can be done via the following tags: + - eqfield + - nefield + - gtfield + - gtefield + - ltfield + - ltefield + - eqcsfield + - necsfield + - gtcsfield + - gtecsfield + - ltcsfield + - ltecsfield + +If, however, some custom cross-field validation is required, it can be done +using a custom validation. + +Why not just have cross-fields validation tags (i.e. only eqcsfield and not +eqfield)? + +The reason is efficiency. If you want to check a field within the same struct +"eqfield" only has to find the field on the same struct (1 level). But, if we +used "eqcsfield" it could be multiple levels down. Example: + + type Inner struct { + StartDate time.Time + } + + type Outer struct { + InnerStructField *Inner + CreatedAt time.Time `validate:"ltecsfield=InnerStructField.StartDate"` + } + + now := time.Now() + + inner := &Inner{ + StartDate: now, + } + + outer := &Outer{ + InnerStructField: inner, + CreatedAt: now, + } + + errs := validate.Struct(outer) + + // NOTE: when calling validate.Struct(val) topStruct will be the top level struct passed + // into the function + // when calling validate.VarWithValue(val, field, tag) val will be + // whatever you pass, struct, field... + // when calling validate.Field(field, tag) val will be nil + +Multiple Validators + +Multiple validators on a field will process in the order defined. Example: + + type Test struct { + Field `validate:"max=10,min=1"` + } + + // max will be checked then min + +Bad Validator definitions are not handled by the library. 
Example: + + type Test struct { + Field `validate:"min=10,max=0"` + } + + // this definition of min max will never succeed + +Using Validator Tags + +Baked In Cross-Field validation only compares fields on the same struct. +If Cross-Field + Cross-Struct validation is needed you should implement your +own custom validator. + +Comma (",") is the default separator of validation tags. If you wish to +have a comma included within the parameter (i.e. excludesall=,) you will need to +use the UTF-8 hex representation 0x2C, which is replaced in the code as a comma, +so the above will become excludesall=0x2C. + + type Test struct { + Field `validate:"excludesall=,"` // BAD! Do not include a comma. + Field `validate:"excludesall=0x2C"` // GOOD! Use the UTF-8 hex representation. + } + +Pipe ("|") is the 'or' validation tags deparator. If you wish to +have a pipe included within the parameter i.e. excludesall=| you will need to +use the UTF-8 hex representation 0x7C, which is replaced in the code as a pipe, +so the above will become excludesall=0x7C + + type Test struct { + Field `validate:"excludesall=|"` // BAD! Do not include a a pipe! + Field `validate:"excludesall=0x7C"` // GOOD! Use the UTF-8 hex representation. + } + + +Baked In Validators and Tags + +Here is a list of the current built in validators: + + +Skip Field + +Tells the validation to skip this struct field; this is particularly +handy in ignoring embedded structs from being validated. (Usage: -) + Usage: - + + +Or Operator + +This is the 'or' operator allowing multiple validators to be used and +accepted. (Usage: rgb|rgba) <-- this would allow either rgb or rgba +colors to be accepted. This can also be combined with 'and' for example +( Usage: omitempty,rgb|rgba) + + Usage: | + +StructOnly + +When a field that is a nested struct is encountered, and contains this flag +any validation on the nested struct will be run, but none of the nested +struct fields will be validated. 
This is useful if inside of your program +you know the struct will be valid, but need to verify it has been assigned. +NOTE: only "required" and "omitempty" can be used on a struct itself. + + Usage: structonly + +NoStructLevel + +Same as structonly tag except that any struct level validations will not run. + + Usage: nostructlevel + +Omit Empty + +Allows conditional validation, for example if a field is not set with +a value (Determined by the "required" validator) then other validation +such as min or max won't run, but if a value is set validation will run. + + Usage: omitempty + +Dive + +This tells the validator to dive into a slice, array or map and validate that +level of the slice, array or map with the validation tags that follow. +Multidimensional nesting is also supported, each level you wish to dive will +require another dive tag. dive has some sub-tags, 'keys' & 'endkeys', please see +the Keys & EndKeys section just below. + + Usage: dive + +Example #1 + + [][]string with validation tag "gt=0,dive,len=1,dive,required" + // gt=0 will be applied to [] + // len=1 will be applied to []string + // required will be applied to string + +Example #2 + + [][]string with validation tag "gt=0,dive,dive,required" + // gt=0 will be applied to [] + // []string will be spared validation + // required will be applied to string + +Keys & EndKeys + +These are to be used together directly after the dive tag and tells the validator +that anything between 'keys' and 'endkeys' applies to the keys of a map and not the +values; think of it like the 'dive' tag, but for map keys instead of values. +Multidimensional nesting is also supported, each level you wish to validate will +require another 'keys' and 'endkeys' tag. These tags are only valid for maps. 
+ + Usage: dive,keys,othertagvalidation(s),endkeys,valuevalidationtags + +Example #1 + + map[string]string with validation tag "gt=0,dive,keys,eg=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eg=1|eq=2 will be applied to the map keys + // required will be applied to map values + +Example #2 + + map[[2]string]string with validation tag "gt=0,dive,keys,dive,eq=1|eq=2,endkeys,required" + // gt=0 will be applied to the map itself + // eg=1|eq=2 will be applied to each array element in the the map keys + // required will be applied to map values + +Required + +This validates that the value is not the data types default zero value. +For numbers ensures value is not zero. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required + +Required If + +The field under validation must be present and not empty only if all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. + + Usage: required_if + +Examples: + + // require the field if the Field1 is equal to the parameter given: + Usage: required_if=Field1 foobar + + // require the field if the Field1 and Field2 is equal to the value respectively: + Usage: required_if=Field1 foo Field2 bar + +Required Unless + +The field under validation must be present and not empty unless all +the other specified fields are equal to the value following the specified +field. For strings ensures value is not "". For slices, maps, pointers, +interfaces, channels and functions ensures the value is not nil. 
+ + Usage: required_unless + +Examples: + + // require the field unless the Field1 is equal to the parameter given: + Usage: required_unless=Field1 foobar + + // require the field unless the Field1 and Field2 is equal to the value respectively: + Usage: required_unless=Field1 foo Field2 bar + +Required With + +The field under validation must be present and not empty only if any +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_with + +Examples: + + // require the field if the Field1 is present: + Usage: required_with=Field1 + + // require the field if the Field1 or Field2 is present: + Usage: required_with=Field1 Field2 + +Required With All + +The field under validation must be present and not empty only if all +of the other specified fields are present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_with_all + +Example: + + // require the field if the Field1 and Field2 is present: + Usage: required_with_all=Field1 Field2 + +Required Without + +The field under validation must be present and not empty only when any +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. + + Usage: required_without + +Examples: + + // require the field if the Field1 is not present: + Usage: required_without=Field1 + + // require the field if the Field1 or Field2 is not present: + Usage: required_without=Field1 Field2 + +Required Without All + +The field under validation must be present and not empty only when all +of the other specified fields are not present. For strings ensures value is +not "". For slices, maps, pointers, interfaces, channels and functions +ensures the value is not nil. 
+ + Usage: required_without_all + +Example: + + // require the field if the Field1 and Field2 is not present: + Usage: required_without_all=Field1 Field2 + +Is Default + +This validates that the value is the default value and is almost the +opposite of required. + + Usage: isdefault + +Length + +For numbers, length will ensure that the value is +equal to the parameter given. For strings, it checks that +the string length is exactly that number of characters. For slices, +arrays, and maps, validates the number of items. + +Example #1 + + Usage: len=10 + +Example #2 (time.Duration) + +For time.Duration, len will ensure that the value is equal to the duration given +in the parameter. + + Usage: len=1h30m + +Maximum + +For numbers, max will ensure that the value is +less than or equal to the parameter given. For strings, it checks +that the string length is at most that number of characters. For +slices, arrays, and maps, validates the number of items. + +Example #1 + + Usage: max=10 + +Example #2 (time.Duration) + +For time.Duration, max will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: max=1h30m + +Minimum + +For numbers, min will ensure that the value is +greater or equal to the parameter given. For strings, it checks that +the string length is at least that number of characters. For slices, +arrays, and maps, validates the number of items. + +Example #1 + + Usage: min=10 + +Example #2 (time.Duration) + +For time.Duration, min will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: min=1h30m + +Equals + +For strings & numbers, eq will ensure that the value is +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. + +Example #1 + + Usage: eq=10 + +Example #2 (time.Duration) + +For time.Duration, eq will ensure that the value is equal to the duration given +in the parameter. 
+ + Usage: eq=1h30m + +Not Equal + +For strings & numbers, ne will ensure that the value is not +equal to the parameter given. For slices, arrays, and maps, +validates the number of items. + +Example #1 + + Usage: ne=10 + +Example #2 (time.Duration) + +For time.Duration, ne will ensure that the value is not equal to the duration +given in the parameter. + + Usage: ne=1h30m + +One Of + +For strings, ints, and uints, oneof will ensure that the value +is one of the values in the parameter. The parameter should be +a list of values separated by whitespace. Values may be +strings or numbers. To match strings with spaces in them, include +the target string between single quotes. + + Usage: oneof=red green + oneof='red green' 'blue yellow' + oneof=5 7 9 + +Greater Than + +For numbers, this will ensure that the value is greater than the +parameter given. For strings, it checks that the string length +is greater than that number of characters. For slices, arrays +and maps it validates the number of items. + +Example #1 + + Usage: gt=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than time.Now.UTC(). + + Usage: gt + +Example #3 (time.Duration) + +For time.Duration, gt will ensure that the value is greater than the duration +given in the parameter. + + Usage: gt=1h30m + +Greater Than or Equal + +Same as 'min' above. Kept both to make terminology with 'len' easier. + +Example #1 + + Usage: gte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is greater than or equal to time.Now.UTC(). + + Usage: gte + +Example #3 (time.Duration) + +For time.Duration, gte will ensure that the value is greater than or equal to +the duration given in the parameter. + + Usage: gte=1h30m + +Less Than + +For numbers, this will ensure that the value is less than the parameter given. +For strings, it checks that the string length is less than that number of +characters. For slices, arrays, and maps it validates the number of items. 
+ +Example #1 + + Usage: lt=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is less than time.Now.UTC(). + + Usage: lt + +Example #3 (time.Duration) + +For time.Duration, lt will ensure that the value is less than the duration given +in the parameter. + + Usage: lt=1h30m + +Less Than or Equal + +Same as 'max' above. Kept both to make terminology with 'len' easier. + +Example #1 + + Usage: lte=10 + +Example #2 (time.Time) + +For time.Time ensures the time value is less than or equal to time.Now.UTC(). + + Usage: lte + +Example #3 (time.Duration) + +For time.Duration, lte will ensure that the value is less than or equal to the +duration given in the parameter. + + Usage: lte=1h30m + +Field Equals Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. + +Example #1: + + // Validation on Password field using: + Usage: eqfield=ConfirmPassword + +Example #2: + + // Validating by field: + validate.VarWithValue(password, confirmpassword, "eqfield") + +Field Equals Another Field (relative) + +This does the same as eqfield except that it validates the field provided relative +to the top level struct. + + Usage: eqcsfield=InnerStructField.Field) + +Field Does Not Equal Another Field + +This will validate the field value against another fields value either within +a struct or passed in field. + +Examples: + + // Confirm two colors are not the same: + // + // Validation on Color field: + Usage: nefield=Color2 + + // Validating by field: + validate.VarWithValue(color1, color2, "nefield") + +Field Does Not Equal Another Field (relative) + +This does the same as nefield except that it validates the field provided +relative to the top level struct. 
+ + Usage: necsfield=InnerStructField.Field + +Field Greater Than Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtfield") + +Field Greater Than Another Relative Field + +This does the same as gtfield except that it validates the field provided +relative to the top level struct. + + Usage: gtcsfield=InnerStructField.Field + +Field Greater Than or Equal To Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(gtefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "gtefield") + +Field Greater Than or Equal To Another Relative Field + +This does the same as gtefield except that it validates the field provided relative +to the top level struct. + + Usage: gtecsfield=InnerStructField.Field + +Less Than Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltfield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltfield") + +Less Than Another Relative Field + +This does the same as ltfield except that it validates the field provided relative +to the top level struct. 
+ + Usage: ltcsfield=InnerStructField.Field + +Less Than or Equal To Another Field + +Only valid for Numbers, time.Duration and time.Time types, this will validate +the field value against another fields value either within a struct or passed in +field. usage examples are for validation of a Start and End date: + +Example #1: + + // Validation on End field using: + validate.Struct Usage(ltefield=Start) + +Example #2: + + // Validating by field: + validate.VarWithValue(start, end, "ltefield") + +Less Than or Equal To Another Relative Field + +This does the same as ltefield except that it validates the field provided relative +to the top level struct. + + Usage: ltecsfield=InnerStructField.Field + +Field Contains Another Field + +This does the same as contains except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: containsfield=InnerStructField.Field + +Field Excludes Another Field + +This does the same as excludes except for struct fields. It should only be used +with string types. See the behavior of reflect.Value.String() for behavior on +other types. + + Usage: excludesfield=InnerStructField.Field + +Unique + +For arrays & slices, unique will ensure that there are no duplicates. +For maps, unique will ensure that there are no duplicate values. +For slices of struct, unique will ensure that there are no duplicate values +in a field of the struct specified via a parameter. 
+ + // For arrays, slices, and maps: + Usage: unique + + // For slices of struct: + Usage: unique=field + +Alpha Only + +This validates that a string value contains ASCII alpha characters only + + Usage: alpha + +Alphanumeric + +This validates that a string value contains ASCII alphanumeric characters only + + Usage: alphanum + +Alpha Unicode + +This validates that a string value contains unicode alpha characters only + + Usage: alphaunicode + +Alphanumeric Unicode + +This validates that a string value contains unicode alphanumeric characters only + + Usage: alphanumunicode + +Number + +This validates that a string value contains number values only. +For integers or float it returns true. + + Usage: number + +Numeric + +This validates that a string value contains a basic numeric value. +basic excludes exponents etc... +for integers or float it returns true. + + Usage: numeric + +Hexadecimal String + +This validates that a string value contains a valid hexadecimal. + + Usage: hexadecimal + +Hexcolor String + +This validates that a string value contains a valid hex color including +hashtag (#) + + Usage: hexcolor + +Lowercase String + +This validates that a string value contains only lowercase characters. An empty string is not a valid lowercase string. + + Usage: lowercase + +Uppercase String + +This validates that a string value contains only uppercase characters. An empty string is not a valid uppercase string. + + Usage: uppercase + +RGB String + +This validates that a string value contains a valid rgb color + + Usage: rgb + +RGBA String + +This validates that a string value contains a valid rgba color + + Usage: rgba + +HSL String + +This validates that a string value contains a valid hsl color + + Usage: hsl + +HSLA String + +This validates that a string value contains a valid hsla color + + Usage: hsla + +E.164 Phone Number String + +This validates that a string value contains a valid E.164 Phone number +https://en.wikipedia.org/wiki/E.164 (ex. 
+1123456789) + + Usage: e164 + +E-mail String + +This validates that a string value contains a valid email +This may not conform to all possibilities of any rfc standard, but neither +does any email provider accept all possibilities. + + Usage: email + +JSON String + +This validates that a string value is valid JSON + + Usage: json + +File path + +This validates that a string value contains a valid file path and that +the file exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: file + +URL String + +This validates that a string value contains a valid url +This will accept any url the golang request uri accepts but must contain +a schema for example http:// or rtmp:// + + Usage: url + +URI String + +This validates that a string value contains a valid uri +This will accept any uri the golang request uri accepts + + Usage: uri + +Urn RFC 2141 String + +This validataes that a string value contains a valid URN +according to the RFC 2141 spec. + + Usage: urn_rfc2141 + +Base64 String + +This validates that a string value contains a valid base64 value. +Although an empty string is valid base64 this will report an empty string +as an error, if you wish to accept an empty string as valid you can use +this with the omitempty tag. + + Usage: base64 + +Base64URL String + +This validates that a string value contains a valid base64 URL safe value +according the the RFC4648 spec. +Although an empty string is a valid base64 URL safe value, this will report +an empty string as an error, if you wish to accept an empty string as valid +you can use this with the omitempty tag. + + Usage: base64url + +Bitcoin Address + +This validates that a string value contains a valid bitcoin address. +The format of the string is checked to ensure it matches one of the three formats +P2PKH, P2SH and performs checksum validation. 
+ + Usage: btc_addr + +Bitcoin Bech32 Address (segwit) + +This validates that a string value contains a valid bitcoin Bech32 address as defined +by bip-0173 (https://github.com/bitcoin/bips/blob/master/bip-0173.mediawiki) +Special thanks to Pieter Wuille for providng reference implementations. + + Usage: btc_addr_bech32 + +Ethereum Address + +This validates that a string value contains a valid ethereum address. +The format of the string is checked to ensure it matches the standard Ethereum address format. + + Usage: eth_addr + +Contains + +This validates that a string value contains the substring value. + + Usage: contains=@ + +Contains Any + +This validates that a string value contains any Unicode code points +in the substring value. + + Usage: containsany=!@#? + +Contains Rune + +This validates that a string value contains the supplied rune value. + + Usage: containsrune=@ + +Excludes + +This validates that a string value does not contain the substring value. + + Usage: excludes=@ + +Excludes All + +This validates that a string value does not contain any Unicode code +points in the substring value. + + Usage: excludesall=!@#? + +Excludes Rune + +This validates that a string value does not contain the supplied rune value. + + Usage: excludesrune=@ + +Starts With + +This validates that a string value starts with the supplied string value + + Usage: startswith=hello + +Ends With + +This validates that a string value ends with the supplied string value + + Usage: endswith=goodbye + +Does Not Start With + +This validates that a string value does not start with the supplied string value + + Usage: startsnotwith=hello + +Does Not End With + +This validates that a string value does not end with the supplied string value + + Usage: endsnotwith=goodbye + +International Standard Book Number + +This validates that a string value contains a valid isbn10 or isbn13 value. 
+ + Usage: isbn + +International Standard Book Number 10 + +This validates that a string value contains a valid isbn10 value. + + Usage: isbn10 + +International Standard Book Number 13 + +This validates that a string value contains a valid isbn13 value. + + Usage: isbn13 + +Universally Unique Identifier UUID + +This validates that a string value contains a valid UUID. Uppercase UUID values will not pass - use `uuid_rfc4122` instead. + + Usage: uuid + +Universally Unique Identifier UUID v3 + +This validates that a string value contains a valid version 3 UUID. Uppercase UUID values will not pass - use `uuid3_rfc4122` instead. + + Usage: uuid3 + +Universally Unique Identifier UUID v4 + +This validates that a string value contains a valid version 4 UUID. Uppercase UUID values will not pass - use `uuid4_rfc4122` instead. + + Usage: uuid4 + +Universally Unique Identifier UUID v5 + +This validates that a string value contains a valid version 5 UUID. Uppercase UUID values will not pass - use `uuid5_rfc4122` instead. + + Usage: uuid5 + +ASCII + +This validates that a string value contains only ASCII characters. +NOTE: if the string is blank, this validates as true. + + Usage: ascii + +Printable ASCII + +This validates that a string value contains only printable ASCII characters. +NOTE: if the string is blank, this validates as true. + + Usage: printascii + +Multi-Byte Characters + +This validates that a string value contains one or more multibyte characters. +NOTE: if the string is blank, this validates as true. + + Usage: multibyte + +Data URL + +This validates that a string value contains a valid DataURI. +NOTE: this will also validate that the data portion is valid base64 + + Usage: datauri + +Latitude + +This validates that a string value contains a valid latitude. + + Usage: latitude + +Longitude + +This validates that a string value contains a valid longitude. 
+ + Usage: longitude + +Social Security Number SSN + +This validates that a string value contains a valid U.S. Social Security Number. + + Usage: ssn + +Internet Protocol Address IP + +This validates that a string value contains a valid IP Address. + + Usage: ip + +Internet Protocol Address IPv4 + +This validates that a string value contains a valid v4 IP Address. + + Usage: ipv4 + +Internet Protocol Address IPv6 + +This validates that a string value contains a valid v6 IP Address. + + Usage: ipv6 + +Classless Inter-Domain Routing CIDR + +This validates that a string value contains a valid CIDR Address. + + Usage: cidr + +Classless Inter-Domain Routing CIDRv4 + +This validates that a string value contains a valid v4 CIDR Address. + + Usage: cidrv4 + +Classless Inter-Domain Routing CIDRv6 + +This validates that a string value contains a valid v6 CIDR Address. + + Usage: cidrv6 + +Transmission Control Protocol Address TCP + +This validates that a string value contains a valid resolvable TCP Address. + + Usage: tcp_addr + +Transmission Control Protocol Address TCPv4 + +This validates that a string value contains a valid resolvable v4 TCP Address. + + Usage: tcp4_addr + +Transmission Control Protocol Address TCPv6 + +This validates that a string value contains a valid resolvable v6 TCP Address. + + Usage: tcp6_addr + +User Datagram Protocol Address UDP + +This validates that a string value contains a valid resolvable UDP Address. + + Usage: udp_addr + +User Datagram Protocol Address UDPv4 + +This validates that a string value contains a valid resolvable v4 UDP Address. + + Usage: udp4_addr + +User Datagram Protocol Address UDPv6 + +This validates that a string value contains a valid resolvable v6 UDP Address. + + Usage: udp6_addr + +Internet Protocol Address IP + +This validates that a string value contains a valid resolvable IP Address. + + Usage: ip_addr + +Internet Protocol Address IPv4 + +This validates that a string value contains a valid resolvable v4 IP Address. 
+ + Usage: ip4_addr + +Internet Protocol Address IPv6 + +This validates that a string value contains a valid resolvable v6 IP Address. + + Usage: ip6_addr + +Unix domain socket end point Address + +This validates that a string value contains a valid Unix Address. + + Usage: unix_addr + +Media Access Control Address MAC + +This validates that a string value contains a valid MAC Address. + + Usage: mac + +Note: See Go's ParseMAC for accepted formats and types: + + http://golang.org/src/net/mac.go?s=866:918#L29 + +Hostname RFC 952 + +This validates that a string value is a valid Hostname according to RFC 952 https://tools.ietf.org/html/rfc952 + + Usage: hostname + +Hostname RFC 1123 + +This validates that a string value is a valid Hostname according to RFC 1123 https://tools.ietf.org/html/rfc1123 + + Usage: hostname_rfc1123 or if you want to continue to use 'hostname' in your tags, create an alias. + +Full Qualified Domain Name (FQDN) + +This validates that a string value contains a valid FQDN. + + Usage: fqdn + +HTML Tags + +This validates that a string value appears to be an HTML element tag +including those described at https://developer.mozilla.org/en-US/docs/Web/HTML/Element + + Usage: html + +HTML Encoded + +This validates that a string value is a proper character reference in decimal +or hexadecimal format + + Usage: html_encoded + +URL Encoded + +This validates that a string value is percent-encoded (URL encoded) according +to https://tools.ietf.org/html/rfc3986#section-2.1 + + Usage: url_encoded + +Directory + +This validates that a string value contains a valid directory and that +it exists on the machine. +This is done using os.Stat, which is a platform independent function. + + Usage: dir + +HostPort + +This validates that a string value contains a valid DNS hostname and port that +can be used to valiate fields typically passed to sockets and connections. 
+ + Usage: hostname_port + +Datetime + +This validates that a string value is a valid datetime based on the supplied datetime format. +Supplied format must match the official Go time format layout as documented in https://golang.org/pkg/time/ + + Usage: datetime=2006-01-02 + +Iso3166-1 alpha-2 + +This validates that a string value is a valid country code based on iso3166-1 alpha-2 standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha2 + +Iso3166-1 alpha-3 + +This validates that a string value is a valid country code based on iso3166-1 alpha-3 standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +Iso3166-1 alpha-numeric + +This validates that a string value is a valid country code based on iso3166-1 alpha-numeric standard. +see: https://www.iso.org/iso-3166-country-codes.html + + Usage: iso3166_1_alpha3 + +TimeZone + +This validates that a string value is a valid time zone based on the time zone database present on the system. +Although empty value and Local value are allowed by time.LoadLocation golang function, they are not allowed by this validator. +More information on https://golang.org/pkg/time/#LoadLocation + + Usage: timezone + + +Alias Validators and Tags + +NOTE: When returning an error, the tag returned in "FieldError" will be +the alias tag unless the dive tag is part of the alias. Everything after the +dive tag is not reported as the alias tag. Also, the "ActualTag" in the before +case will be the actual tag within the alias that failed. + +Here is a list of the current built in alias tags: + + "iscolor" + alias is "hexcolor|rgb|rgba|hsl|hsla" (Usage: iscolor) + "country_code" + alias is "iso3166_1_alpha2|iso3166_1_alpha3|iso3166_1_alpha_numeric" (Usage: country_code) + +Validator notes: + + regex + a regex validator won't be added because commas and = signs can be part + of a regex which conflict with the validation definitions. 
Although + workarounds can be made, they take away from using pure regex's. + Furthermore it's quick and dirty but the regex's become harder to + maintain and are not reusable, so it's as much a programming philosophy + as anything. + + In place of this new validator functions should be created; a regex can + be used within the validator function and even be precompiled for better + efficiency within regexes.go. + + And the best reason, you can submit a pull request and we can keep on + adding to the validation library of this package! + +Non standard validators + +A collection of validation rules that are frequently needed but are more +complex than the ones found in the baked in validators. +A non standard validator must be registered manually like you would +with your own custom validation functions. + +Example of registration and use: + + type Test struct { + TestField string `validate:"yourtag"` + } + + t := &Test{ + TestField: "Test" + } + + validate := validator.New() + validate.RegisterValidation("yourtag", validators.NotBlank) + +Here is a list of the current non standard validators: + + NotBlank + This validates that the value is not blank or with length zero. + For strings ensures they do not contain only spaces. For channels, maps, slices and arrays + ensures they don't have zero length. For others, a non empty value is required. + + Usage: notblank + +Panics + +This package panics when bad input is provided, this is by design, bad code like +that should not make it to production. 
+ + type Test struct { + TestField string `validate:"nonexistantfunction=1"` + } + + t := &Test{ + TestField: "Test" + } + + validate.Struct(t) // this will panic +*/ +package validator diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/errors.go b/terraform-server/vendor/github.com/go-playground/validator/v10/errors.go new file mode 100644 index 00000000..63293cf9 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/errors.go @@ -0,0 +1,275 @@ +package validator + +import ( + "bytes" + "fmt" + "reflect" + "strings" + + ut "github.com/go-playground/universal-translator" +) + +const ( + fieldErrMsg = "Key: '%s' Error:Field validation for '%s' failed on the '%s' tag" +) + +// ValidationErrorsTranslations is the translation return type +type ValidationErrorsTranslations map[string]string + +// InvalidValidationError describes an invalid argument passed to +// `Struct`, `StructExcept`, StructPartial` or `Field` +type InvalidValidationError struct { + Type reflect.Type +} + +// Error returns InvalidValidationError message +func (e *InvalidValidationError) Error() string { + + if e.Type == nil { + return "validator: (nil)" + } + + return "validator: (nil " + e.Type.String() + ")" +} + +// ValidationErrors is an array of FieldError's +// for use in custom error messages post validation. +type ValidationErrors []FieldError + +// Error is intended for use in development + debugging and not intended to be a production error message. +// It allows ValidationErrors to subscribe to the Error interface. 
+// All information to create an error message specific to your application is contained within +// the FieldError found within the ValidationErrors array +func (ve ValidationErrors) Error() string { + + buff := bytes.NewBufferString("") + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + + fe = ve[i].(*fieldError) + buff.WriteString(fe.Error()) + buff.WriteString("\n") + } + + return strings.TrimSpace(buff.String()) +} + +// Translate translates all of the ValidationErrors +func (ve ValidationErrors) Translate(ut ut.Translator) ValidationErrorsTranslations { + + trans := make(ValidationErrorsTranslations) + + var fe *fieldError + + for i := 0; i < len(ve); i++ { + fe = ve[i].(*fieldError) + + // // in case an Anonymous struct was used, ensure that the key + // // would be 'Username' instead of ".Username" + // if len(fe.ns) > 0 && fe.ns[:1] == "." { + // trans[fe.ns[1:]] = fe.Translate(ut) + // continue + // } + + trans[fe.ns] = fe.Translate(ut) + } + + return trans +} + +// FieldError contains all functions to get error details +type FieldError interface { + + // returns the validation tag that failed. if the + // validation was an alias, this will return the + // alias name and not the underlying tag that failed. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "iscolor" + Tag() string + + // returns the validation tag that failed, even if an + // alias the actual tag within the alias will be returned. + // If an 'or' validation fails the entire or will be returned. + // + // eg. alias "iscolor": "hexcolor|rgb|rgba|hsl|hsla" + // will return "hexcolor|rgb|rgba|hsl|hsla" + ActualTag() string + + // returns the namespace for the field error, with the tag + // name taking precedence over the field's actual name. + // + // eg. JSON name "User.fname" + // + // See StructNamespace() for a version that returns actual names. + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) 
as there is no way to extract it's name + Namespace() string + + // returns the namespace for the field error, with the field's + // actual name. + // + // eq. "User.FirstName" see Namespace for comparison + // + // NOTE: this field can be blank when validating a single primitive field + // using validate.Field(...) as there is no way to extract its name + StructNamespace() string + + // returns the fields name with the tag name taking precedence over the + // field's actual name. + // + // eq. JSON name "fname" + // see StructField for comparison + Field() string + + // returns the field's actual name from the struct, when able to determine. + // + // eq. "FirstName" + // see Field for comparison + StructField() string + + // returns the actual field's value in case needed for creating the error + // message + Value() interface{} + + // returns the param value, in string form for comparison; this will also + // help with generating an error message + Param() string + + // Kind returns the Field's reflect Kind + // + // eg. time.Time's kind is a struct + Kind() reflect.Kind + + // Type returns the Field's reflect Type + // + // // eg. 
time.Time's type is time.Time + Type() reflect.Type + + // returns the FieldError's translated error + // from the provided 'ut.Translator' and registered 'TranslationFunc' + // + // NOTE: if no registered translator can be found it returns the same as + // calling fe.Error() + Translate(ut ut.Translator) string + + // Error returns the FieldError's message + Error() string +} + +// compile time interface checks +var _ FieldError = new(fieldError) +var _ error = new(fieldError) + +// fieldError contains a single field's validation error along +// with other properties that may be needed for error message creation +// it complies with the FieldError interface +type fieldError struct { + v *Validate + tag string + actualTag string + ns string + structNs string + fieldLen uint8 + structfieldLen uint8 + value interface{} + param string + kind reflect.Kind + typ reflect.Type +} + +// Tag returns the validation tag that failed. +func (fe *fieldError) Tag() string { + return fe.tag +} + +// ActualTag returns the validation tag that failed, even if an +// alias the actual tag within the alias will be returned. +func (fe *fieldError) ActualTag() string { + return fe.actualTag +} + +// Namespace returns the namespace for the field error, with the tag +// name taking precedence over the field's actual name. +func (fe *fieldError) Namespace() string { + return fe.ns +} + +// StructNamespace returns the namespace for the field error, with the field's +// actual name. +func (fe *fieldError) StructNamespace() string { + return fe.structNs +} + +// Field returns the field's name with the tag name taking precedence over the +// field's actual name. +func (fe *fieldError) Field() string { + + return fe.ns[len(fe.ns)-int(fe.fieldLen):] + // // return fe.field + // fld := fe.ns[len(fe.ns)-int(fe.fieldLen):] + + // log.Println("FLD:", fld) + + // if len(fld) > 0 && fld[:1] == "." 
{ + // return fld[1:] + // } + + // return fld +} + +// returns the field's actual name from the struct, when able to determine. +func (fe *fieldError) StructField() string { + // return fe.structField + return fe.structNs[len(fe.structNs)-int(fe.structfieldLen):] +} + +// Value returns the actual field's value in case needed for creating the error +// message +func (fe *fieldError) Value() interface{} { + return fe.value +} + +// Param returns the param value, in string form for comparison; this will +// also help with generating an error message +func (fe *fieldError) Param() string { + return fe.param +} + +// Kind returns the Field's reflect Kind +func (fe *fieldError) Kind() reflect.Kind { + return fe.kind +} + +// Type returns the Field's reflect Type +func (fe *fieldError) Type() reflect.Type { + return fe.typ +} + +// Error returns the fieldError's error message +func (fe *fieldError) Error() string { + return fmt.Sprintf(fieldErrMsg, fe.ns, fe.Field(), fe.tag) +} + +// Translate returns the FieldError's translated error +// from the provided 'ut.Translator' and registered 'TranslationFunc' +// +// NOTE: if no registered translation can be found, it returns the original +// untranslated error message. 
+func (fe *fieldError) Translate(ut ut.Translator) string { + + m, ok := fe.v.transTagFunc[ut] + if !ok { + return fe.Error() + } + + fn, ok := m[fe.tag] + if !ok { + return fe.Error() + } + + return fn(ut, fe) +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/field_level.go b/terraform-server/vendor/github.com/go-playground/validator/v10/field_level.go new file mode 100644 index 00000000..f0e2a9a8 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/field_level.go @@ -0,0 +1,119 @@ +package validator + +import "reflect" + +// FieldLevel contains all the information and helper functions +// to validate a field +type FieldLevel interface { + // returns the top level struct, if any + Top() reflect.Value + + // returns the current fields parent struct, if any or + // the comparison value if called 'VarWithValue' + Parent() reflect.Value + + // returns current field for validation + Field() reflect.Value + + // returns the field's name with the tag + // name taking precedence over the fields actual name. + FieldName() string + + // returns the struct field's name + StructFieldName() string + + // returns param for validation against current field + Param() string + + // GetTag returns the current validations tag name + GetTag() string + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and it's kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. 
+ // + // Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable. + GetStructFieldOK() (reflect.Value, reflect.Kind, bool) + + // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. + // + // Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable. + GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) + + // traverses the parent struct to retrieve a specific field denoted by the provided namespace + // in the param and returns the field, field kind, if it's a nullable type and whether is was successful in retrieving + // the field at all. + // + // NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field + // could not be retrieved because it didn't exist. + GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) + + // GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for + // the field and namespace allowing more extensibility for validators. + GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) +} + +var _ FieldLevel = new(validate) + +// Field returns current field for validation +func (v *validate) Field() reflect.Value { + return v.flField +} + +// FieldName returns the field's name with the tag +// name taking precedence over the fields actual name. 
+func (v *validate) FieldName() string { + return v.cf.altName +} + +// GetTag returns the current validations tag name +func (v *validate) GetTag() string { + return v.ct.tag +} + +// StructFieldName returns the struct field's name +func (v *validate) StructFieldName() string { + return v.cf.name +} + +// Param returns param for validation against current field +func (v *validate) Param() string { + return v.ct.param +} + +// GetStructFieldOK returns Param returns param for validation against current field +// +// Deprecated: Use GetStructFieldOK2() instead which also return if the value is nullable. +func (v *validate) GetStructFieldOK() (reflect.Value, reflect.Kind, bool) { + current, kind, _, found := v.getStructFieldOKInternal(v.slflParent, v.ct.param) + return current, kind, found +} + +// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for +// the field and namespace allowing more extensibility for validators. +// +// Deprecated: Use GetStructFieldOKAdvanced2() instead which also return if the value is nullable. +func (v *validate) GetStructFieldOKAdvanced(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool) { + current, kind, _, found := v.GetStructFieldOKAdvanced2(val, namespace) + return current, kind, found +} + +// GetStructFieldOK returns Param returns param for validation against current field +func (v *validate) GetStructFieldOK2() (reflect.Value, reflect.Kind, bool, bool) { + return v.getStructFieldOKInternal(v.slflParent, v.ct.param) +} + +// GetStructFieldOKAdvanced is the same as GetStructFieldOK except that it accepts the parent struct to start looking for +// the field and namespace allowing more extensibility for validators. 
+func (v *validate) GetStructFieldOKAdvanced2(val reflect.Value, namespace string) (reflect.Value, reflect.Kind, bool, bool) { + return v.getStructFieldOKInternal(val, namespace) +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/go.mod b/terraform-server/vendor/github.com/go-playground/validator/v10/go.mod new file mode 100644 index 00000000..d457100e --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/go.mod @@ -0,0 +1,11 @@ +module github.com/go-playground/validator/v10 + +go 1.13 + +require ( + github.com/go-playground/assert/v2 v2.0.1 + github.com/go-playground/locales v0.13.0 + github.com/go-playground/universal-translator v0.17.0 + github.com/leodido/go-urn v1.2.0 + golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 +) diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/go.sum b/terraform-server/vendor/github.com/go-playground/validator/v10/go.sum new file mode 100644 index 00000000..01526427 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/go.sum @@ -0,0 +1,28 @@ +github.com/davecgh/go-spew v1.1.0 h1:ZDRjVQ15GmhC3fiQ8ni8+OwkZQO4DARzQgrnXU1Liz8= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-playground/assert/v2 v2.0.1 h1:MsBgLAaY856+nPRTKrp3/OZK38U/wa0CcBYNjji3q3A= +github.com/go-playground/assert/v2 v2.0.1/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4= +github.com/go-playground/locales v0.13.0 h1:HyWk6mgj5qFqCT5fjGBuRArbVDfE4hi8+e8ceBS/t7Q= +github.com/go-playground/locales v0.13.0/go.mod h1:taPMhCMXrRLJO55olJkUXHZBHCxTMfnGwq/HNwmWNS8= +github.com/go-playground/universal-translator v0.17.0 h1:icxd5fm+REJzpZx7ZfpaD876Lmtgy7VtROAbHHXk8no= +github.com/go-playground/universal-translator v0.17.0/go.mod h1:UkSxE5sNxxRwHyU+Scu5vgOQjsIJAF8j9muTVoKLVtA= +github.com/leodido/go-urn v1.2.0 h1:hpXL4XnriNwQ/ABnpepYM/1vCLWNDfUNts8dX3xTG6Y= +github.com/leodido/go-urn v1.2.0/go.mod 
h1:+8+nEpDfqqsY+g338gtMEUOtuK+4dEMhiQEgxpxOKII= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.4.0 h1:2E4SXV/wtOkTonXsotYi4li6zVWxYlZuYNCXe9XRJyk= +github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI= +golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto= +golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= +golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/logo.png b/terraform-server/vendor/github.com/go-playground/validator/v10/logo.png new file mode 100644 index 00000000..355000f5 Binary files 
/dev/null and b/terraform-server/vendor/github.com/go-playground/validator/v10/logo.png differ diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/regexes.go b/terraform-server/vendor/github.com/go-playground/validator/v10/regexes.go new file mode 100644 index 00000000..b741f4e1 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/regexes.go @@ -0,0 +1,101 @@ +package validator + +import "regexp" + +const ( + alphaRegexString = "^[a-zA-Z]+$" + alphaNumericRegexString = "^[a-zA-Z0-9]+$" + alphaUnicodeRegexString = "^[\\p{L}]+$" + alphaUnicodeNumericRegexString = "^[\\p{L}\\p{N}]+$" + numericRegexString = "^[-+]?[0-9]+(?:\\.[0-9]+)?$" + numberRegexString = "^[0-9]+$" + hexadecimalRegexString = "^(0[xX])?[0-9a-fA-F]+$" + hexcolorRegexString = "^#(?:[0-9a-fA-F]{3}|[0-9a-fA-F]{6})$" + rgbRegexString = "^rgb\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*\\)$" + rgbaRegexString = "^rgba\\(\\s*(?:(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])|(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%\\s*,\\s*(?:0|[1-9]\\d?|1\\d\\d?|2[0-4]\\d|25[0-5])%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + hslRegexString = "^hsl\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*\\)$" + hslaRegexString = "^hsla\\(\\s*(?:0|[1-9]\\d?|[12]\\d\\d|3[0-5]\\d|360)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0|[1-9]\\d?|100)%)\\s*,\\s*(?:(?:0.[1-9]*)|[01])\\s*\\)$" + emailRegexString = 
"^(?:(?:(?:(?:[a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+(?:\\.([a-zA-Z]|\\d|[!#\\$%&'\\*\\+\\-\\/=\\?\\^_`{\\|}~]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])+)*)|(?:(?:\\x22)(?:(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(?:\\x20|\\x09)+)?(?:(?:[\\x01-\\x08\\x0b\\x0c\\x0e-\\x1f\\x7f]|\\x21|[\\x23-\\x5b]|[\\x5d-\\x7e]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[\\x01-\\x09\\x0b\\x0c\\x0d-\\x7f]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}]))))*(?:(?:(?:\\x20|\\x09)*(?:\\x0d\\x0a))?(\\x20|\\x09)+)?(?:\\x22))))@(?:(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|\\d|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.)+(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])|(?:(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])(?:[a-zA-Z]|\\d|-|\\.|~|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])*(?:[a-zA-Z]|[\\x{00A0}-\\x{D7FF}\\x{F900}-\\x{FDCF}\\x{FDF0}-\\x{FFEF}])))\\.?$" + e164RegexString = "^\\+[1-9]?[0-9]{7,14}$" + base64RegexString = "^(?:[A-Za-z0-9+\\/]{4})*(?:[A-Za-z0-9+\\/]{2}==|[A-Za-z0-9+\\/]{3}=|[A-Za-z0-9+\\/]{4})$" + base64URLRegexString = "^(?:[A-Za-z0-9-_]{4})*(?:[A-Za-z0-9-_]{2}==|[A-Za-z0-9-_]{3}=|[A-Za-z0-9-_]{4})$" + iSBN10RegexString = "^(?:[0-9]{9}X|[0-9]{10})$" + iSBN13RegexString = "^(?:(?:97(?:8|9))[0-9]{10})$" + uUID3RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-3[0-9a-f]{3}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID4RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-4[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUID5RegexString = "^[0-9a-f]{8}-[0-9a-f]{4}-5[0-9a-f]{3}-[89ab][0-9a-f]{3}-[0-9a-f]{12}$" + uUIDRegexString = 
"^[0-9a-f]{8}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{4}-[0-9a-f]{12}$" + uUID3RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-3[0-9a-fA-F]{3}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + uUID4RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-4[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUID5RFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-5[0-9a-fA-F]{3}-[89abAB][0-9a-fA-F]{3}-[0-9a-fA-F]{12}$" + uUIDRFC4122RegexString = "^[0-9a-fA-F]{8}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{4}-[0-9a-fA-F]{12}$" + aSCIIRegexString = "^[\x00-\x7F]*$" + printableASCIIRegexString = "^[\x20-\x7E]*$" + multibyteRegexString = "[^\x00-\x7F]" + dataURIRegexString = `^data:((?:\w+\/(?:([^;]|;[^;]).)+)?)` + latitudeRegexString = "^[-+]?([1-8]?\\d(\\.\\d+)?|90(\\.0+)?)$" + longitudeRegexString = "^[-+]?(180(\\.0+)?|((1[0-7]\\d)|([1-9]?\\d))(\\.\\d+)?)$" + sSNRegexString = `^[0-9]{3}[ -]?(0[1-9]|[1-9][0-9])[ -]?([1-9][0-9]{3}|[0-9][1-9][0-9]{2}|[0-9]{2}[1-9][0-9]|[0-9]{3}[1-9])$` + hostnameRegexStringRFC952 = `^[a-zA-Z]([a-zA-Z0-9\-]+[\.]?)*[a-zA-Z0-9]$` // https://tools.ietf.org/html/rfc952 + hostnameRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62}){1}(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?$` // accepts hostname starting with a digit https://tools.ietf.org/html/rfc1123 + fqdnRegexStringRFC1123 = `^([a-zA-Z0-9]{1}[a-zA-Z0-9_-]{0,62})(\.[a-zA-Z0-9_]{1}[a-zA-Z0-9_-]{0,62})*?(\.[a-zA-Z]{1}[a-zA-Z0-9]{0,62})\.?$` // same as hostnameRegexStringRFC1123 but must contain a non numerical TLD (possibly ending with '.') + btcAddressRegexString = `^[13][a-km-zA-HJ-NP-Z1-9]{25,34}$` // bitcoin address + btcAddressUpperRegexStringBech32 = `^BC1[02-9AC-HJ-NP-Z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + btcAddressLowerRegexStringBech32 = `^bc1[02-9ac-hj-np-z]{7,76}$` // bitcoin bech32 address https://en.bitcoin.it/wiki/Bech32 + ethAddressRegexString = `^0x[0-9a-fA-F]{40}$` + ethAddressUpperRegexString = `^0x[0-9A-F]{40}$` + ethAddressLowerRegexString = 
`^0x[0-9a-f]{40}$` + uRLEncodedRegexString = `(%[A-Fa-f0-9]{2})` + hTMLEncodedRegexString = `&#[x]?([0-9a-fA-F]{2})|(>)|(<)|(")|(&)+[;]?` + hTMLRegexString = `<[/]?([a-zA-Z]+).*?>` + splitParamsRegexString = `'[^']*'|\S+` +) + +var ( + alphaRegex = regexp.MustCompile(alphaRegexString) + alphaNumericRegex = regexp.MustCompile(alphaNumericRegexString) + alphaUnicodeRegex = regexp.MustCompile(alphaUnicodeRegexString) + alphaUnicodeNumericRegex = regexp.MustCompile(alphaUnicodeNumericRegexString) + numericRegex = regexp.MustCompile(numericRegexString) + numberRegex = regexp.MustCompile(numberRegexString) + hexadecimalRegex = regexp.MustCompile(hexadecimalRegexString) + hexcolorRegex = regexp.MustCompile(hexcolorRegexString) + rgbRegex = regexp.MustCompile(rgbRegexString) + rgbaRegex = regexp.MustCompile(rgbaRegexString) + hslRegex = regexp.MustCompile(hslRegexString) + hslaRegex = regexp.MustCompile(hslaRegexString) + e164Regex = regexp.MustCompile(e164RegexString) + emailRegex = regexp.MustCompile(emailRegexString) + base64Regex = regexp.MustCompile(base64RegexString) + base64URLRegex = regexp.MustCompile(base64URLRegexString) + iSBN10Regex = regexp.MustCompile(iSBN10RegexString) + iSBN13Regex = regexp.MustCompile(iSBN13RegexString) + uUID3Regex = regexp.MustCompile(uUID3RegexString) + uUID4Regex = regexp.MustCompile(uUID4RegexString) + uUID5Regex = regexp.MustCompile(uUID5RegexString) + uUIDRegex = regexp.MustCompile(uUIDRegexString) + uUID3RFC4122Regex = regexp.MustCompile(uUID3RFC4122RegexString) + uUID4RFC4122Regex = regexp.MustCompile(uUID4RFC4122RegexString) + uUID5RFC4122Regex = regexp.MustCompile(uUID5RFC4122RegexString) + uUIDRFC4122Regex = regexp.MustCompile(uUIDRFC4122RegexString) + aSCIIRegex = regexp.MustCompile(aSCIIRegexString) + printableASCIIRegex = regexp.MustCompile(printableASCIIRegexString) + multibyteRegex = regexp.MustCompile(multibyteRegexString) + dataURIRegex = regexp.MustCompile(dataURIRegexString) + latitudeRegex = 
regexp.MustCompile(latitudeRegexString) + longitudeRegex = regexp.MustCompile(longitudeRegexString) + sSNRegex = regexp.MustCompile(sSNRegexString) + hostnameRegexRFC952 = regexp.MustCompile(hostnameRegexStringRFC952) + hostnameRegexRFC1123 = regexp.MustCompile(hostnameRegexStringRFC1123) + fqdnRegexRFC1123 = regexp.MustCompile(fqdnRegexStringRFC1123) + btcAddressRegex = regexp.MustCompile(btcAddressRegexString) + btcUpperAddressRegexBech32 = regexp.MustCompile(btcAddressUpperRegexStringBech32) + btcLowerAddressRegexBech32 = regexp.MustCompile(btcAddressLowerRegexStringBech32) + ethAddressRegex = regexp.MustCompile(ethAddressRegexString) + ethaddressRegexUpper = regexp.MustCompile(ethAddressUpperRegexString) + ethAddressRegexLower = regexp.MustCompile(ethAddressLowerRegexString) + uRLEncodedRegex = regexp.MustCompile(uRLEncodedRegexString) + hTMLEncodedRegex = regexp.MustCompile(hTMLEncodedRegexString) + hTMLRegex = regexp.MustCompile(hTMLRegexString) + splitParamsRegex = regexp.MustCompile(splitParamsRegexString) +) diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/struct_level.go b/terraform-server/vendor/github.com/go-playground/validator/v10/struct_level.go new file mode 100644 index 00000000..57691ee3 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/struct_level.go @@ -0,0 +1,175 @@ +package validator + +import ( + "context" + "reflect" +) + +// StructLevelFunc accepts all values needed for struct level validation +type StructLevelFunc func(sl StructLevel) + +// StructLevelFuncCtx accepts all values needed for struct level validation +// but also allows passing of contextual validation information via context.Context. 
+type StructLevelFuncCtx func(ctx context.Context, sl StructLevel) + +// wrapStructLevelFunc wraps normal StructLevelFunc makes it compatible with StructLevelFuncCtx +func wrapStructLevelFunc(fn StructLevelFunc) StructLevelFuncCtx { + return func(ctx context.Context, sl StructLevel) { + fn(sl) + } +} + +// StructLevel contains all the information and helper functions +// to validate a struct +type StructLevel interface { + + // returns the main validation object, in case one wants to call validations internally. + // this is so you don't have to use anonymous functions to get access to the validate + // instance. + Validator() *Validate + + // returns the top level struct, if any + Top() reflect.Value + + // returns the current fields parent struct, if any + Parent() reflect.Value + + // returns the current struct. + Current() reflect.Value + + // ExtractType gets the actual underlying type of field value. + // It will dive into pointers, customTypes and return you the + // underlying value and its kind. + ExtractType(field reflect.Value) (value reflect.Value, kind reflect.Kind, nullable bool) + + // reports an error just by passing the field and tag information + // + // NOTES: + // + // fieldName and altName get appended to the existing namespace that + // validator is on. e.g. pass 'FirstName' or 'Names[0]' depending + // on the nesting + // + // tag can be an existing validation tag or just something you make up + // and process on the flip side it's up to you. + ReportError(field interface{}, fieldName, structFieldName string, tag, param string) + + // reports an error just by passing ValidationErrors + // + // NOTES: + // + // relativeNamespace and relativeActualNamespace get appended to the + // existing namespace that validator is on. + // e.g. pass 'User.FirstName' or 'Users[0].FirstName' depending + // on the nesting. 
most of the time they will be blank, unless you validate + // at a level lower the the current field depth + ReportValidationErrors(relativeNamespace, relativeActualNamespace string, errs ValidationErrors) +} + +var _ StructLevel = new(validate) + +// Top returns the top level struct +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Top() reflect.Value { + return v.top +} + +// Parent returns the current structs parent +// +// NOTE: this can be the same as the current struct being validated +// if not is a nested struct. +// +// this is only called when within Struct and Field Level validation and +// should not be relied upon for an acurate value otherwise. +func (v *validate) Parent() reflect.Value { + return v.slflParent +} + +// Current returns the current struct. +func (v *validate) Current() reflect.Value { + return v.slCurrent +} + +// Validator returns the main validation object, in case one want to call validations internally. +func (v *validate) Validator() *Validate { + return v.v +} + +// ExtractType gets the actual underlying type of field value. 
+func (v *validate) ExtractType(field reflect.Value) (reflect.Value, reflect.Kind, bool) { + return v.extractTypeInternal(field, false) +} + +// ReportError reports an error just by passing the field and tag information +func (v *validate) ReportError(field interface{}, fieldName, structFieldName, tag, param string) { + + fv, kind, _ := v.extractTypeInternal(reflect.ValueOf(field), false) + + if len(structFieldName) == 0 { + structFieldName = fieldName + } + + v.str1 = string(append(v.ns, fieldName...)) + + if v.v.hasTagNameFunc || fieldName != structFieldName { + v.str2 = string(append(v.actualNs, structFieldName...)) + } else { + v.str2 = v.str1 + } + + if kind == reflect.Invalid { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + param: param, + kind: kind, + }, + ) + return + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tag, + actualTag: tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(fieldName)), + structfieldLen: uint8(len(structFieldName)), + value: fv.Interface(), + param: param, + kind: kind, + typ: fv.Type(), + }, + ) +} + +// ReportValidationErrors reports ValidationErrors obtained from running validations within the Struct Level validation. +// +// NOTE: this function prepends the current namespace to the relative ones. 
+func (v *validate) ReportValidationErrors(relativeNamespace, relativeStructNamespace string, errs ValidationErrors) { + + var err *fieldError + + for i := 0; i < len(errs); i++ { + + err = errs[i].(*fieldError) + err.ns = string(append(append(v.ns, relativeNamespace...), err.ns...)) + err.structNs = string(append(append(v.actualNs, relativeStructNamespace...), err.structNs...)) + + v.errs = append(v.errs, err) + } +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/translations.go b/terraform-server/vendor/github.com/go-playground/validator/v10/translations.go new file mode 100644 index 00000000..4d9d75c1 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/translations.go @@ -0,0 +1,11 @@ +package validator + +import ut "github.com/go-playground/universal-translator" + +// TranslationFunc is the function type used to register or override +// custom translations +type TranslationFunc func(ut ut.Translator, fe FieldError) string + +// RegisterTranslationsFunc allows for registering of translations +// for a 'ut.Translator' for use within the 'TranslationFunc' +type RegisterTranslationsFunc func(ut ut.Translator) error diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/util.go b/terraform-server/vendor/github.com/go-playground/validator/v10/util.go new file mode 100644 index 00000000..56420f43 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/util.go @@ -0,0 +1,288 @@ +package validator + +import ( + "reflect" + "strconv" + "strings" + "time" +) + +// extractTypeInternal gets the actual underlying type of field value. +// It will dive into pointers, customTypes and return you the +// underlying value and it's kind. 
+func (v *validate) extractTypeInternal(current reflect.Value, nullable bool) (reflect.Value, reflect.Kind, bool) { + +BEGIN: + switch current.Kind() { + case reflect.Ptr: + + nullable = true + + if current.IsNil() { + return current, reflect.Ptr, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Interface: + + nullable = true + + if current.IsNil() { + return current, reflect.Interface, nullable + } + + current = current.Elem() + goto BEGIN + + case reflect.Invalid: + return current, reflect.Invalid, nullable + + default: + + if v.v.hasCustomFuncs { + + if fn, ok := v.v.customFuncs[current.Type()]; ok { + current = reflect.ValueOf(fn(current)) + goto BEGIN + } + } + + return current, current.Kind(), nullable + } +} + +// getStructFieldOKInternal traverses a struct to retrieve a specific field denoted by the provided namespace and +// returns the field, field kind and whether is was successful in retrieving the field at all. +// +// NOTE: when not successful ok will be false, this can happen when a nested struct is nil and so the field +// could not be retrieved because it didn't exist. 
+func (v *validate) getStructFieldOKInternal(val reflect.Value, namespace string) (current reflect.Value, kind reflect.Kind, nullable bool, found bool) { + +BEGIN: + current, kind, nullable = v.ExtractType(val) + if kind == reflect.Invalid { + return + } + + if namespace == "" { + found = true + return + } + + switch kind { + + case reflect.Ptr, reflect.Interface: + return + + case reflect.Struct: + + typ := current.Type() + fld := namespace + var ns string + + if typ != timeType { + + idx := strings.Index(namespace, namespaceSeparator) + + if idx != -1 { + fld = namespace[:idx] + ns = namespace[idx+1:] + } else { + ns = "" + } + + bracketIdx := strings.Index(fld, leftBracket) + if bracketIdx != -1 { + fld = fld[:bracketIdx] + + ns = namespace[bracketIdx:] + } + + val = current.FieldByName(fld) + namespace = ns + goto BEGIN + } + + case reflect.Array, reflect.Slice: + idx := strings.Index(namespace, leftBracket) + idx2 := strings.Index(namespace, rightBracket) + + arrIdx, _ := strconv.Atoi(namespace[idx+1 : idx2]) + + if arrIdx >= current.Len() { + return + } + + startIdx := idx2 + 1 + + if startIdx < len(namespace) { + if namespace[startIdx:startIdx+1] == namespaceSeparator { + startIdx++ + } + } + + val = current.Index(arrIdx) + namespace = namespace[startIdx:] + goto BEGIN + + case reflect.Map: + idx := strings.Index(namespace, leftBracket) + 1 + idx2 := strings.Index(namespace, rightBracket) + + endIdx := idx2 + + if endIdx+1 < len(namespace) { + if namespace[endIdx+1:endIdx+2] == namespaceSeparator { + endIdx++ + } + } + + key := namespace[idx:idx2] + + switch current.Type().Key().Kind() { + case reflect.Int: + i, _ := strconv.Atoi(key) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Int8: + i, _ := strconv.ParseInt(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(int8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int16: + i, _ := strconv.ParseInt(key, 10, 16) + val = 
current.MapIndex(reflect.ValueOf(int16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int32: + i, _ := strconv.ParseInt(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(int32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Int64: + i, _ := strconv.ParseInt(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Uint: + i, _ := strconv.ParseUint(key, 10, 0) + val = current.MapIndex(reflect.ValueOf(uint(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint8: + i, _ := strconv.ParseUint(key, 10, 8) + val = current.MapIndex(reflect.ValueOf(uint8(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint16: + i, _ := strconv.ParseUint(key, 10, 16) + val = current.MapIndex(reflect.ValueOf(uint16(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint32: + i, _ := strconv.ParseUint(key, 10, 32) + val = current.MapIndex(reflect.ValueOf(uint32(i))) + namespace = namespace[endIdx+1:] + + case reflect.Uint64: + i, _ := strconv.ParseUint(key, 10, 64) + val = current.MapIndex(reflect.ValueOf(i)) + namespace = namespace[endIdx+1:] + + case reflect.Float32: + f, _ := strconv.ParseFloat(key, 32) + val = current.MapIndex(reflect.ValueOf(float32(f))) + namespace = namespace[endIdx+1:] + + case reflect.Float64: + f, _ := strconv.ParseFloat(key, 64) + val = current.MapIndex(reflect.ValueOf(f)) + namespace = namespace[endIdx+1:] + + case reflect.Bool: + b, _ := strconv.ParseBool(key) + val = current.MapIndex(reflect.ValueOf(b)) + namespace = namespace[endIdx+1:] + + // reflect.Type = string + default: + val = current.MapIndex(reflect.ValueOf(key)) + namespace = namespace[endIdx+1:] + } + + goto BEGIN + } + + // if got here there was more namespace, cannot go any deeper + panic("Invalid field namespace") +} + +// asInt returns the parameter as a int64 +// or panics if it can't convert +func asInt(param string) int64 { + i, err := strconv.ParseInt(param, 0, 64) + panicIf(err) + + return i +} + 
+// asIntFromTimeDuration parses param as time.Duration and returns it as int64 +// or panics on error. +func asIntFromTimeDuration(param string) int64 { + d, err := time.ParseDuration(param) + if err != nil { + // attempt parsing as an an integer assuming nanosecond precision + return asInt(param) + } + return int64(d) +} + +// asIntFromType calls the proper function to parse param as int64, +// given a field's Type t. +func asIntFromType(t reflect.Type, param string) int64 { + switch t { + case timeDurationType: + return asIntFromTimeDuration(param) + default: + return asInt(param) + } +} + +// asUint returns the parameter as a uint64 +// or panics if it can't convert +func asUint(param string) uint64 { + + i, err := strconv.ParseUint(param, 0, 64) + panicIf(err) + + return i +} + +// asFloat returns the parameter as a float64 +// or panics if it can't convert +func asFloat(param string) float64 { + + i, err := strconv.ParseFloat(param, 64) + panicIf(err) + + return i +} + +// asBool returns the parameter as a bool +// or panics if it can't convert +func asBool(param string) bool { + + i, err := strconv.ParseBool(param) + panicIf(err) + + return i +} + +func panicIf(err error) { + if err != nil { + panic(err.Error()) + } +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/validator.go b/terraform-server/vendor/github.com/go-playground/validator/v10/validator.go new file mode 100644 index 00000000..f097f394 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/validator.go @@ -0,0 +1,477 @@ +package validator + +import ( + "context" + "fmt" + "reflect" + "strconv" +) + +// per validate construct +type validate struct { + v *Validate + top reflect.Value + ns []byte + actualNs []byte + errs ValidationErrors + includeExclude map[string]struct{} // reset only if StructPartial or StructExcept are called, no need otherwise + ffn FilterFunc + slflParent reflect.Value // StructLevel & FieldLevel + slCurrent 
reflect.Value // StructLevel & FieldLevel + flField reflect.Value // StructLevel & FieldLevel + cf *cField // StructLevel & FieldLevel + ct *cTag // StructLevel & FieldLevel + misc []byte // misc reusable + str1 string // misc reusable + str2 string // misc reusable + fldIsPointer bool // StructLevel & FieldLevel + isPartial bool + hasExcludes bool +} + +// parent and current will be the same the first run of validateStruct +func (v *validate) validateStruct(ctx context.Context, parent reflect.Value, current reflect.Value, typ reflect.Type, ns []byte, structNs []byte, ct *cTag) { + + cs, ok := v.v.structCache.Get(typ) + if !ok { + cs = v.v.extractStructCache(current, typ.Name()) + } + + if len(ns) == 0 && len(cs.name) != 0 { + + ns = append(ns, cs.name...) + ns = append(ns, '.') + + structNs = append(structNs, cs.name...) + structNs = append(structNs, '.') + } + + // ct is nil on top level struct, and structs as fields that have no tag info + // so if nil or if not nil and the structonly tag isn't present + if ct == nil || ct.typeof != typeStructOnly { + + var f *cField + + for i := 0; i < len(cs.fields); i++ { + + f = cs.fields[i] + + if v.isPartial { + + if v.ffn != nil { + // used with StructFiltered + if v.ffn(append(structNs, f.name...)) { + continue + } + + } else { + // used with StructPartial & StructExcept + _, ok = v.includeExclude[string(append(structNs, f.name...))] + + if (ok && v.hasExcludes) || (!ok && !v.hasExcludes) { + continue + } + } + } + + v.traverseField(ctx, parent, current.Field(f.idx), ns, structNs, f, f.cTags) + } + } + + // check if any struct level validations, after all field validations already checked. + // first iteration will have no info about nostructlevel tag, and is checked prior to + // calling the next iteration of validateStruct called from traverseField. 
+ if cs.fn != nil { + + v.slflParent = parent + v.slCurrent = current + v.ns = ns + v.actualNs = structNs + + cs.fn(ctx, v) + } +} + +// traverseField validates any field, be it a struct or single field, ensures it's validity and passes it along to be validated via it's tag options +func (v *validate) traverseField(ctx context.Context, parent reflect.Value, current reflect.Value, ns []byte, structNs []byte, cf *cField, ct *cTag) { + var typ reflect.Type + var kind reflect.Kind + + current, kind, v.fldIsPointer = v.extractTypeInternal(current, false) + + switch kind { + case reflect.Ptr, reflect.Interface, reflect.Invalid: + + if ct == nil { + return + } + + if ct.typeof == typeOmitEmpty || ct.typeof == typeIsDefault { + return + } + + if ct.hasTag { + if kind == reflect.Invalid { + v.str1 = string(append(ns, cf.altName...)) + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + param: ct.param, + kind: kind, + }, + ) + return + } + + v.str1 = string(append(ns, cf.altName...)) + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + if !ct.runValidationWhenNil { + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: current.Type(), + }, + ) + return + } + } + + case reflect.Struct: + + typ = current.Type() + + if typ != timeType { + + if ct != nil { + + if ct.typeof == typeStructOnly { + goto CONTINUE + } else if ct.typeof == typeIsDefault { + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if 
!ct.fn(ctx, v) { + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + return + } + } + + ct = ct.next + } + + if ct != nil && ct.typeof == typeNoStructLevel { + return + } + + CONTINUE: + // if len == 0 then validating using 'Var' or 'VarWithValue' + // Var - doesn't make much sense to do it that way, should call 'Struct', but no harm... + // VarWithField - this allows for validating against each field within the struct against a specific value + // pretty handy in certain situations + if len(cf.name) > 0 { + ns = append(append(ns, cf.altName...), '.') + structNs = append(append(structNs, cf.name...), '.') + } + + v.validateStruct(ctx, current, current, typ, ns, structNs, ct) + return + } + } + + if !ct.hasTag { + return + } + + typ = current.Type() + +OUTER: + for { + if ct == nil { + return + } + + switch ct.typeof { + + case typeOmitEmpty: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !hasValue(v) { + return + } + + ct = ct.next + continue + + case typeEndKeys: + return + + case typeDive: + + ct = ct.next + + // traverse slice or map here + // or panic ;) + switch kind { + case reflect.Slice, reflect.Array: + + var i64 int64 + reusableCF := &cField{} + + for i := 0; i < current.Len(); i++ { + + i64 = int64(i) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + + v.misc = append(v.misc[0:0], cf.altName...) 
+ v.misc = append(v.misc, '[') + v.misc = strconv.AppendInt(v.misc, i64, 10) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + v.traverseField(ctx, parent, current.Index(i), ns, structNs, reusableCF, ct) + } + + case reflect.Map: + + var pv string + reusableCF := &cField{} + + for _, key := range current.MapKeys() { + + pv = fmt.Sprintf("%v", key.Interface()) + + v.misc = append(v.misc[0:0], cf.name...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.name = string(v.misc) + + if cf.namesEqual { + reusableCF.altName = reusableCF.name + } else { + v.misc = append(v.misc[0:0], cf.altName...) + v.misc = append(v.misc, '[') + v.misc = append(v.misc, pv...) + v.misc = append(v.misc, ']') + + reusableCF.altName = string(v.misc) + } + + if ct != nil && ct.typeof == typeKeys && ct.keys != nil { + v.traverseField(ctx, parent, key, ns, structNs, reusableCF, ct.keys) + // can be nil when just keys being validated + if ct.next != nil { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct.next) + } + } else { + v.traverseField(ctx, parent, current.MapIndex(key), ns, structNs, reusableCF, ct) + } + } + + default: + // throw error, if not a slice or map then should not have gotten here + // bad dive tag + panic("dive error! can't dive on a non slice or map") + } + + return + + case typeOr: + + v.misc = v.misc[0:0] + + for { + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if ct.fn(ctx, v) { + + // drain rest of the 'or' values, then continue or leave + for { + + ct = ct.next + + if ct == nil { + return + } + + if ct.typeof != typeOr { + continue OUTER + } + } + } + + v.misc = append(v.misc, '|') + v.misc = append(v.misc, ct.tag...) + + if ct.hasParam { + v.misc = append(v.misc, '=') + v.misc = append(v.misc, ct.param...) 
+ } + + if ct.isBlockEnd || ct.next == nil { + // if we get here, no valid 'or' value and no more tags + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + if ct.hasAlias { + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.actualAliasTag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + } else { + + tVal := string(v.misc)[1:] + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: tVal, + actualTag: tVal, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + } + + return + } + + ct = ct.next + } + + default: + + // set Field Level fields + v.slflParent = parent + v.flField = current + v.cf = cf + v.ct = ct + + if !ct.fn(ctx, v) { + + v.str1 = string(append(ns, cf.altName...)) + + if v.v.hasTagNameFunc { + v.str2 = string(append(structNs, cf.name...)) + } else { + v.str2 = v.str1 + } + + v.errs = append(v.errs, + &fieldError{ + v: v.v, + tag: ct.aliasTag, + actualTag: ct.tag, + ns: v.str1, + structNs: v.str2, + fieldLen: uint8(len(cf.altName)), + structfieldLen: uint8(len(cf.name)), + value: current.Interface(), + param: ct.param, + kind: kind, + typ: typ, + }, + ) + + return + } + ct = ct.next + } + } + +} diff --git a/terraform-server/vendor/github.com/go-playground/validator/v10/validator_instance.go b/terraform-server/vendor/github.com/go-playground/validator/v10/validator_instance.go new file mode 100644 index 00000000..fe6a4877 --- /dev/null +++ b/terraform-server/vendor/github.com/go-playground/validator/v10/validator_instance.go @@ -0,0 +1,619 @@ +package validator + +import ( + "context" + "errors" + "fmt" + "reflect" + 
"strings" + "sync" + "time" + + ut "github.com/go-playground/universal-translator" +) + +const ( + defaultTagName = "validate" + utf8HexComma = "0x2C" + utf8Pipe = "0x7C" + tagSeparator = "," + orSeparator = "|" + tagKeySeparator = "=" + structOnlyTag = "structonly" + noStructLevelTag = "nostructlevel" + omitempty = "omitempty" + isdefault = "isdefault" + requiredWithoutAllTag = "required_without_all" + requiredWithoutTag = "required_without" + requiredWithTag = "required_with" + requiredWithAllTag = "required_with_all" + requiredIfTag = "required_if" + requiredUnlessTag = "required_unless" + skipValidationTag = "-" + diveTag = "dive" + keysTag = "keys" + endKeysTag = "endkeys" + requiredTag = "required" + namespaceSeparator = "." + leftBracket = "[" + rightBracket = "]" + restrictedTagChars = ".[],|=+()`~!@#$%^&*\\\"/?<>{}" + restrictedAliasErr = "Alias '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" + restrictedTagErr = "Tag '%s' either contains restricted characters or is the same as a restricted tag needed for normal operation" +) + +var ( + timeDurationType = reflect.TypeOf(time.Duration(0)) + timeType = reflect.TypeOf(time.Time{}) + + defaultCField = &cField{namesEqual: true} +) + +// FilterFunc is the type used to filter fields using +// StructFiltered(...) function. 
+// returning true results in the field being filtered/skiped from +// validation +type FilterFunc func(ns []byte) bool + +// CustomTypeFunc allows for overriding or adding custom field type handler functions +// field = field value of the type to return a value to be validated +// example Valuer from sql drive see https://golang.org/src/database/sql/driver/types.go?s=1210:1293#L29 +type CustomTypeFunc func(field reflect.Value) interface{} + +// TagNameFunc allows for adding of a custom tag name parser +type TagNameFunc func(field reflect.StructField) string + +type internalValidationFuncWrapper struct { + fn FuncCtx + runValidatinOnNil bool +} + +// Validate contains the validator settings and cache +type Validate struct { + tagName string + pool *sync.Pool + hasCustomFuncs bool + hasTagNameFunc bool + tagNameFunc TagNameFunc + structLevelFuncs map[reflect.Type]StructLevelFuncCtx + customFuncs map[reflect.Type]CustomTypeFunc + aliases map[string]string + validations map[string]internalValidationFuncWrapper + transTagFunc map[ut.Translator]map[string]TranslationFunc // map[]map[]TranslationFunc + tagCache *tagCache + structCache *structCache +} + +// New returns a new instance of 'validate' with sane defaults. 
+func New() *Validate { + + tc := new(tagCache) + tc.m.Store(make(map[string]*cTag)) + + sc := new(structCache) + sc.m.Store(make(map[reflect.Type]*cStruct)) + + v := &Validate{ + tagName: defaultTagName, + aliases: make(map[string]string, len(bakedInAliases)), + validations: make(map[string]internalValidationFuncWrapper, len(bakedInValidators)), + tagCache: tc, + structCache: sc, + } + + // must copy alias validators for separate validations to be used in each validator instance + for k, val := range bakedInAliases { + v.RegisterAlias(k, val) + } + + // must copy validators for separate validations to be used in each instance + for k, val := range bakedInValidators { + + switch k { + // these require that even if the value is nil that the validation should run, omitempty still overrides this behaviour + case requiredIfTag, requiredUnlessTag, requiredWithTag, requiredWithAllTag, requiredWithoutTag, requiredWithoutAllTag: + _ = v.registerValidation(k, wrapFunc(val), true, true) + default: + // no need to error check here, baked in will always be valid + _ = v.registerValidation(k, wrapFunc(val), true, false) + } + } + + v.pool = &sync.Pool{ + New: func() interface{} { + return &validate{ + v: v, + ns: make([]byte, 0, 64), + actualNs: make([]byte, 0, 64), + misc: make([]byte, 32), + } + }, + } + + return v +} + +// SetTagName allows for changing of the default tag name of 'validate' +func (v *Validate) SetTagName(name string) { + v.tagName = name +} + +// RegisterTagNameFunc registers a function to get alternate names for StructFields. +// +// eg. 
to use the names which have been specified for JSON representations of structs, rather than normal Go field names: +// +// validate.RegisterTagNameFunc(func(fld reflect.StructField) string { +// name := strings.SplitN(fld.Tag.Get("json"), ",", 2)[0] +// if name == "-" { +// return "" +// } +// return name +// }) +func (v *Validate) RegisterTagNameFunc(fn TagNameFunc) { + v.tagNameFunc = fn + v.hasTagNameFunc = true +} + +// RegisterValidation adds a validation with the given tag +// +// NOTES: +// - if the key already exists, the previous validation function will be replaced. +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterValidation(tag string, fn Func, callValidationEvenIfNull ...bool) error { + return v.RegisterValidationCtx(tag, wrapFunc(fn), callValidationEvenIfNull...) +} + +// RegisterValidationCtx does the same as RegisterValidation on accepts a FuncCtx validation +// allowing context.Context validation support. +func (v *Validate) RegisterValidationCtx(tag string, fn FuncCtx, callValidationEvenIfNull ...bool) error { + var nilCheckable bool + if len(callValidationEvenIfNull) > 0 { + nilCheckable = callValidationEvenIfNull[0] + } + return v.registerValidation(tag, fn, false, nilCheckable) +} + +func (v *Validate) registerValidation(tag string, fn FuncCtx, bakedIn bool, nilCheckable bool) error { + if len(tag) == 0 { + return errors.New("Function Key cannot be empty") + } + + if fn == nil { + return errors.New("Function cannot be empty") + } + + _, ok := restrictedTags[tag] + if !bakedIn && (ok || strings.ContainsAny(tag, restrictedTagChars)) { + panic(fmt.Sprintf(restrictedTagErr, tag)) + } + v.validations[tag] = internalValidationFuncWrapper{fn: fn, runValidatinOnNil: nilCheckable} + return nil +} + +// RegisterAlias registers a mapping of a single validation tag that +// defines a common or complex set of validation(s) to simplify adding validation +// to structs. 
+// +// NOTE: this function is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterAlias(alias, tags string) { + + _, ok := restrictedTags[alias] + + if ok || strings.ContainsAny(alias, restrictedTagChars) { + panic(fmt.Sprintf(restrictedAliasErr, alias)) + } + + v.aliases[alias] = tags +} + +// RegisterStructValidation registers a StructLevelFunc against a number of types. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidation(fn StructLevelFunc, types ...interface{}) { + v.RegisterStructValidationCtx(wrapStructLevelFunc(fn), types...) +} + +// RegisterStructValidationCtx registers a StructLevelFuncCtx against a number of types and allows passing +// of contextual validation information via context.Context. +// +// NOTE: +// - this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterStructValidationCtx(fn StructLevelFuncCtx, types ...interface{}) { + + if v.structLevelFuncs == nil { + v.structLevelFuncs = make(map[reflect.Type]StructLevelFuncCtx) + } + + for _, t := range types { + tv := reflect.ValueOf(t) + if tv.Kind() == reflect.Ptr { + t = reflect.Indirect(tv).Interface() + } + + v.structLevelFuncs[reflect.TypeOf(t)] = fn + } +} + +// RegisterCustomTypeFunc registers a CustomTypeFunc against a number of types +// +// NOTE: this method is not thread-safe it is intended that these all be registered prior to any validation +func (v *Validate) RegisterCustomTypeFunc(fn CustomTypeFunc, types ...interface{}) { + + if v.customFuncs == nil { + v.customFuncs = make(map[reflect.Type]CustomTypeFunc) + } + + for _, t := range types { + v.customFuncs[reflect.TypeOf(t)] = fn + } + + v.hasCustomFuncs = true +} + +// RegisterTranslation registers translations against the provided tag. 
+func (v *Validate) RegisterTranslation(tag string, trans ut.Translator, registerFn RegisterTranslationsFunc, translationFn TranslationFunc) (err error) { + + if v.transTagFunc == nil { + v.transTagFunc = make(map[ut.Translator]map[string]TranslationFunc) + } + + if err = registerFn(trans); err != nil { + return + } + + m, ok := v.transTagFunc[trans] + if !ok { + m = make(map[string]TranslationFunc) + v.transTagFunc[trans] = m + } + + m[tag] = translationFn + + return +} + +// Struct validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) Struct(s interface{}) error { + return v.StructCtx(context.Background(), s) +} + +// StructCtx validates a structs exposed fields, and automatically validates nested structs, unless otherwise specified +// and also allows passing of context.Context for contextual validation information. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructCtx(ctx context.Context, s interface{}) (err error) { + + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = false + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructFiltered validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified. +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructFiltered(s interface{}, fn FilterFunc) error { + return v.StructFilteredCtx(context.Background(), s, fn) +} + +// StructFilteredCtx validates a structs exposed fields, that pass the FilterFunc check and automatically validates +// nested structs, unless otherwise specified and also allows passing of contextual validation information via +// context.Context +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructFilteredCtx(ctx context.Context, s interface{}, fn FilterFunc) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = fn + // vd.hasExcludes = false // only need to reset in StructPartial and StructExcept + + vd.validateStruct(ctx, top, val, val.Type(), vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructPartial validates the fields passed in only, ignoring all others. +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructPartial(s interface{}, fields ...string) error { + return v.StructPartialCtx(context.Background(), s, fields...) +} + +// StructPartialCtx validates the fields passed in only, ignoring all others and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// eg. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructPartialCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = false + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, k := range fields { + + flds := strings.Split(k, namespaceSeparator) + if len(flds) > 0 { + + vd.misc = append(vd.misc[0:0], name...) + vd.misc = append(vd.misc, '.') + + for _, s := range flds { + + idx := strings.Index(s, leftBracket) + + if idx != -1 { + for idx != -1 { + vd.misc = append(vd.misc, s[:idx]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + + idx2 := strings.Index(s, rightBracket) + idx2++ + vd.misc = append(vd.misc, s[idx:idx2]...) + vd.includeExclude[string(vd.misc)] = struct{}{} + s = s[idx2:] + idx = strings.Index(s, leftBracket) + } + } else { + + vd.misc = append(vd.misc, s...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.misc = append(vd.misc, '.') + } + } + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// StructExcept validates all fields except the ones passed in. +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+func (v *Validate) StructExcept(s interface{}, fields ...string) error { + return v.StructExceptCtx(context.Background(), s, fields...) +} + +// StructExceptCtx validates all fields except the ones passed in and allows passing of contextual +// validation validation information via context.Context +// Fields may be provided in a namespaced fashion relative to the struct provided +// i.e. NestedStruct.Field or NestedArrayField[0].Struct.Name +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +func (v *Validate) StructExceptCtx(ctx context.Context, s interface{}, fields ...string) (err error) { + val := reflect.ValueOf(s) + top := val + + if val.Kind() == reflect.Ptr && !val.IsNil() { + val = val.Elem() + } + + if val.Kind() != reflect.Struct || val.Type() == timeType { + return &InvalidValidationError{Type: reflect.TypeOf(s)} + } + + // good to validate + vd := v.pool.Get().(*validate) + vd.top = top + vd.isPartial = true + vd.ffn = nil + vd.hasExcludes = true + vd.includeExclude = make(map[string]struct{}) + + typ := val.Type() + name := typ.Name() + + for _, key := range fields { + + vd.misc = vd.misc[0:0] + + if len(name) > 0 { + vd.misc = append(vd.misc, name...) + vd.misc = append(vd.misc, '.') + } + + vd.misc = append(vd.misc, key...) + vd.includeExclude[string(vd.misc)] = struct{}{} + } + + vd.validateStruct(ctx, top, val, typ, vd.ns[0:0], vd.actualNs[0:0], nil) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + + v.pool.Put(vd) + + return +} + +// Var validates a single variable using tag style validation. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. 
time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) Var(field interface{}, tag string) error { + return v.VarCtx(context.Background(), field, tag) +} + +// VarCtx validates a single variable using tag style validation and allows passing of contextual +// validation validation information via context.Context. +// eg. +// var i int +// validate.Var(i, "gt=1,lt=10") +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. 
+// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarCtx(ctx context.Context, field interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + + ctag := v.fetchCacheTag(tag) + val := reflect.ValueOf(field) + vd := v.pool.Get().(*validate) + vd.top = val + vd.isPartial = false + vd.traverseField(ctx, val, val, vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} + +// VarWithValue validates a single variable, against another variable/field's value using tag style validation +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValue(field interface{}, other interface{}, tag string) error { + return v.VarWithValueCtx(context.Background(), field, other, tag) +} + +// VarWithValueCtx validates a single variable, against another variable/field's value using tag style validation and +// allows passing of contextual validation validation information via context.Context. +// eg. +// s1 := "abcd" +// s2 := "abcd" +// validate.VarWithValue(s1, s2, "eqcsfield") // returns true +// +// WARNING: a struct can be passed for validation eg. 
time.Time is a struct or +// if you have a custom type and have registered a custom type handler, so must +// allow it; however unforeseen validations will occur if trying to validate a +// struct that is meant to be passed to 'validate.Struct' +// +// It returns InvalidValidationError for bad values passed in and nil or ValidationErrors as error otherwise. +// You will need to assert the error if it's not nil eg. err.(validator.ValidationErrors) to access the array of errors. +// validate Array, Slice and maps fields which may contain more than one error +func (v *Validate) VarWithValueCtx(ctx context.Context, field interface{}, other interface{}, tag string) (err error) { + if len(tag) == 0 || tag == skipValidationTag { + return nil + } + ctag := v.fetchCacheTag(tag) + otherVal := reflect.ValueOf(other) + vd := v.pool.Get().(*validate) + vd.top = otherVal + vd.isPartial = false + vd.traverseField(ctx, otherVal, reflect.ValueOf(field), vd.ns[0:0], vd.actualNs[0:0], defaultCField, ctag) + + if len(vd.errs) > 0 { + err = vd.errs + vd.errs = nil + } + v.pool.Put(vd) + return +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/.gitignore b/terraform-server/vendor/github.com/go-sql-driver/mysql/.gitignore new file mode 100644 index 00000000..2de28da1 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/.gitignore @@ -0,0 +1,9 @@ +.DS_Store +.DS_Store? +._* +.Spotlight-V100 +.Trashes +Icon? +ehthumbs.db +Thumbs.db +.idea diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/AUTHORS b/terraform-server/vendor/github.com/go-sql-driver/mysql/AUTHORS new file mode 100644 index 00000000..50afa2c8 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/AUTHORS @@ -0,0 +1,117 @@ +# This is the official list of Go-MySQL-Driver authors for copyright purposes. 
+ +# If you are submitting a patch, please add your name or the name of the +# organization which holds the copyright to this list in alphabetical order. + +# Names should be added to this file as +# Name +# The email address is not required for organizations. +# Please keep the list sorted. + + +# Individual Persons + +Aaron Hopkins +Achille Roussel +Alex Snast +Alexey Palazhchenko +Andrew Reid +Animesh Ray +Arne Hormann +Ariel Mashraki +Asta Xie +Bulat Gaifullin +Caine Jette +Carlos Nieto +Chris Moos +Craig Wilson +Daniel Montoya +Daniel Nichter +Daniël van Eeden +Dave Protasowski +DisposaBoy +Egor Smolyakov +Erwan Martin +Evan Shaw +Frederick Mayle +Gustavo Kristic +Hajime Nakagami +Hanno Braun +Henri Yandell +Hirotaka Yamamoto +Huyiguang +ICHINOSE Shogo +Ilia Cimpoes +INADA Naoki +Jacek Szwec +James Harr +Jeff Hodges +Jeffrey Charles +Jerome Meyer +Jiajia Zhong +Jian Zhen +Joshua Prunier +Julien Lefevre +Julien Schmidt +Justin Li +Justin Nuß +Kamil Dziedzic +Kei Kamikawa +Kevin Malachowski +Kieron Woodhouse +Lennart Rudolph +Leonardo YongUk Kim +Linh Tran Tuan +Lion Yang +Luca Looz +Lucas Liu +Luke Scott +Maciej Zimnoch +Michael Woolnough +Nathanial Murphy +Nicola Peduzzi +Olivier Mengué +oscarzhao +Paul Bonser +Peter Schultz +Rebecca Chin +Reed Allman +Richard Wilkes +Robert Russell +Runrioter Wung +Sho Iizuka +Sho Ikeda +Shuode Li +Simon J Mudd +Soroush Pour +Stan Putrya +Stanley Gunawan +Steven Hartland +Tan Jinhua <312841925 at qq.com> +Thomas Wodarek +Tim Ruffles +Tom Jenkinson +Vladimir Kovpak +Vladyslav Zhelezniak +Xiangyu Hu +Xiaobing Jiang +Xiuming Chen +Xuehong Chan +Zhenye Xie +Zhixin Wen + +# Organizations + +Barracuda Networks, Inc. +Counting Ltd. +DigitalOcean Inc. +Facebook Inc. +GitHub Inc. +Google Inc. +InfoSum Ltd. +Keybase Inc. +Multiplay Ltd. +Percona LLC +Pivotal Inc. +Stripe Inc. +Zendesk Inc. 
diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md b/terraform-server/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md new file mode 100644 index 00000000..72a738ed --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/CHANGELOG.md @@ -0,0 +1,232 @@ +## Version 1.6 (2021-04-01) + +Changes: + + - Migrate the CI service from travis-ci to GitHub Actions (#1176, #1183, #1190) + - `NullTime` is deprecated (#960, #1144) + - Reduce allocations when building SET command (#1111) + - Performance improvement for time formatting (#1118) + - Performance improvement for time parsing (#1098, #1113) + +New Features: + + - Implement `driver.Validator` interface (#1106, #1174) + - Support returning `uint64` from `Valuer` in `ConvertValue` (#1143) + - Add `json.RawMessage` for converter and prepared statement (#1059) + - Interpolate `json.RawMessage` as `string` (#1058) + - Implements `CheckNamedValue` (#1090) + +Bugfixes: + + - Stop rounding times (#1121, #1172) + - Put zero filler into the SSL handshake packet (#1066) + - Fix checking cancelled connections back into the connection pool (#1095) + - Fix remove last 0 byte for mysql_old_password when password is empty (#1133) + + +## Version 1.5 (2020-01-07) + +Changes: + + - Dropped support Go 1.9 and lower (#823, #829, #886, #1016, #1017) + - Improve buffer handling (#890) + - Document potentially insecure TLS configs (#901) + - Use a double-buffering scheme to prevent data races (#943) + - Pass uint64 values without converting them to string (#838, #955) + - Update collations and make utf8mb4 default (#877, #1054) + - Make NullTime compatible with sql.NullTime in Go 1.13+ (#995) + - Removed CloudSQL support (#993, #1007) + - Add Go Module support (#1003) + +New Features: + + - Implement support of optional TLS (#900) + - Check connection liveness (#934, #964, #997, #1048, #1051, #1052) + - Implement Connector Interface (#941, #958, #1020, #1035) + +Bugfixes: + + - Mark 
connections as bad on error during ping (#875) + - Mark connections as bad on error during dial (#867) + - Fix connection leak caused by rapid context cancellation (#1024) + - Mark connections as bad on error during Conn.Prepare (#1030) + + +## Version 1.4.1 (2018-11-14) + +Bugfixes: + + - Fix TIME format for binary columns (#818) + - Fix handling of empty auth plugin names (#835) + - Fix caching_sha2_password with empty password (#826) + - Fix canceled context broke mysqlConn (#862) + - Fix OldAuthSwitchRequest support (#870) + - Fix Auth Response packet for cleartext password (#887) + +## Version 1.4 (2018-06-03) + +Changes: + + - Documentation fixes (#530, #535, #567) + - Refactoring (#575, #579, #580, #581, #603, #615, #704) + - Cache column names (#444) + - Sort the DSN parameters in DSNs generated from a config (#637) + - Allow native password authentication by default (#644) + - Use the default port if it is missing in the DSN (#668) + - Removed the `strict` mode (#676) + - Do not query `max_allowed_packet` by default (#680) + - Dropped support Go 1.6 and lower (#696) + - Updated `ConvertValue()` to match the database/sql/driver implementation (#760) + - Document the usage of `0000-00-00T00:00:00` as the time.Time zero value (#783) + - Improved the compatibility of the authentication system (#807) + +New Features: + + - Multi-Results support (#537) + - `rejectReadOnly` DSN option (#604) + - `context.Context` support (#608, #612, #627, #761) + - Transaction isolation level support (#619, #744) + - Read-Only transactions support (#618, #634) + - `NewConfig` function which initializes a config with default values (#679) + - Implemented the `ColumnType` interfaces (#667, #724) + - Support for custom string types in `ConvertValue` (#623) + - Implemented `NamedValueChecker`, improving support for uint64 with high bit set (#690, #709, #710) + - `caching_sha2_password` authentication plugin support (#794, #800, #801, #802) + - Implemented `driver.SessionResetter` 
(#779) + - `sha256_password` authentication plugin support (#808) + +Bugfixes: + + - Use the DSN hostname as TLS default ServerName if `tls=true` (#564, #718) + - Fixed LOAD LOCAL DATA INFILE for empty files (#590) + - Removed columns definition cache since it sometimes cached invalid data (#592) + - Don't mutate registered TLS configs (#600) + - Make RegisterTLSConfig concurrency-safe (#613) + - Handle missing auth data in the handshake packet correctly (#646) + - Do not retry queries when data was written to avoid data corruption (#302, #736) + - Cache the connection pointer for error handling before invalidating it (#678) + - Fixed imports for appengine/cloudsql (#700) + - Fix sending STMT_LONG_DATA for 0 byte data (#734) + - Set correct capacity for []bytes read from length-encoded strings (#766) + - Make RegisterDial concurrency-safe (#773) + + +## Version 1.3 (2016-12-01) + +Changes: + + - Go 1.1 is no longer supported + - Use decimals fields in MySQL to format time types (#249) + - Buffer optimizations (#269) + - TLS ServerName defaults to the host (#283) + - Refactoring (#400, #410, #437) + - Adjusted documentation for second generation CloudSQL (#485) + - Documented DSN system var quoting rules (#502) + - Made statement.Close() calls idempotent to avoid errors in Go 1.6+ (#512) + +New Features: + + - Enable microsecond resolution on TIME, DATETIME and TIMESTAMP (#249) + - Support for returning table alias on Columns() (#289, #359, #382) + - Placeholder interpolation, can be actived with the DSN parameter `interpolateParams=true` (#309, #318, #490) + - Support for uint64 parameters with high bit set (#332, #345) + - Cleartext authentication plugin support (#327) + - Exported ParseDSN function and the Config struct (#403, #419, #429) + - Read / Write timeouts (#401) + - Support for JSON field type (#414) + - Support for multi-statements and multi-results (#411, #431) + - DSN parameter to set the driver-side max_allowed_packet value manually (#489) + - Native 
password authentication plugin support (#494, #524) + +Bugfixes: + + - Fixed handling of queries without columns and rows (#255) + - Fixed a panic when SetKeepAlive() failed (#298) + - Handle ERR packets while reading rows (#321) + - Fixed reading NULL length-encoded integers in MySQL 5.6+ (#349) + - Fixed absolute paths support in LOAD LOCAL DATA INFILE (#356) + - Actually zero out bytes in handshake response (#378) + - Fixed race condition in registering LOAD DATA INFILE handler (#383) + - Fixed tests with MySQL 5.7.9+ (#380) + - QueryUnescape TLS config names (#397) + - Fixed "broken pipe" error by writing to closed socket (#390) + - Fixed LOAD LOCAL DATA INFILE buffering (#424) + - Fixed parsing of floats into float64 when placeholders are used (#434) + - Fixed DSN tests with Go 1.7+ (#459) + - Handle ERR packets while waiting for EOF (#473) + - Invalidate connection on error while discarding additional results (#513) + - Allow terminating packets of length 0 (#516) + + +## Version 1.2 (2014-06-03) + +Changes: + + - We switched back to a "rolling release". `go get` installs the current master branch again + - Version v1 of the driver will not be maintained anymore. Go 1.0 is no longer supported by this driver + - Exported errors to allow easy checking from application code + - Enabled TCP Keepalives on TCP connections + - Optimized INFILE handling (better buffer size calculation, lazy init, ...) 
+ - The DSN parser also checks for a missing separating slash + - Faster binary date / datetime to string formatting + - Also exported the MySQLWarning type + - mysqlConn.Close returns the first error encountered instead of ignoring all errors + - writePacket() automatically writes the packet size to the header + - readPacket() uses an iterative approach instead of the recursive approach to merge splitted packets + +New Features: + + - `RegisterDial` allows the usage of a custom dial function to establish the network connection + - Setting the connection collation is possible with the `collation` DSN parameter. This parameter should be preferred over the `charset` parameter + - Logging of critical errors is configurable with `SetLogger` + - Google CloudSQL support + +Bugfixes: + + - Allow more than 32 parameters in prepared statements + - Various old_password fixes + - Fixed TestConcurrent test to pass Go's race detection + - Fixed appendLengthEncodedInteger for large numbers + - Renamed readLengthEnodedString to readLengthEncodedString and skipLengthEnodedString to skipLengthEncodedString (fixed typo) + + +## Version 1.1 (2013-11-02) + +Changes: + + - Go-MySQL-Driver now requires Go 1.1 + - Connections now use the collation `utf8_general_ci` by default. Adding `&charset=UTF8` to the DSN should not be necessary anymore + - Made closing rows and connections error tolerant. This allows for example deferring rows.Close() without checking for errors + - `[]byte(nil)` is now treated as a NULL value. Before, it was treated like an empty string / `[]byte("")` + - DSN parameter values must now be url.QueryEscape'ed. This allows text values to contain special characters, such as '&'. + - Use the IO buffer also for writing. 
This results in zero allocations (by the driver) for most queries + - Optimized the buffer for reading + - stmt.Query now caches column metadata + - New Logo + - Changed the copyright header to include all contributors + - Improved the LOAD INFILE documentation + - The driver struct is now exported to make the driver directly accessible + - Refactored the driver tests + - Added more benchmarks and moved all to a separate file + - Other small refactoring + +New Features: + + - Added *old_passwords* support: Required in some cases, but must be enabled by adding `allowOldPasswords=true` to the DSN since it is insecure + - Added a `clientFoundRows` parameter: Return the number of matching rows instead of the number of rows changed on UPDATEs + - Added TLS/SSL support: Use a TLS/SSL encrypted connection to the server. Custom TLS configs can be registered and used + +Bugfixes: + + - Fixed MySQL 4.1 support: MySQL 4.1 sends packets with lengths which differ from the specification + - Convert to DB timezone when inserting `time.Time` + - Splitted packets (more than 16MB) are now merged correctly + - Fixed false positive `io.EOF` errors when the data was fully read + - Avoid panics on reuse of closed connections + - Fixed empty string producing false nil values + - Fixed sign byte for positive TIME fields + + +## Version 1.0 (2013-05-14) + +Initial Release diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/LICENSE b/terraform-server/vendor/github.com/go-sql-driver/mysql/LICENSE new file mode 100644 index 00000000..14e2f777 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/LICENSE @@ -0,0 +1,373 @@ +Mozilla Public License Version 2.0 +================================== + +1. Definitions +-------------- + +1.1. "Contributor" + means each individual or legal entity that creates, contributes to + the creation of, or owns Covered Software. + +1.2. 
"Contributor Version" + means the combination of the Contributions of others (if any) used + by a Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + means Covered Software of a particular Contributor. + +1.4. "Covered Software" + means Source Code Form to which the initial Contributor has attached + the notice in Exhibit A, the Executable Form of such Source Code + Form, and Modifications of such Source Code Form, in each case + including portions thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + (a) that the initial Contributor has attached the notice described + in Exhibit B to the Covered Software; or + + (b) that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the + terms of a Secondary License. + +1.6. "Executable Form" + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + means a work that combines Covered Software with other material, in + a separate file or files, that is not Covered Software. + +1.8. "License" + means this document. + +1.9. "Licensable" + means having the right to grant, to the maximum extent possible, + whether at the time of the initial grant or subsequently, any and + all of the rights conveyed by this License. + +1.10. "Modifications" + means any of the following: + + (a) any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered + Software; or + + (b) any new file in Source Code Form that contains any Covered + Software. + +1.11. "Patent Claims" of a Contributor + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the + License, by the making, using, selling, offering for sale, having + made, import, or transfer of either its Contributions or its + Contributor Version. + +1.12. 
"Secondary License" + means either the GNU General Public License, Version 2.0, the GNU + Lesser General Public License, Version 2.1, the GNU Affero General + Public License, Version 3.0, or any later versions of those + licenses. + +1.13. "Source Code Form" + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that + controls, is controlled by, or is under common control with You. For + purposes of this definition, "control" means (a) the power, direct + or indirect, to cause the direction or management of such entity, + whether by contract or otherwise, or (b) ownership of more than + fifty percent (50%) of the outstanding shares or beneficial + ownership of such entity. + +2. License Grants and Conditions +-------------------------------- + +2.1. Grants + +Each Contributor hereby grants You a world-wide, royalty-free, +non-exclusive license: + +(a) under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + +(b) under Patent Claims of such Contributor to make, use, sell, offer + for sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + +The licenses granted in Section 2.1 with respect to any Contribution +become effective for each Contribution on the date the Contributor first +distributes such Contribution. + +2.3. Limitations on Grant Scope + +The licenses granted in this Section 2 are the only rights granted under +this License. No additional rights or licenses will be implied from the +distribution or licensing of Covered Software under this License. 
+Notwithstanding Section 2.1(b) above, no patent license is granted by a +Contributor: + +(a) for any code that a Contributor has removed from Covered Software; + or + +(b) for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + +(c) under Patent Claims infringed by Covered Software in the absence of + its Contributions. + +This License does not grant any rights in the trademarks, service marks, +or logos of any Contributor (except as may be necessary to comply with +the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + +No Contributor makes additional grants as a result of Your choice to +distribute the Covered Software under a subsequent version of this +License (see Section 10.2) or under the terms of a Secondary License (if +permitted under the terms of Section 3.3). + +2.5. Representation + +Each Contributor represents that the Contributor believes its +Contributions are its original creation(s) or it has sufficient rights +to grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + +This License is not intended to limit any rights You have under +applicable copyright doctrines of fair use, fair dealing, or other +equivalents. + +2.7. Conditions + +Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted +in Section 2.1. + +3. Responsibilities +------------------- + +3.1. Distribution of Source Form + +All distribution of Covered Software in Source Code Form, including any +Modifications that You create or to which You contribute, must be under +the terms of this License. You must inform recipients that the Source +Code Form of the Covered Software is governed by the terms of this +License, and how they can obtain a copy of this License. You may not +attempt to alter or restrict the recipients' rights in the Source Code +Form. + +3.2. 
Distribution of Executable Form + +If You distribute Covered Software in Executable Form then: + +(a) such Covered Software must also be made available in Source Code + Form, as described in Section 3.1, and You must inform recipients of + the Executable Form how they can obtain a copy of such Source Code + Form by reasonable means in a timely manner, at a charge no more + than the cost of distribution to the recipient; and + +(b) You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter + the recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + +You may create and distribute a Larger Work under terms of Your choice, +provided that You also comply with the requirements of this License for +the Covered Software. If the Larger Work is a combination of Covered +Software with a work governed by one or more Secondary Licenses, and the +Covered Software is not Incompatible With Secondary Licenses, this +License permits You to additionally distribute such Covered Software +under the terms of such Secondary License(s), so that the recipient of +the Larger Work may, at their option, further distribute the Covered +Software under the terms of either this License or such Secondary +License(s). + +3.4. Notices + +You may not remove or alter the substance of any license notices +(including copyright notices, patent notices, disclaimers of warranty, +or limitations of liability) contained within the Source Code Form of +the Covered Software, except that You may alter any license notices to +the extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + +You may choose to offer, and to charge a fee for, warranty, support, +indemnity or liability obligations to one or more recipients of Covered +Software. 
However, You may do so only on Your own behalf, and not on +behalf of any Contributor. You must make it absolutely clear that any +such warranty, support, indemnity, or liability obligation is offered by +You alone, and You hereby agree to indemnify every Contributor for any +liability incurred by such Contributor as a result of warranty, support, +indemnity or liability terms You offer. You may include additional +disclaimers of warranty and limitations of liability specific to any +jurisdiction. + +4. Inability to Comply Due to Statute or Regulation +--------------------------------------------------- + +If it is impossible for You to comply with any of the terms of this +License with respect to some or all of the Covered Software due to +statute, judicial order, or regulation then You must: (a) comply with +the terms of this License to the maximum extent possible; and (b) +describe the limitations and the code they affect. Such description must +be placed in a text file included with all distributions of the Covered +Software under this License. Except to the extent prohibited by statute +or regulation, such description must be sufficiently detailed for a +recipient of ordinary skill to be able to understand it. + +5. Termination +-------------- + +5.1. The rights granted under this License will terminate automatically +if You fail to comply with any of its terms. However, if You become +compliant, then the rights granted under this License from a particular +Contributor are reinstated (a) provisionally, unless and until such +Contributor explicitly and finally terminates Your grants, and (b) on an +ongoing basis, if such Contributor fails to notify You of the +non-compliance by some reasonable means prior to 60 days after You have +come back into compliance. 
Moreover, Your grants from a particular +Contributor are reinstated on an ongoing basis if such Contributor +notifies You of the non-compliance by some reasonable means, this is the +first time You have received notice of non-compliance with this License +from such Contributor, and You become compliant prior to 30 days after +Your receipt of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent +infringement claim (excluding declaratory judgment actions, +counter-claims, and cross-claims) alleging that a Contributor Version +directly or indirectly infringes any patent, then the rights granted to +You by any and all Contributors for the Covered Software under Section +2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all +end user license agreements (excluding distributors and resellers) which +have been validly granted by You or Your distributors under this License +prior to termination shall survive termination. + +************************************************************************ +* * +* 6. Disclaimer of Warranty * +* ------------------------- * +* * +* Covered Software is provided under this License on an "as is" * +* basis, without warranty of any kind, either expressed, implied, or * +* statutory, including, without limitation, warranties that the * +* Covered Software is free of defects, merchantable, fit for a * +* particular purpose or non-infringing. The entire risk as to the * +* quality and performance of the Covered Software is with You. * +* Should any Covered Software prove defective in any respect, You * +* (not any Contributor) assume the cost of any necessary servicing, * +* repair, or correction. This disclaimer of warranty constitutes an * +* essential part of this License. No use of any Covered Software is * +* authorized under this License except under this disclaimer. 
* +* * +************************************************************************ + +************************************************************************ +* * +* 7. Limitation of Liability * +* -------------------------- * +* * +* Under no circumstances and under no legal theory, whether tort * +* (including negligence), contract, or otherwise, shall any * +* Contributor, or anyone who distributes Covered Software as * +* permitted above, be liable to You for any direct, indirect, * +* special, incidental, or consequential damages of any character * +* including, without limitation, damages for lost profits, loss of * +* goodwill, work stoppage, computer failure or malfunction, or any * +* and all other commercial damages or losses, even if such party * +* shall have been informed of the possibility of such damages. This * +* limitation of liability shall not apply to liability for death or * +* personal injury resulting from such party's negligence to the * +* extent applicable law prohibits such limitation. Some * +* jurisdictions do not allow the exclusion or limitation of * +* incidental or consequential damages, so this exclusion and * +* limitation may not apply to You. * +* * +************************************************************************ + +8. Litigation +------------- + +Any litigation relating to this License may be brought only in the +courts of a jurisdiction where the defendant maintains its principal +place of business and such litigation shall be governed by laws of that +jurisdiction, without reference to its conflict-of-law provisions. +Nothing in this Section shall prevent a party's ability to bring +cross-claims or counter-claims. + +9. Miscellaneous +---------------- + +This License represents the complete agreement concerning the subject +matter hereof. If any provision of this License is held to be +unenforceable, such provision shall be reformed only to the extent +necessary to make it enforceable. 
Any law or regulation which provides +that the language of a contract shall be construed against the drafter +shall not be used to construe this License against a Contributor. + +10. Versions of the License +--------------------------- + +10.1. New Versions + +Mozilla Foundation is the license steward. Except as provided in Section +10.3, no one other than the license steward has the right to modify or +publish new versions of this License. Each version will be given a +distinguishing version number. + +10.2. Effect of New Versions + +You may distribute the Covered Software under the terms of the version +of the License under which You originally received the Covered Software, +or under the terms of any subsequent version published by the license +steward. + +10.3. Modified Versions + +If you create software not governed by this License, and you want to +create a new license for such software, you may create and use a +modified version of this License if you rename the license and remove +any references to the name of the license steward (except to note that +such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary +Licenses + +If You choose to distribute Source Code Form that is Incompatible With +Secondary Licenses under the terms of this version of the License, the +notice described in Exhibit B of this License must be attached. + +Exhibit A - Source Code Form License Notice +------------------------------------------- + + This Source Code Form is subject to the terms of the Mozilla Public + License, v. 2.0. If a copy of the MPL was not distributed with this + file, You can obtain one at http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular +file, then You may include the notice in a location (such as a LICENSE +file in a relevant directory) where a recipient would be likely to look +for such a notice. 
+ +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice +--------------------------------------------------------- + + This Source Code Form is "Incompatible With Secondary Licenses", as + defined by the Mozilla Public License, v. 2.0. diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/README.md b/terraform-server/vendor/github.com/go-sql-driver/mysql/README.md new file mode 100644 index 00000000..0b13154f --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/README.md @@ -0,0 +1,520 @@ +# Go-MySQL-Driver + +A MySQL-Driver for Go's [database/sql](https://golang.org/pkg/database/sql/) package + +![Go-MySQL-Driver logo](https://raw.github.com/wiki/go-sql-driver/mysql/gomysql_m.png "Golang Gopher holding the MySQL Dolphin") + +--------------------------------------- + * [Features](#features) + * [Requirements](#requirements) + * [Installation](#installation) + * [Usage](#usage) + * [DSN (Data Source Name)](#dsn-data-source-name) + * [Password](#password) + * [Protocol](#protocol) + * [Address](#address) + * [Parameters](#parameters) + * [Examples](#examples) + * [Connection pool and timeouts](#connection-pool-and-timeouts) + * [context.Context Support](#contextcontext-support) + * [ColumnType Support](#columntype-support) + * [LOAD DATA LOCAL INFILE support](#load-data-local-infile-support) + * [time.Time support](#timetime-support) + * [Unicode support](#unicode-support) + * [Testing / Development](#testing--development) + * [License](#license) + +--------------------------------------- + +## Features + * Lightweight and [fast](https://github.com/go-sql-driver/sql-benchmark "golang MySQL-Driver performance") + * Native Go implementation. 
No C-bindings, just pure Go + * Connections over TCP/IPv4, TCP/IPv6, Unix domain sockets or [custom protocols](https://godoc.org/github.com/go-sql-driver/mysql#DialFunc) + * Automatic handling of broken connections + * Automatic Connection Pooling *(by database/sql package)* + * Supports queries larger than 16MB + * Full [`sql.RawBytes`](https://golang.org/pkg/database/sql/#RawBytes) support. + * Intelligent `LONG DATA` handling in prepared statements + * Secure `LOAD DATA LOCAL INFILE` support with file allowlisting and `io.Reader` support + * Optional `time.Time` parsing + * Optional placeholder interpolation + +## Requirements + * Go 1.10 or higher. We aim to support the 3 latest versions of Go. + * MySQL (4.1+), MariaDB, Percona Server, Google CloudSQL or Sphinx (2.2.3+) + +--------------------------------------- + +## Installation +Simple install the package to your [$GOPATH](https://github.com/golang/go/wiki/GOPATH "GOPATH") with the [go tool](https://golang.org/cmd/go/ "go command") from shell: +```bash +$ go get -u github.com/go-sql-driver/mysql +``` +Make sure [Git is installed](https://git-scm.com/downloads) on your machine and in your system's `PATH`. + +## Usage +_Go MySQL Driver_ is an implementation of Go's `database/sql/driver` interface. You only need to import the driver and can use the full [`database/sql`](https://golang.org/pkg/database/sql/) API then. + +Use `mysql` as `driverName` and a valid [DSN](#dsn-data-source-name) as `dataSourceName`: + +```go +import ( + "database/sql" + "time" + + _ "github.com/go-sql-driver/mysql" +) + +// ... + +db, err := sql.Open("mysql", "user:password@/dbname") +if err != nil { + panic(err) +} +// See "Important settings" section. +db.SetConnMaxLifetime(time.Minute * 3) +db.SetMaxOpenConns(10) +db.SetMaxIdleConns(10) +``` + +[Examples are available in our Wiki](https://github.com/go-sql-driver/mysql/wiki/Examples "Go-MySQL-Driver Examples"). 
+ +### Important settings + +`db.SetConnMaxLifetime()` is required to ensure connections are closed by the driver safely before connection is closed by MySQL server, OS, or other middlewares. Since some middlewares close idle connections by 5 minutes, we recommend timeout shorter than 5 minutes. This setting helps load balancing and changing system variables too. + +`db.SetMaxOpenConns()` is highly recommended to limit the number of connection used by the application. There is no recommended limit number because it depends on application and MySQL server. + +`db.SetMaxIdleConns()` is recommended to be set same to (or greater than) `db.SetMaxOpenConns()`. When it is smaller than `SetMaxOpenConns()`, connections can be opened and closed very frequently than you expect. Idle connections can be closed by the `db.SetConnMaxLifetime()`. If you want to close idle connections more rapidly, you can use `db.SetConnMaxIdleTime()` since Go 1.15. + + +### DSN (Data Source Name) + +The Data Source Name has a common format, like e.g. [PEAR DB](http://pear.php.net/manual/en/package.database.db.intro-dsn.php) uses it, but without type-prefix (optional parts marked by squared brackets): +``` +[username[:password]@][protocol[(address)]]/dbname[?param1=value1&...¶mN=valueN] +``` + +A DSN in its fullest form: +``` +username:password@protocol(address)/dbname?param=value +``` + +Except for the databasename, all values are optional. So the minimal DSN is: +``` +/dbname +``` + +If you do not want to preselect a database, leave `dbname` empty: +``` +/ +``` +This has the same effect as an empty DSN string: +``` + +``` + +Alternatively, [Config.FormatDSN](https://godoc.org/github.com/go-sql-driver/mysql#Config.FormatDSN) can be used to create a DSN string by filling a struct. + +#### Password +Passwords can consist of any character. Escaping is **not** necessary. + +#### Protocol +See [net.Dial](https://golang.org/pkg/net/#Dial) for more information which networks are available. 
+In general you should use an Unix domain socket if available and TCP otherwise for best performance. + +#### Address +For TCP and UDP networks, addresses have the form `host[:port]`. +If `port` is omitted, the default port will be used. +If `host` is a literal IPv6 address, it must be enclosed in square brackets. +The functions [net.JoinHostPort](https://golang.org/pkg/net/#JoinHostPort) and [net.SplitHostPort](https://golang.org/pkg/net/#SplitHostPort) manipulate addresses in this form. + +For Unix domain sockets the address is the absolute path to the MySQL-Server-socket, e.g. `/var/run/mysqld/mysqld.sock` or `/tmp/mysql.sock`. + +#### Parameters +*Parameters are case-sensitive!* + +Notice that any of `true`, `TRUE`, `True` or `1` is accepted to stand for a true boolean value. Not surprisingly, false can be specified as any of: `false`, `FALSE`, `False` or `0`. + +##### `allowAllFiles` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowAllFiles=true` disables the file allowlist for `LOAD DATA LOCAL INFILE` and allows *all* files. +[*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html) + +##### `allowCleartextPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`allowCleartextPasswords=true` allows using the [cleartext client side plugin](https://dev.mysql.com/doc/en/cleartext-pluggable-authentication.html) if required by an account, such as one defined with the [PAM authentication plugin](http://dev.mysql.com/doc/en/pam-authentication-plugin.html). Sending passwords in clear text may be a security problem in some configurations. To avoid problems if there is any possibility that the password would be intercepted, clients should connect to MySQL Server using a method that protects the password. Possibilities include [TLS / SSL](#tls), IPsec, or a private network. 
+ +##### `allowNativePasswords` + +``` +Type: bool +Valid Values: true, false +Default: true +``` +`allowNativePasswords=false` disallows the usage of MySQL native password method. + +##### `allowOldPasswords` + +``` +Type: bool +Valid Values: true, false +Default: false +``` +`allowOldPasswords=true` allows the usage of the insecure old password method. This should be avoided, but is necessary in some cases. See also [the old_passwords wiki page](https://github.com/go-sql-driver/mysql/wiki/old_passwords). + +##### `charset` + +``` +Type: string +Valid Values: +Default: none +``` + +Sets the charset used for client-server interaction (`"SET NAMES "`). If multiple charsets are set (separated by a comma), the following charset is used if setting the charset failes. This enables for example support for `utf8mb4` ([introduced in MySQL 5.5.3](http://dev.mysql.com/doc/refman/5.5/en/charset-unicode-utf8mb4.html)) with fallback to `utf8` for older servers (`charset=utf8mb4,utf8`). + +Usage of the `charset` parameter is discouraged because it issues additional queries to the server. +Unless you need the fallback behavior, please use `collation` instead. + +##### `checkConnLiveness` + +``` +Type: bool +Valid Values: true, false +Default: true +``` + +On supported platforms connections retrieved from the connection pool are checked for liveness before using them. If the check fails, the respective connection is marked as bad and the query retried with another connection. +`checkConnLiveness=false` disables this liveness check of connections. + +##### `collation` + +``` +Type: string +Valid Values: +Default: utf8mb4_general_ci +``` + +Sets the collation used for client-server interaction on connection. In contrast to `charset`, `collation` does not issue additional queries. If the specified collation is unavailable on the target server, the connection will fail. + +A list of valid charsets for a server is retrievable with `SHOW COLLATION`. 
+ +The default collation (`utf8mb4_general_ci`) is supported from MySQL 5.5. You should use an older collation (e.g. `utf8_general_ci`) for older MySQL. + +Collations for charset "ucs2", "utf16", "utf16le", and "utf32" can not be used ([ref](https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset)). + + +##### `clientFoundRows` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`clientFoundRows=true` causes an UPDATE to return the number of matching rows instead of the number of rows changed. + +##### `columnsWithAlias` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +When `columnsWithAlias` is true, calls to `sql.Rows.Columns()` will return the table alias and the column name separated by a dot. For example: + +``` +SELECT u.id FROM users as u +``` + +will return `u.id` instead of just `id` if `columnsWithAlias=true`. + +##### `interpolateParams` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +If `interpolateParams` is true, placeholders (`?`) in calls to `db.Query()` and `db.Exec()` are interpolated into a single query string with given parameters. This reduces the number of roundtrips, since the driver has to prepare a statement, execute it with given parameters and close the statement again with `interpolateParams=false`. + +*This can not be used together with the multibyte encodings BIG5, CP932, GB2312, GBK or SJIS. These are rejected as they may [introduce a SQL injection vulnerability](http://stackoverflow.com/a/12118602/3430118)!* + +##### `loc` + +``` +Type: string +Valid Values: +Default: UTC +``` + +Sets the location for time.Time values (when using `parseTime=true`). *"Local"* sets the system's location. See [time.LoadLocation](https://golang.org/pkg/time/#LoadLocation) for details. 
+ +Note that this sets the location for time.Time values but does not change MySQL's [time_zone setting](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html). For that see the [time_zone system variable](#system-variables), which can also be set as a DSN parameter. + +Please keep in mind, that param values must be [url.QueryEscape](https://golang.org/pkg/net/url/#QueryEscape)'ed. Alternatively you can manually replace the `/` with `%2F`. For example `US/Pacific` would be `loc=US%2FPacific`. + +##### `maxAllowedPacket` +``` +Type: decimal number +Default: 4194304 +``` + +Max packet size allowed in bytes. The default value is 4 MiB and should be adjusted to match the server settings. `maxAllowedPacket=0` can be used to automatically fetch the `max_allowed_packet` variable from server *on every connection*. + +##### `multiStatements` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +Allow multiple statements in one query. While this allows batch queries, it also greatly increases the risk of SQL injections. Only the result of the first query is returned, all other results are silently discarded. + +When `multiStatements` is used, `?` parameters must only be used in the first statement. + +##### `parseTime` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + +`parseTime=true` changes the output type of `DATE` and `DATETIME` values to `time.Time` instead of `[]byte` / `string` +The date or datetime like `0000-00-00 00:00:00` is converted into zero value of `time.Time`. + + +##### `readTimeout` + +``` +Type: duration +Default: 0 +``` + +I/O read timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + +##### `rejectReadOnly` + +``` +Type: bool +Valid Values: true, false +Default: false +``` + + +`rejectReadOnly=true` causes the driver to reject read-only connections. 
This +is for a possible race condition during an automatic failover, where the mysql +client gets connected to a read-only replica after the failover. + +Note that this should be a fairly rare case, as an automatic failover normally +happens when the primary is down, and the race condition shouldn't happen +unless it comes back up online as soon as the failover is kicked off. On the +other hand, when this happens, a MySQL application can get stuck on a +read-only connection until restarted. It is however fairly easy to reproduce, +for example, using a manual failover on AWS Aurora's MySQL-compatible cluster. + +If you are not relying on read-only transactions to reject writes that aren't +supposed to happen, setting this on some MySQL providers (such as AWS Aurora) +is safer for failovers. + +Note that ERROR 1290 can be returned for a `read-only` server and this option will +cause a retry for that error. However the same error number is used for some +other cases. You should ensure your application will never cause an ERROR 1290 +except for `read-only` mode when enabling this option. + + +##### `serverPubKey` + +``` +Type: string +Valid Values: +Default: none +``` + +Server public keys can be registered with [`mysql.RegisterServerPubKey`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterServerPubKey), which can then be used by the assigned name in the DSN. +Public keys are used to transmit encrypted data, e.g. for authentication. +If the server's public key is known, it should be set manually to avoid expensive and potentially insecure transmissions of the public key from the server to the client each time it is required. + + +##### `timeout` + +``` +Type: duration +Default: OS default +``` + +Timeout for establishing connections, aka dial timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. 
+ + +##### `tls` + +``` +Type: bool / string +Valid Values: true, false, skip-verify, preferred, +Default: false +``` + +`tls=true` enables TLS / SSL encrypted connection to the server. Use `skip-verify` if you want to use a self-signed or invalid certificate (server side) or use `preferred` to use TLS only when advertised by the server. This is similar to `skip-verify`, but additionally allows a fallback to a connection which is not encrypted. Neither `skip-verify` nor `preferred` add any reliable security. You can use a custom TLS config after registering it with [`mysql.RegisterTLSConfig`](https://godoc.org/github.com/go-sql-driver/mysql#RegisterTLSConfig). + + +##### `writeTimeout` + +``` +Type: duration +Default: 0 +``` + +I/O write timeout. The value must be a decimal number with a unit suffix (*"ms"*, *"s"*, *"m"*, *"h"*), such as *"30s"*, *"0.5m"* or *"1m30s"*. + + +##### System Variables + +Any other parameters are interpreted as system variables: + * `=`: `SET =` + * `=`: `SET =` + * `=%27%27`: `SET =''` + +Rules: +* The values for string variables must be quoted with `'`. +* The values must also be [url.QueryEscape](http://golang.org/pkg/net/url/#QueryEscape)'ed! + (which implies values of string variables must be wrapped with `%27`). 
+ +Examples: + * `autocommit=1`: `SET autocommit=1` + * [`time_zone=%27Europe%2FParis%27`](https://dev.mysql.com/doc/refman/5.5/en/time-zone-support.html): `SET time_zone='Europe/Paris'` + * [`transaction_isolation=%27REPEATABLE-READ%27`](https://dev.mysql.com/doc/refman/5.7/en/server-system-variables.html#sysvar_transaction_isolation): `SET transaction_isolation='REPEATABLE-READ'` + + +#### Examples +``` +user@unix(/path/to/socket)/dbname +``` + +``` +root:pw@unix(/tmp/mysql.sock)/myDatabase?loc=Local +``` + +``` +user:password@tcp(localhost:5555)/dbname?tls=skip-verify&autocommit=true +``` + +Treat warnings as errors by setting the system variable [`sql_mode`](https://dev.mysql.com/doc/refman/5.7/en/sql-mode.html): +``` +user:password@/dbname?sql_mode=TRADITIONAL +``` + +TCP via IPv6: +``` +user:password@tcp([de:ad:be:ef::ca:fe]:80)/dbname?timeout=90s&collation=utf8mb4_unicode_ci +``` + +TCP on a remote host, e.g. Amazon RDS: +``` +id:password@tcp(your-amazonaws-uri.com:3306)/dbname +``` + +Google Cloud SQL on App Engine: +``` +user:password@unix(/cloudsql/project-id:region-name:instance-name)/dbname +``` + +TCP using default port (3306) on localhost: +``` +user:password@tcp/dbname?charset=utf8mb4,utf8&sys_var=esc%40ped +``` + +Use the default protocol (tcp) and host (localhost:3306): +``` +user:password@/dbname +``` + +No Database preselected: +``` +user:password@/ +``` + + +### Connection pool and timeouts +The connection pool is managed by Go's database/sql package. For details on how to configure the size of the pool and how long connections stay in the pool see `*DB.SetMaxOpenConns`, `*DB.SetMaxIdleConns`, and `*DB.SetConnMaxLifetime` in the [database/sql documentation](https://golang.org/pkg/database/sql/). The read, write, and dial timeouts for each individual connection are configured with the DSN parameters [`readTimeout`](#readtimeout), [`writeTimeout`](#writetimeout), and [`timeout`](#timeout), respectively. 
+ +## `ColumnType` Support +This driver supports the [`ColumnType` interface](https://golang.org/pkg/database/sql/#ColumnType) introduced in Go 1.8, with the exception of [`ColumnType.Length()`](https://golang.org/pkg/database/sql/#ColumnType.Length), which is currently not supported. + +## `context.Context` Support +Go 1.8 added `database/sql` support for `context.Context`. This driver supports query timeouts and cancellation via contexts. +See [context support in the database/sql package](https://golang.org/doc/go1.8#database_sql) for more details. + + +### `LOAD DATA LOCAL INFILE` support +For this feature you need direct access to the package. Therefore you must change the import path (no `_`): +```go +import "github.com/go-sql-driver/mysql" +``` + +Files must be explicitly allowed by registering them with `mysql.RegisterLocalFile(filepath)` (recommended) or the allowlist check must be deactivated by using the DSN parameter `allowAllFiles=true` ([*Might be insecure!*](http://dev.mysql.com/doc/refman/5.7/en/load-data-local.html)). + +To use a `io.Reader` a handler function must be registered with `mysql.RegisterReaderHandler(name, handler)` which returns a `io.Reader` or `io.ReadCloser`. The Reader is available with the filepath `Reader::` then. Choose different names for different handlers and `DeregisterReaderHandler` when you don't need it anymore. + +See the [godoc of Go-MySQL-Driver](https://godoc.org/github.com/go-sql-driver/mysql "golang mysql driver documentation") for details. + + +### `time.Time` support +The default internal output type of MySQL `DATE` and `DATETIME` values is `[]byte` which allows you to scan the value into a `[]byte`, `string` or `sql.RawBytes` variable in your program. + +However, many want to scan MySQL `DATE` and `DATETIME` values into `time.Time` variables, which is the logical equivalent in Go to `DATE` and `DATETIME` in MySQL. 
You can do that by changing the internal output type from `[]byte` to `time.Time` with the DSN parameter `parseTime=true`. You can set the default [`time.Time` location](https://golang.org/pkg/time/#Location) with the `loc` DSN parameter. + +**Caution:** As of Go 1.1, this makes `time.Time` the only variable type you can scan `DATE` and `DATETIME` values into. This breaks for example [`sql.RawBytes` support](https://github.com/go-sql-driver/mysql/wiki/Examples#rawbytes). + + +### Unicode support +Since version 1.5 Go-MySQL-Driver automatically uses the collation `utf8mb4_general_ci` by default. + +Other collations / charsets can be set using the [`collation`](#collation) DSN parameter. + +Version 1.0 of the driver recommended adding `&charset=utf8` (alias for `SET NAMES utf8`) to the DSN to enable proper UTF-8 support. This is not necessary anymore. The [`collation`](#collation) parameter should be preferred to set another collation / charset than the default. + +See http://dev.mysql.com/doc/refman/8.0/en/charset-unicode.html for more details on MySQL's Unicode support. + +## Testing / Development +To run the driver tests you may need to adjust the configuration. See the [Testing Wiki-Page](https://github.com/go-sql-driver/mysql/wiki/Testing "Testing") for details. + +Go-MySQL-Driver is not feature-complete yet. Your help is very appreciated. +If you want to contribute, you can work on an [open issue](https://github.com/go-sql-driver/mysql/issues?state=open) or review a [pull request](https://github.com/go-sql-driver/mysql/pulls). + +See the [Contribution Guidelines](https://github.com/go-sql-driver/mysql/blob/master/.github/CONTRIBUTING.md) for details. + +--------------------------------------- + +## License +Go-MySQL-Driver is licensed under the [Mozilla Public License Version 2.0](https://raw.github.com/go-sql-driver/mysql/master/LICENSE) + +Mozilla summarizes the license scope as follows: +> MPL: The copyleft applies to any files containing MPLed code. 
+ + +That means: + * You can **use** the **unchanged** source code both in private and commercially. + * When distributing, you **must publish** the source code of any **changed files** licensed under the MPL 2.0 under a) the MPL 2.0 itself or b) a compatible license (e.g. GPL 3.0 or Apache License 2.0). + * You **needn't publish** the source code of your library as long as the files licensed under the MPL 2.0 are **unchanged**. + +Please read the [MPL 2.0 FAQ](https://www.mozilla.org/en-US/MPL/2.0/FAQ/) if you have further questions regarding the license. + +You can read the full terms here: [LICENSE](https://raw.github.com/go-sql-driver/mysql/master/LICENSE). + +![Go Gopher and MySQL Dolphin](https://raw.github.com/wiki/go-sql-driver/mysql/go-mysql-driver_m.jpg "Golang Gopher transporting the MySQL Dolphin in a wheelbarrow") diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/auth.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/auth.go new file mode 100644 index 00000000..b2f19e8f --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/auth.go @@ -0,0 +1,425 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/rand" + "crypto/rsa" + "crypto/sha1" + "crypto/sha256" + "crypto/x509" + "encoding/pem" + "fmt" + "sync" +) + +// server pub keys registry +var ( + serverPubKeyLock sync.RWMutex + serverPubKeyRegistry map[string]*rsa.PublicKey +) + +// RegisterServerPubKey registers a server RSA public key which can be used to +// send data in a secure manner to the server without receiving the public key +// in a potentially insecure way from the server first. 
+// Registered keys can afterwards be used adding serverPubKey= to the DSN. +// +// Note: The provided rsa.PublicKey instance is exclusively owned by the driver +// after registering it and may not be modified. +// +// data, err := ioutil.ReadFile("mykey.pem") +// if err != nil { +// log.Fatal(err) +// } +// +// block, _ := pem.Decode(data) +// if block == nil || block.Type != "PUBLIC KEY" { +// log.Fatal("failed to decode PEM block containing public key") +// } +// +// pub, err := x509.ParsePKIXPublicKey(block.Bytes) +// if err != nil { +// log.Fatal(err) +// } +// +// if rsaPubKey, ok := pub.(*rsa.PublicKey); ok { +// mysql.RegisterServerPubKey("mykey", rsaPubKey) +// } else { +// log.Fatal("not a RSA public key") +// } +// +func RegisterServerPubKey(name string, pubKey *rsa.PublicKey) { + serverPubKeyLock.Lock() + if serverPubKeyRegistry == nil { + serverPubKeyRegistry = make(map[string]*rsa.PublicKey) + } + + serverPubKeyRegistry[name] = pubKey + serverPubKeyLock.Unlock() +} + +// DeregisterServerPubKey removes the public key registered with the given name. 
+func DeregisterServerPubKey(name string) { + serverPubKeyLock.Lock() + if serverPubKeyRegistry != nil { + delete(serverPubKeyRegistry, name) + } + serverPubKeyLock.Unlock() +} + +func getServerPubKey(name string) (pubKey *rsa.PublicKey) { + serverPubKeyLock.RLock() + if v, ok := serverPubKeyRegistry[name]; ok { + pubKey = v + } + serverPubKeyLock.RUnlock() + return +} + +// Hash password using pre 4.1 (old password) method +// https://github.com/atcurtis/mariadb/blob/master/mysys/my_rnd.c +type myRnd struct { + seed1, seed2 uint32 +} + +const myRndMaxVal = 0x3FFFFFFF + +// Pseudo random number generator +func newMyRnd(seed1, seed2 uint32) *myRnd { + return &myRnd{ + seed1: seed1 % myRndMaxVal, + seed2: seed2 % myRndMaxVal, + } +} + +// Tested to be equivalent to MariaDB's floating point variant +// http://play.golang.org/p/QHvhd4qved +// http://play.golang.org/p/RG0q4ElWDx +func (r *myRnd) NextByte() byte { + r.seed1 = (r.seed1*3 + r.seed2) % myRndMaxVal + r.seed2 = (r.seed1 + r.seed2 + 33) % myRndMaxVal + + return byte(uint64(r.seed1) * 31 / myRndMaxVal) +} + +// Generate binary hash from byte string using insecure pre 4.1 method +func pwHash(password []byte) (result [2]uint32) { + var add uint32 = 7 + var tmp uint32 + + result[0] = 1345345333 + result[1] = 0x12345671 + + for _, c := range password { + // skip spaces and tabs in password + if c == ' ' || c == '\t' { + continue + } + + tmp = uint32(c) + result[0] ^= (((result[0] & 63) + add) * tmp) + (result[0] << 8) + result[1] += (result[1] << 8) ^ result[0] + add += tmp + } + + // Remove sign bit (1<<31)-1) + result[0] &= 0x7FFFFFFF + result[1] &= 0x7FFFFFFF + + return +} + +// Hash password using insecure pre 4.1 method +func scrambleOldPassword(scramble []byte, password string) []byte { + scramble = scramble[:8] + + hashPw := pwHash([]byte(password)) + hashSc := pwHash(scramble) + + r := newMyRnd(hashPw[0]^hashSc[0], hashPw[1]^hashSc[1]) + + var out [8]byte + for i := range out { + out[i] = r.NextByte() + 64 
+ } + + mask := r.NextByte() + for i := range out { + out[i] ^= mask + } + + return out[:] +} + +// Hash password using 4.1+ method (SHA1) +func scramblePassword(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + // stage1Hash = SHA1(password) + crypt := sha1.New() + crypt.Write([]byte(password)) + stage1 := crypt.Sum(nil) + + // scrambleHash = SHA1(scramble + SHA1(stage1Hash)) + // inner Hash + crypt.Reset() + crypt.Write(stage1) + hash := crypt.Sum(nil) + + // outer Hash + crypt.Reset() + crypt.Write(scramble) + crypt.Write(hash) + scramble = crypt.Sum(nil) + + // token = scrambleHash XOR stage1Hash + for i := range scramble { + scramble[i] ^= stage1[i] + } + return scramble +} + +// Hash password using MySQL 8+ method (SHA256) +func scrambleSHA256Password(scramble []byte, password string) []byte { + if len(password) == 0 { + return nil + } + + // XOR(SHA256(password), SHA256(SHA256(SHA256(password)), scramble)) + + crypt := sha256.New() + crypt.Write([]byte(password)) + message1 := crypt.Sum(nil) + + crypt.Reset() + crypt.Write(message1) + message1Hash := crypt.Sum(nil) + + crypt.Reset() + crypt.Write(message1Hash) + crypt.Write(scramble) + message2 := crypt.Sum(nil) + + for i := range message1 { + message1[i] ^= message2[i] + } + + return message1 +} + +func encryptPassword(password string, seed []byte, pub *rsa.PublicKey) ([]byte, error) { + plain := make([]byte, len(password)+1) + copy(plain, password) + for i := range plain { + j := i % len(seed) + plain[i] ^= seed[j] + } + sha1 := sha1.New() + return rsa.EncryptOAEP(sha1, rand.Reader, pub, plain, nil) +} + +func (mc *mysqlConn) sendEncryptedPassword(seed []byte, pub *rsa.PublicKey) error { + enc, err := encryptPassword(mc.cfg.Passwd, seed, pub) + if err != nil { + return err + } + return mc.writeAuthSwitchPacket(enc) +} + +func (mc *mysqlConn) auth(authData []byte, plugin string) ([]byte, error) { + switch plugin { + case "caching_sha2_password": + authResp := 
scrambleSHA256Password(authData, mc.cfg.Passwd) + return authResp, nil + + case "mysql_old_password": + if !mc.cfg.AllowOldPasswords { + return nil, ErrOldPassword + } + if len(mc.cfg.Passwd) == 0 { + return nil, nil + } + // Note: there are edge cases where this should work but doesn't; + // this is currently "wontfix": + // https://github.com/go-sql-driver/mysql/issues/184 + authResp := append(scrambleOldPassword(authData[:8], mc.cfg.Passwd), 0) + return authResp, nil + + case "mysql_clear_password": + if !mc.cfg.AllowCleartextPasswords { + return nil, ErrCleartextPassword + } + // http://dev.mysql.com/doc/refman/5.7/en/cleartext-authentication-plugin.html + // http://dev.mysql.com/doc/refman/5.7/en/pam-authentication-plugin.html + return append([]byte(mc.cfg.Passwd), 0), nil + + case "mysql_native_password": + if !mc.cfg.AllowNativePasswords { + return nil, ErrNativePassword + } + // https://dev.mysql.com/doc/internals/en/secure-password-authentication.html + // Native password authentication only need and will need 20-byte challenge. 
+ authResp := scramblePassword(authData[:20], mc.cfg.Passwd) + return authResp, nil + + case "sha256_password": + if len(mc.cfg.Passwd) == 0 { + return []byte{0}, nil + } + if mc.cfg.tls != nil || mc.cfg.Net == "unix" { + // write cleartext auth packet + return append([]byte(mc.cfg.Passwd), 0), nil + } + + pubKey := mc.cfg.pubKey + if pubKey == nil { + // request public key from server + return []byte{1}, nil + } + + // encrypted password + enc, err := encryptPassword(mc.cfg.Passwd, authData, pubKey) + return enc, err + + default: + errLog.Print("unknown auth plugin:", plugin) + return nil, ErrUnknownPlugin + } +} + +func (mc *mysqlConn) handleAuthResult(oldAuthData []byte, plugin string) error { + // Read Result Packet + authData, newPlugin, err := mc.readAuthResult() + if err != nil { + return err + } + + // handle auth plugin switch, if requested + if newPlugin != "" { + // If CLIENT_PLUGIN_AUTH capability is not supported, no new cipher is + // sent and we have to keep using the cipher sent in the init packet. 
+ if authData == nil { + authData = oldAuthData + } else { + // copy data from read buffer to owned slice + copy(oldAuthData, authData) + } + + plugin = newPlugin + + authResp, err := mc.auth(authData, plugin) + if err != nil { + return err + } + if err = mc.writeAuthSwitchPacket(authResp); err != nil { + return err + } + + // Read Result Packet + authData, newPlugin, err = mc.readAuthResult() + if err != nil { + return err + } + + // Do not allow to change the auth plugin more than once + if newPlugin != "" { + return ErrMalformPkt + } + } + + switch plugin { + + // https://insidemysql.com/preparing-your-community-connector-for-mysql-8-part-2-sha256/ + case "caching_sha2_password": + switch len(authData) { + case 0: + return nil // auth successful + case 1: + switch authData[0] { + case cachingSha2PasswordFastAuthSuccess: + if err = mc.readResultOK(); err == nil { + return nil // auth successful + } + + case cachingSha2PasswordPerformFullAuthentication: + if mc.cfg.tls != nil || mc.cfg.Net == "unix" { + // write cleartext auth packet + err = mc.writeAuthSwitchPacket(append([]byte(mc.cfg.Passwd), 0)) + if err != nil { + return err + } + } else { + pubKey := mc.cfg.pubKey + if pubKey == nil { + // request public key from server + data, err := mc.buf.takeSmallBuffer(4 + 1) + if err != nil { + return err + } + data[4] = cachingSha2PasswordRequestPublicKey + mc.writePacket(data) + + // parse public key + if data, err = mc.readPacket(); err != nil { + return err + } + + block, rest := pem.Decode(data[1:]) + if block == nil { + return fmt.Errorf("No Pem data found, data: %s", rest) + } + pkix, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + pubKey = pkix.(*rsa.PublicKey) + } + + // send encrypted password + err = mc.sendEncryptedPassword(oldAuthData, pubKey) + if err != nil { + return err + } + } + return mc.readResultOK() + + default: + return ErrMalformPkt + } + default: + return ErrMalformPkt + } + + case "sha256_password": + switch 
len(authData) { + case 0: + return nil // auth successful + default: + block, _ := pem.Decode(authData) + pub, err := x509.ParsePKIXPublicKey(block.Bytes) + if err != nil { + return err + } + + // send encrypted password + err = mc.sendEncryptedPassword(oldAuthData, pub.(*rsa.PublicKey)) + if err != nil { + return err + } + return mc.readResultOK() + } + + default: + return nil // auth successful + } + + return err +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/buffer.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/buffer.go new file mode 100644 index 00000000..0774c5c8 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/buffer.go @@ -0,0 +1,182 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "io" + "net" + "time" +) + +const defaultBufSize = 4096 +const maxCachedBufSize = 256 * 1024 + +// A buffer which is used for both reading and writing. +// This is possible since communication on each connection is synchronous. +// In other words, we can't write and read simultaneously on the same connection. +// The buffer is similar to bufio.Reader / Writer but zero-copy-ish +// Also highly optimized for this particular use case. +// This buffer is backed by two byte slices in a double-buffering scheme +type buffer struct { + buf []byte // buf is a byte buffer who's length and capacity are equal. + nc net.Conn + idx int + length int + timeout time.Duration + dbuf [2][]byte // dbuf is an array with the two byte slices that back this buffer + flipcnt uint // flipccnt is the current buffer counter for double-buffering +} + +// newBuffer allocates and returns a new buffer. 
+func newBuffer(nc net.Conn) buffer { + fg := make([]byte, defaultBufSize) + return buffer{ + buf: fg, + nc: nc, + dbuf: [2][]byte{fg, nil}, + } +} + +// flip replaces the active buffer with the background buffer +// this is a delayed flip that simply increases the buffer counter; +// the actual flip will be performed the next time we call `buffer.fill` +func (b *buffer) flip() { + b.flipcnt += 1 +} + +// fill reads into the buffer until at least _need_ bytes are in it +func (b *buffer) fill(need int) error { + n := b.length + // fill data into its double-buffering target: if we've called + // flip on this buffer, we'll be copying to the background buffer, + // and then filling it with network data; otherwise we'll just move + // the contents of the current buffer to the front before filling it + dest := b.dbuf[b.flipcnt&1] + + // grow buffer if necessary to fit the whole packet. + if need > len(dest) { + // Round up to the next multiple of the default size + dest = make([]byte, ((need/defaultBufSize)+1)*defaultBufSize) + + // if the allocated buffer is not too large, move it to backing storage + // to prevent extra allocations on applications that perform large reads + if len(dest) <= maxCachedBufSize { + b.dbuf[b.flipcnt&1] = dest + } + } + + // if we're filling the fg buffer, move the existing data to the start of it. + // if we're filling the bg buffer, copy over the data + if n > 0 { + copy(dest[:n], b.buf[b.idx:]) + } + + b.buf = dest + b.idx = 0 + + for { + if b.timeout > 0 { + if err := b.nc.SetReadDeadline(time.Now().Add(b.timeout)); err != nil { + return err + } + } + + nn, err := b.nc.Read(b.buf[n:]) + n += nn + + switch err { + case nil: + if n < need { + continue + } + b.length = n + return nil + + case io.EOF: + if n >= need { + b.length = n + return nil + } + return io.ErrUnexpectedEOF + + default: + return err + } + } +} + +// returns next N bytes from buffer. 
+// The returned slice is only guaranteed to be valid until the next read +func (b *buffer) readNext(need int) ([]byte, error) { + if b.length < need { + // refill + if err := b.fill(need); err != nil { + return nil, err + } + } + + offset := b.idx + b.idx += need + b.length -= need + return b.buf[offset:b.idx], nil +} + +// takeBuffer returns a buffer with the requested size. +// If possible, a slice from the existing buffer is returned. +// Otherwise a bigger buffer is made. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeBuffer(length int) ([]byte, error) { + if b.length > 0 { + return nil, ErrBusyBuffer + } + + // test (cheap) general case first + if length <= cap(b.buf) { + return b.buf[:length], nil + } + + if length < maxPacketSize { + b.buf = make([]byte, length) + return b.buf, nil + } + + // buffer is larger than we want to store. + return make([]byte, length), nil +} + +// takeSmallBuffer is shortcut which can be used if length is +// known to be smaller than defaultBufSize. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeSmallBuffer(length int) ([]byte, error) { + if b.length > 0 { + return nil, ErrBusyBuffer + } + return b.buf[:length], nil +} + +// takeCompleteBuffer returns the complete existing buffer. +// This can be used if the necessary buffer size is unknown. +// cap and len of the returned buffer will be equal. +// Only one buffer (total) can be used at a time. +func (b *buffer) takeCompleteBuffer() ([]byte, error) { + if b.length > 0 { + return nil, ErrBusyBuffer + } + return b.buf, nil +} + +// store stores buf, an updated buffer, if its suitable to do so. 
+func (b *buffer) store(buf []byte) error { + if b.length > 0 { + return ErrBusyBuffer + } else if cap(buf) <= maxPacketSize && cap(buf) > cap(b.buf) { + b.buf = buf[:cap(buf)] + } + return nil +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/collations.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/collations.go new file mode 100644 index 00000000..326a9f7f --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/collations.go @@ -0,0 +1,265 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2014 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +const defaultCollation = "utf8mb4_general_ci" +const binaryCollation = "binary" + +// A list of available collations mapped to the internal ID. +// To update this map use the following MySQL query: +// SELECT COLLATION_NAME, ID FROM information_schema.COLLATIONS WHERE ID<256 ORDER BY ID +// +// Handshake packet have only 1 byte for collation_id. So we can't use collations with ID > 255. +// +// ucs2, utf16, and utf32 can't be used for connection charset. +// https://dev.mysql.com/doc/refman/5.7/en/charset-connection.html#charset-connection-impermissible-client-charset +// They are commented out to reduce this map. 
+var collations = map[string]byte{ + "big5_chinese_ci": 1, + "latin2_czech_cs": 2, + "dec8_swedish_ci": 3, + "cp850_general_ci": 4, + "latin1_german1_ci": 5, + "hp8_english_ci": 6, + "koi8r_general_ci": 7, + "latin1_swedish_ci": 8, + "latin2_general_ci": 9, + "swe7_swedish_ci": 10, + "ascii_general_ci": 11, + "ujis_japanese_ci": 12, + "sjis_japanese_ci": 13, + "cp1251_bulgarian_ci": 14, + "latin1_danish_ci": 15, + "hebrew_general_ci": 16, + "tis620_thai_ci": 18, + "euckr_korean_ci": 19, + "latin7_estonian_cs": 20, + "latin2_hungarian_ci": 21, + "koi8u_general_ci": 22, + "cp1251_ukrainian_ci": 23, + "gb2312_chinese_ci": 24, + "greek_general_ci": 25, + "cp1250_general_ci": 26, + "latin2_croatian_ci": 27, + "gbk_chinese_ci": 28, + "cp1257_lithuanian_ci": 29, + "latin5_turkish_ci": 30, + "latin1_german2_ci": 31, + "armscii8_general_ci": 32, + "utf8_general_ci": 33, + "cp1250_czech_cs": 34, + //"ucs2_general_ci": 35, + "cp866_general_ci": 36, + "keybcs2_general_ci": 37, + "macce_general_ci": 38, + "macroman_general_ci": 39, + "cp852_general_ci": 40, + "latin7_general_ci": 41, + "latin7_general_cs": 42, + "macce_bin": 43, + "cp1250_croatian_ci": 44, + "utf8mb4_general_ci": 45, + "utf8mb4_bin": 46, + "latin1_bin": 47, + "latin1_general_ci": 48, + "latin1_general_cs": 49, + "cp1251_bin": 50, + "cp1251_general_ci": 51, + "cp1251_general_cs": 52, + "macroman_bin": 53, + //"utf16_general_ci": 54, + //"utf16_bin": 55, + //"utf16le_general_ci": 56, + "cp1256_general_ci": 57, + "cp1257_bin": 58, + "cp1257_general_ci": 59, + //"utf32_general_ci": 60, + //"utf32_bin": 61, + //"utf16le_bin": 62, + "binary": 63, + "armscii8_bin": 64, + "ascii_bin": 65, + "cp1250_bin": 66, + "cp1256_bin": 67, + "cp866_bin": 68, + "dec8_bin": 69, + "greek_bin": 70, + "hebrew_bin": 71, + "hp8_bin": 72, + "keybcs2_bin": 73, + "koi8r_bin": 74, + "koi8u_bin": 75, + "utf8_tolower_ci": 76, + "latin2_bin": 77, + "latin5_bin": 78, + "latin7_bin": 79, + "cp850_bin": 80, + "cp852_bin": 81, + "swe7_bin": 82, + 
"utf8_bin": 83, + "big5_bin": 84, + "euckr_bin": 85, + "gb2312_bin": 86, + "gbk_bin": 87, + "sjis_bin": 88, + "tis620_bin": 89, + //"ucs2_bin": 90, + "ujis_bin": 91, + "geostd8_general_ci": 92, + "geostd8_bin": 93, + "latin1_spanish_ci": 94, + "cp932_japanese_ci": 95, + "cp932_bin": 96, + "eucjpms_japanese_ci": 97, + "eucjpms_bin": 98, + "cp1250_polish_ci": 99, + //"utf16_unicode_ci": 101, + //"utf16_icelandic_ci": 102, + //"utf16_latvian_ci": 103, + //"utf16_romanian_ci": 104, + //"utf16_slovenian_ci": 105, + //"utf16_polish_ci": 106, + //"utf16_estonian_ci": 107, + //"utf16_spanish_ci": 108, + //"utf16_swedish_ci": 109, + //"utf16_turkish_ci": 110, + //"utf16_czech_ci": 111, + //"utf16_danish_ci": 112, + //"utf16_lithuanian_ci": 113, + //"utf16_slovak_ci": 114, + //"utf16_spanish2_ci": 115, + //"utf16_roman_ci": 116, + //"utf16_persian_ci": 117, + //"utf16_esperanto_ci": 118, + //"utf16_hungarian_ci": 119, + //"utf16_sinhala_ci": 120, + //"utf16_german2_ci": 121, + //"utf16_croatian_ci": 122, + //"utf16_unicode_520_ci": 123, + //"utf16_vietnamese_ci": 124, + //"ucs2_unicode_ci": 128, + //"ucs2_icelandic_ci": 129, + //"ucs2_latvian_ci": 130, + //"ucs2_romanian_ci": 131, + //"ucs2_slovenian_ci": 132, + //"ucs2_polish_ci": 133, + //"ucs2_estonian_ci": 134, + //"ucs2_spanish_ci": 135, + //"ucs2_swedish_ci": 136, + //"ucs2_turkish_ci": 137, + //"ucs2_czech_ci": 138, + //"ucs2_danish_ci": 139, + //"ucs2_lithuanian_ci": 140, + //"ucs2_slovak_ci": 141, + //"ucs2_spanish2_ci": 142, + //"ucs2_roman_ci": 143, + //"ucs2_persian_ci": 144, + //"ucs2_esperanto_ci": 145, + //"ucs2_hungarian_ci": 146, + //"ucs2_sinhala_ci": 147, + //"ucs2_german2_ci": 148, + //"ucs2_croatian_ci": 149, + //"ucs2_unicode_520_ci": 150, + //"ucs2_vietnamese_ci": 151, + //"ucs2_general_mysql500_ci": 159, + //"utf32_unicode_ci": 160, + //"utf32_icelandic_ci": 161, + //"utf32_latvian_ci": 162, + //"utf32_romanian_ci": 163, + //"utf32_slovenian_ci": 164, + //"utf32_polish_ci": 165, + 
//"utf32_estonian_ci": 166, + //"utf32_spanish_ci": 167, + //"utf32_swedish_ci": 168, + //"utf32_turkish_ci": 169, + //"utf32_czech_ci": 170, + //"utf32_danish_ci": 171, + //"utf32_lithuanian_ci": 172, + //"utf32_slovak_ci": 173, + //"utf32_spanish2_ci": 174, + //"utf32_roman_ci": 175, + //"utf32_persian_ci": 176, + //"utf32_esperanto_ci": 177, + //"utf32_hungarian_ci": 178, + //"utf32_sinhala_ci": 179, + //"utf32_german2_ci": 180, + //"utf32_croatian_ci": 181, + //"utf32_unicode_520_ci": 182, + //"utf32_vietnamese_ci": 183, + "utf8_unicode_ci": 192, + "utf8_icelandic_ci": 193, + "utf8_latvian_ci": 194, + "utf8_romanian_ci": 195, + "utf8_slovenian_ci": 196, + "utf8_polish_ci": 197, + "utf8_estonian_ci": 198, + "utf8_spanish_ci": 199, + "utf8_swedish_ci": 200, + "utf8_turkish_ci": 201, + "utf8_czech_ci": 202, + "utf8_danish_ci": 203, + "utf8_lithuanian_ci": 204, + "utf8_slovak_ci": 205, + "utf8_spanish2_ci": 206, + "utf8_roman_ci": 207, + "utf8_persian_ci": 208, + "utf8_esperanto_ci": 209, + "utf8_hungarian_ci": 210, + "utf8_sinhala_ci": 211, + "utf8_german2_ci": 212, + "utf8_croatian_ci": 213, + "utf8_unicode_520_ci": 214, + "utf8_vietnamese_ci": 215, + "utf8_general_mysql500_ci": 223, + "utf8mb4_unicode_ci": 224, + "utf8mb4_icelandic_ci": 225, + "utf8mb4_latvian_ci": 226, + "utf8mb4_romanian_ci": 227, + "utf8mb4_slovenian_ci": 228, + "utf8mb4_polish_ci": 229, + "utf8mb4_estonian_ci": 230, + "utf8mb4_spanish_ci": 231, + "utf8mb4_swedish_ci": 232, + "utf8mb4_turkish_ci": 233, + "utf8mb4_czech_ci": 234, + "utf8mb4_danish_ci": 235, + "utf8mb4_lithuanian_ci": 236, + "utf8mb4_slovak_ci": 237, + "utf8mb4_spanish2_ci": 238, + "utf8mb4_roman_ci": 239, + "utf8mb4_persian_ci": 240, + "utf8mb4_esperanto_ci": 241, + "utf8mb4_hungarian_ci": 242, + "utf8mb4_sinhala_ci": 243, + "utf8mb4_german2_ci": 244, + "utf8mb4_croatian_ci": 245, + "utf8mb4_unicode_520_ci": 246, + "utf8mb4_vietnamese_ci": 247, + "gb18030_chinese_ci": 248, + "gb18030_bin": 249, + "gb18030_unicode_520_ci": 250, 
+ "utf8mb4_0900_ai_ci": 255, +} + +// A denylist of collations which is unsafe to interpolate parameters. +// These multibyte encodings may contains 0x5c (`\`) in their trailing bytes. +var unsafeCollations = map[string]bool{ + "big5_chinese_ci": true, + "sjis_japanese_ci": true, + "gbk_chinese_ci": true, + "big5_bin": true, + "gb2312_bin": true, + "gbk_bin": true, + "sjis_bin": true, + "cp932_japanese_ci": true, + "cp932_bin": true, + "gb18030_chinese_ci": true, + "gb18030_bin": true, + "gb18030_unicode_520_ci": true, +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/conncheck.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/conncheck.go new file mode 100644 index 00000000..024eb285 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/conncheck.go @@ -0,0 +1,54 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// +build linux darwin dragonfly freebsd netbsd openbsd solaris illumos + +package mysql + +import ( + "errors" + "io" + "net" + "syscall" +) + +var errUnexpectedRead = errors.New("unexpected read from socket") + +func connCheck(conn net.Conn) error { + var sysErr error + + sysConn, ok := conn.(syscall.Conn) + if !ok { + return nil + } + rawConn, err := sysConn.SyscallConn() + if err != nil { + return err + } + + err = rawConn.Read(func(fd uintptr) bool { + var buf [1]byte + n, err := syscall.Read(int(fd), buf[:]) + switch { + case n == 0 && err == nil: + sysErr = io.EOF + case n > 0: + sysErr = errUnexpectedRead + case err == syscall.EAGAIN || err == syscall.EWOULDBLOCK: + sysErr = nil + default: + sysErr = err + } + return true + }) + if err != nil { + return err + } + + return sysErr +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go new file mode 100644 index 00000000..ea7fb607 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/conncheck_dummy.go @@ -0,0 +1,17 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2019 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +// +build !linux,!darwin,!dragonfly,!freebsd,!netbsd,!openbsd,!solaris,!illumos + +package mysql + +import "net" + +func connCheck(conn net.Conn) error { + return nil +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/connection.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/connection.go new file mode 100644 index 00000000..835f8972 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/connection.go @@ -0,0 +1,650 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "context" + "database/sql" + "database/sql/driver" + "encoding/json" + "io" + "net" + "strconv" + "strings" + "time" +) + +type mysqlConn struct { + buf buffer + netConn net.Conn + rawConn net.Conn // underlying connection when netConn is TLS connection. 
+ affectedRows uint64 + insertId uint64 + cfg *Config + maxAllowedPacket int + maxWriteSize int + writeTimeout time.Duration + flags clientFlag + status statusFlag + sequence uint8 + parseTime bool + reset bool // set when the Go SQL package calls ResetSession + + // for context support (Go 1.8+) + watching bool + watcher chan<- context.Context + closech chan struct{} + finished chan<- struct{} + canceled atomicError // set non-nil if conn is canceled + closed atomicBool // set when conn is closed, before closech is closed +} + +// Handles parameters set in DSN after the connection is established +func (mc *mysqlConn) handleParams() (err error) { + var cmdSet strings.Builder + for param, val := range mc.cfg.Params { + switch param { + // Charset: character_set_connection, character_set_client, character_set_results + case "charset": + charsets := strings.Split(val, ",") + for i := range charsets { + // ignore errors here - a charset may not exist + err = mc.exec("SET NAMES " + charsets[i]) + if err == nil { + break + } + } + if err != nil { + return + } + + // Other system vars accumulated in a single SET command + default: + if cmdSet.Len() == 0 { + // Heuristic: 29 chars for each other key=value to reduce reallocations + cmdSet.Grow(4 + len(param) + 1 + len(val) + 30*(len(mc.cfg.Params)-1)) + cmdSet.WriteString("SET ") + } else { + cmdSet.WriteByte(',') + } + cmdSet.WriteString(param) + cmdSet.WriteByte('=') + cmdSet.WriteString(val) + } + } + + if cmdSet.Len() > 0 { + err = mc.exec(cmdSet.String()) + if err != nil { + return + } + } + + return +} + +func (mc *mysqlConn) markBadConn(err error) error { + if mc == nil { + return err + } + if err != errBadConnNoWrite { + return err + } + return driver.ErrBadConn +} + +func (mc *mysqlConn) Begin() (driver.Tx, error) { + return mc.begin(false) +} + +func (mc *mysqlConn) begin(readOnly bool) (driver.Tx, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + var q string + 
if readOnly { + q = "START TRANSACTION READ ONLY" + } else { + q = "START TRANSACTION" + } + err := mc.exec(q) + if err == nil { + return &mysqlTx{mc}, err + } + return nil, mc.markBadConn(err) +} + +func (mc *mysqlConn) Close() (err error) { + // Makes Close idempotent + if !mc.closed.IsSet() { + err = mc.writeCommandPacket(comQuit) + } + + mc.cleanup() + + return +} + +// Closes the network connection and unsets internal variables. Do not call this +// function after successfully authentication, call Close instead. This function +// is called before auth or on auth failure because MySQL will have already +// closed the network connection. +func (mc *mysqlConn) cleanup() { + if !mc.closed.TrySet(true) { + return + } + + // Makes cleanup idempotent + close(mc.closech) + if mc.netConn == nil { + return + } + if err := mc.netConn.Close(); err != nil { + errLog.Print(err) + } +} + +func (mc *mysqlConn) error() error { + if mc.closed.IsSet() { + if err := mc.canceled.Value(); err != nil { + return err + } + return ErrInvalidConn + } + return nil +} + +func (mc *mysqlConn) Prepare(query string) (driver.Stmt, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := mc.writeCommandPacketStr(comStmtPrepare, query) + if err != nil { + // STMT_PREPARE is safe to retry. So we can return ErrBadConn here. + errLog.Print(err) + return nil, driver.ErrBadConn + } + + stmt := &mysqlStmt{ + mc: mc, + } + + // Read Result + columnCount, err := stmt.readPrepareResultPacket() + if err == nil { + if stmt.paramCount > 0 { + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if columnCount > 0 { + err = mc.readUntilEOF() + } + } + + return stmt, err +} + +func (mc *mysqlConn) interpolateParams(query string, args []driver.Value) (string, error) { + // Number of ? 
should be same to len(args) + if strings.Count(query, "?") != len(args) { + return "", driver.ErrSkip + } + + buf, err := mc.buf.takeCompleteBuffer() + if err != nil { + // can not take the buffer. Something must be wrong with the connection + errLog.Print(err) + return "", ErrInvalidConn + } + buf = buf[:0] + argPos := 0 + + for i := 0; i < len(query); i++ { + q := strings.IndexByte(query[i:], '?') + if q == -1 { + buf = append(buf, query[i:]...) + break + } + buf = append(buf, query[i:i+q]...) + i += q + + arg := args[argPos] + argPos++ + + if arg == nil { + buf = append(buf, "NULL"...) + continue + } + + switch v := arg.(type) { + case int64: + buf = strconv.AppendInt(buf, v, 10) + case uint64: + // Handle uint64 explicitly because our custom ConvertValue emits unsigned values + buf = strconv.AppendUint(buf, v, 10) + case float64: + buf = strconv.AppendFloat(buf, v, 'g', -1, 64) + case bool: + if v { + buf = append(buf, '1') + } else { + buf = append(buf, '0') + } + case time.Time: + if v.IsZero() { + buf = append(buf, "'0000-00-00'"...) + } else { + buf = append(buf, '\'') + buf, err = appendDateTime(buf, v.In(mc.cfg.Loc)) + if err != nil { + return "", err + } + buf = append(buf, '\'') + } + case json.RawMessage: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + case []byte: + if v == nil { + buf = append(buf, "NULL"...) + } else { + buf = append(buf, "_binary'"...) 
+ if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeBytesBackslash(buf, v) + } else { + buf = escapeBytesQuotes(buf, v) + } + buf = append(buf, '\'') + } + case string: + buf = append(buf, '\'') + if mc.status&statusNoBackslashEscapes == 0 { + buf = escapeStringBackslash(buf, v) + } else { + buf = escapeStringQuotes(buf, v) + } + buf = append(buf, '\'') + default: + return "", driver.ErrSkip + } + + if len(buf)+4 > mc.maxAllowedPacket { + return "", driver.ErrSkip + } + } + if argPos != len(args) { + return "", driver.ErrSkip + } + return string(buf), nil +} + +func (mc *mysqlConn) Exec(query string, args []driver.Value) (driver.Result, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try to interpolate the parameters to save extra roundtrips for preparing and closing a statement + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + mc.affectedRows = 0 + mc.insertId = 0 + + err := mc.exec(query) + if err == nil { + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, err + } + return nil, mc.markBadConn(err) +} + +// Internal function to execute commands +func (mc *mysqlConn) exec(query string) error { + // Send command + if err := mc.writeCommandPacketStr(comQuery, query); err != nil { + return mc.markBadConn(err) + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + + return mc.discardResults() +} + +func (mc *mysqlConn) Query(query string, args []driver.Value) (driver.Rows, error) { + return mc.query(query, args) +} + +func (mc *mysqlConn) query(query string, args []driver.Value) 
(*textRows, error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + if len(args) != 0 { + if !mc.cfg.InterpolateParams { + return nil, driver.ErrSkip + } + // try client-side prepare to reduce roundtrip + prepared, err := mc.interpolateParams(query, args) + if err != nil { + return nil, err + } + query = prepared + } + // Send command + err := mc.writeCommandPacketStr(comQuery, query) + if err == nil { + // Read Result + var resLen int + resLen, err = mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + + if resLen == 0 { + rows.rs.done = true + + switch err := rows.NextResultSet(); err { + case nil, io.EOF: + return rows, nil + default: + return nil, err + } + } + + // Columns + rows.rs.columns, err = mc.readColumns(resLen) + return rows, err + } + } + return nil, mc.markBadConn(err) +} + +// Gets the value of the given MySQL System Variable +// The returned byte slice is only valid until the next read +func (mc *mysqlConn) getSystemVar(name string) ([]byte, error) { + // Send command + if err := mc.writeCommandPacketStr(comQuery, "SELECT @@"+name); err != nil { + return nil, err + } + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err == nil { + rows := new(textRows) + rows.mc = mc + rows.rs.columns = []mysqlField{{fieldType: fieldTypeVarChar}} + + if resLen > 0 { + // Columns + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + dest := make([]driver.Value, resLen) + if err = rows.readRow(dest); err == nil { + return dest[0].([]byte), mc.readUntilEOF() + } + } + return nil, err +} + +// finish is called when the query has canceled. +func (mc *mysqlConn) cancel(err error) { + mc.canceled.Set(err) + mc.cleanup() +} + +// finish is called when the query has succeeded. 
+func (mc *mysqlConn) finish() { + if !mc.watching || mc.finished == nil { + return + } + select { + case mc.finished <- struct{}{}: + mc.watching = false + case <-mc.closech: + } +} + +// Ping implements driver.Pinger interface +func (mc *mysqlConn) Ping(ctx context.Context) (err error) { + if mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + if err = mc.watchCancel(ctx); err != nil { + return + } + defer mc.finish() + + if err = mc.writeCommandPacket(comPing); err != nil { + return mc.markBadConn(err) + } + + return mc.readResultOK() +} + +// BeginTx implements driver.ConnBeginTx interface +func (mc *mysqlConn) BeginTx(ctx context.Context, opts driver.TxOptions) (driver.Tx, error) { + if mc.closed.IsSet() { + return nil, driver.ErrBadConn + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + if sql.IsolationLevel(opts.Isolation) != sql.LevelDefault { + level, err := mapIsolationLevel(opts.Isolation) + if err != nil { + return nil, err + } + err = mc.exec("SET TRANSACTION ISOLATION LEVEL " + level) + if err != nil { + return nil, err + } + } + + return mc.begin(opts.ReadOnly) +} + +func (mc *mysqlConn) QueryContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := mc.query(query, dargs) + if err != nil { + mc.finish() + return nil, err + } + rows.finish = mc.finish + return rows, err +} + +func (mc *mysqlConn) ExecContext(ctx context.Context, query string, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + defer mc.finish() + + return mc.Exec(query, dargs) +} + +func (mc *mysqlConn) PrepareContext(ctx context.Context, query string) 
(driver.Stmt, error) { + if err := mc.watchCancel(ctx); err != nil { + return nil, err + } + + stmt, err := mc.Prepare(query) + mc.finish() + if err != nil { + return nil, err + } + + select { + default: + case <-ctx.Done(): + stmt.Close() + return nil, ctx.Err() + } + return stmt, nil +} + +func (stmt *mysqlStmt) QueryContext(ctx context.Context, args []driver.NamedValue) (driver.Rows, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + + rows, err := stmt.query(dargs) + if err != nil { + stmt.mc.finish() + return nil, err + } + rows.finish = stmt.mc.finish + return rows, err +} + +func (stmt *mysqlStmt) ExecContext(ctx context.Context, args []driver.NamedValue) (driver.Result, error) { + dargs, err := namedValueToValue(args) + if err != nil { + return nil, err + } + + if err := stmt.mc.watchCancel(ctx); err != nil { + return nil, err + } + defer stmt.mc.finish() + + return stmt.Exec(dargs) +} + +func (mc *mysqlConn) watchCancel(ctx context.Context) error { + if mc.watching { + // Reach here if canceled, + // so the connection is already invalid + mc.cleanup() + return nil + } + // When ctx is already cancelled, don't watch it. + if err := ctx.Err(); err != nil { + return err + } + // When ctx is not cancellable, don't watch it. + if ctx.Done() == nil { + return nil + } + // When watcher is not alive, can't watch it. 
+ if mc.watcher == nil { + return nil + } + + mc.watching = true + mc.watcher <- ctx + return nil +} + +func (mc *mysqlConn) startWatcher() { + watcher := make(chan context.Context, 1) + mc.watcher = watcher + finished := make(chan struct{}) + mc.finished = finished + go func() { + for { + var ctx context.Context + select { + case ctx = <-watcher: + case <-mc.closech: + return + } + + select { + case <-ctx.Done(): + mc.cancel(ctx.Err()) + case <-finished: + case <-mc.closech: + return + } + } + }() +} + +func (mc *mysqlConn) CheckNamedValue(nv *driver.NamedValue) (err error) { + nv.Value, err = converter{}.ConvertValue(nv.Value) + return +} + +// ResetSession implements driver.SessionResetter. +// (From Go 1.10) +func (mc *mysqlConn) ResetSession(ctx context.Context) error { + if mc.closed.IsSet() { + return driver.ErrBadConn + } + mc.reset = true + return nil +} + +// IsValid implements driver.Validator interface +// (From Go 1.15) +func (mc *mysqlConn) IsValid() bool { + return !mc.closed.IsSet() +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/connector.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/connector.go new file mode 100644 index 00000000..d567b4e4 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/connector.go @@ -0,0 +1,146 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2018 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "context" + "database/sql/driver" + "net" +) + +type connector struct { + cfg *Config // immutable private copy. +} + +// Connect implements driver.Connector interface. +// Connect returns a connection to the database. 
+func (c *connector) Connect(ctx context.Context) (driver.Conn, error) { + var err error + + // New mysqlConn + mc := &mysqlConn{ + maxAllowedPacket: maxPacketSize, + maxWriteSize: maxPacketSize - 1, + closech: make(chan struct{}), + cfg: c.cfg, + } + mc.parseTime = mc.cfg.ParseTime + + // Connect to Server + dialsLock.RLock() + dial, ok := dials[mc.cfg.Net] + dialsLock.RUnlock() + if ok { + dctx := ctx + if mc.cfg.Timeout > 0 { + var cancel context.CancelFunc + dctx, cancel = context.WithTimeout(ctx, c.cfg.Timeout) + defer cancel() + } + mc.netConn, err = dial(dctx, mc.cfg.Addr) + } else { + nd := net.Dialer{Timeout: mc.cfg.Timeout} + mc.netConn, err = nd.DialContext(ctx, mc.cfg.Net, mc.cfg.Addr) + } + + if err != nil { + return nil, err + } + + // Enable TCP Keepalives on TCP connections + if tc, ok := mc.netConn.(*net.TCPConn); ok { + if err := tc.SetKeepAlive(true); err != nil { + // Don't send COM_QUIT before handshake. + mc.netConn.Close() + mc.netConn = nil + return nil, err + } + } + + // Call startWatcher for context support (From Go 1.8) + mc.startWatcher() + if err := mc.watchCancel(ctx); err != nil { + mc.cleanup() + return nil, err + } + defer mc.finish() + + mc.buf = newBuffer(mc.netConn) + + // Set I/O timeouts + mc.buf.timeout = mc.cfg.ReadTimeout + mc.writeTimeout = mc.cfg.WriteTimeout + + // Reading Handshake Initialization Packet + authData, plugin, err := mc.readHandshakePacket() + if err != nil { + mc.cleanup() + return nil, err + } + + if plugin == "" { + plugin = defaultAuthPlugin + } + + // Send Client Authentication Packet + authResp, err := mc.auth(authData, plugin) + if err != nil { + // try the default auth plugin, if using the requested plugin failed + errLog.Print("could not use requested auth plugin '"+plugin+"': ", err.Error()) + plugin = defaultAuthPlugin + authResp, err = mc.auth(authData, plugin) + if err != nil { + mc.cleanup() + return nil, err + } + } + if err = mc.writeHandshakeResponsePacket(authResp, plugin); err != nil { + 
mc.cleanup() + return nil, err + } + + // Handle response to auth packet, switch methods if possible + if err = mc.handleAuthResult(authData, plugin); err != nil { + // Authentication failed and MySQL has already closed the connection + // (https://dev.mysql.com/doc/internals/en/authentication-fails.html). + // Do not send COM_QUIT, just cleanup and return the error. + mc.cleanup() + return nil, err + } + + if mc.cfg.MaxAllowedPacket > 0 { + mc.maxAllowedPacket = mc.cfg.MaxAllowedPacket + } else { + // Get max allowed packet size + maxap, err := mc.getSystemVar("max_allowed_packet") + if err != nil { + mc.Close() + return nil, err + } + mc.maxAllowedPacket = stringToInt(maxap) - 1 + } + if mc.maxAllowedPacket < maxPacketSize { + mc.maxWriteSize = mc.maxAllowedPacket + } + + // Handle DSN Params + err = mc.handleParams() + if err != nil { + mc.Close() + return nil, err + } + + return mc, nil +} + +// Driver implements driver.Connector interface. +// Driver returns &MySQLDriver{}. +func (c *connector) Driver() driver.Driver { + return &MySQLDriver{} +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/const.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/const.go new file mode 100644 index 00000000..b1e6b85e --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/const.go @@ -0,0 +1,174 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +const ( + defaultAuthPlugin = "mysql_native_password" + defaultMaxAllowedPacket = 4 << 20 // 4 MiB + minProtocolVersion = 10 + maxPacketSize = 1<<24 - 1 + timeFormat = "2006-01-02 15:04:05.999999" +) + +// MySQL constants documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +const ( + iOK byte = 0x00 + iAuthMoreData byte = 0x01 + iLocalInFile byte = 0xfb + iEOF byte = 0xfe + iERR byte = 0xff +) + +// https://dev.mysql.com/doc/internals/en/capability-flags.html#packet-Protocol::CapabilityFlags +type clientFlag uint32 + +const ( + clientLongPassword clientFlag = 1 << iota + clientFoundRows + clientLongFlag + clientConnectWithDB + clientNoSchema + clientCompress + clientODBC + clientLocalFiles + clientIgnoreSpace + clientProtocol41 + clientInteractive + clientSSL + clientIgnoreSIGPIPE + clientTransactions + clientReserved + clientSecureConn + clientMultiStatements + clientMultiResults + clientPSMultiResults + clientPluginAuth + clientConnectAttrs + clientPluginAuthLenEncClientData + clientCanHandleExpiredPasswords + clientSessionTrack + clientDeprecateEOF +) + +const ( + comQuit byte = iota + 1 + comInitDB + comQuery + comFieldList + comCreateDB + comDropDB + comRefresh + comShutdown + comStatistics + comProcessInfo + comConnect + comProcessKill + comDebug + comPing + comTime + comDelayedInsert + comChangeUser + comBinlogDump + comTableDump + comConnectOut + comRegisterSlave + comStmtPrepare + comStmtExecute + comStmtSendLongData + comStmtClose + comStmtReset + comSetOption + comStmtFetch +) + +// https://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnType +type fieldType byte + +const ( + fieldTypeDecimal fieldType = iota + fieldTypeTiny + fieldTypeShort + fieldTypeLong + fieldTypeFloat + fieldTypeDouble + fieldTypeNULL + fieldTypeTimestamp + fieldTypeLongLong + fieldTypeInt24 + fieldTypeDate + fieldTypeTime + fieldTypeDateTime + fieldTypeYear + fieldTypeNewDate + fieldTypeVarChar + 
fieldTypeBit +) +const ( + fieldTypeJSON fieldType = iota + 0xf5 + fieldTypeNewDecimal + fieldTypeEnum + fieldTypeSet + fieldTypeTinyBLOB + fieldTypeMediumBLOB + fieldTypeLongBLOB + fieldTypeBLOB + fieldTypeVarString + fieldTypeString + fieldTypeGeometry +) + +type fieldFlag uint16 + +const ( + flagNotNULL fieldFlag = 1 << iota + flagPriKey + flagUniqueKey + flagMultipleKey + flagBLOB + flagUnsigned + flagZeroFill + flagBinary + flagEnum + flagAutoIncrement + flagTimestamp + flagSet + flagUnknown1 + flagUnknown2 + flagUnknown3 + flagUnknown4 +) + +// http://dev.mysql.com/doc/internals/en/status-flags.html +type statusFlag uint16 + +const ( + statusInTrans statusFlag = 1 << iota + statusInAutocommit + statusReserved // Not in documentation + statusMoreResultsExists + statusNoGoodIndexUsed + statusNoIndexUsed + statusCursorExists + statusLastRowSent + statusDbDropped + statusNoBackslashEscapes + statusMetadataChanged + statusQueryWasSlow + statusPsOutParams + statusInTransReadonly + statusSessionStateChanged +) + +const ( + cachingSha2PasswordRequestPublicKey = 2 + cachingSha2PasswordFastAuthSuccess = 3 + cachingSha2PasswordPerformFullAuthentication = 4 +) diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/driver.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/driver.go new file mode 100644 index 00000000..c1bdf119 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/driver.go @@ -0,0 +1,107 @@ +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// Package mysql provides a MySQL driver for Go's database/sql package. 
+// +// The driver should be used via the database/sql package: +// +// import "database/sql" +// import _ "github.com/go-sql-driver/mysql" +// +// db, err := sql.Open("mysql", "user:password@/dbname") +// +// See https://github.com/go-sql-driver/mysql#usage for details +package mysql + +import ( + "context" + "database/sql" + "database/sql/driver" + "net" + "sync" +) + +// MySQLDriver is exported to make the driver directly accessible. +// In general the driver is used via the database/sql package. +type MySQLDriver struct{} + +// DialFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDial +// +// Deprecated: users should register a DialContextFunc instead +type DialFunc func(addr string) (net.Conn, error) + +// DialContextFunc is a function which can be used to establish the network connection. +// Custom dial functions must be registered with RegisterDialContext +type DialContextFunc func(ctx context.Context, addr string) (net.Conn, error) + +var ( + dialsLock sync.RWMutex + dials map[string]DialContextFunc +) + +// RegisterDialContext registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// The current context for the connection and its address is passed to the dial function. +func RegisterDialContext(net string, dial DialContextFunc) { + dialsLock.Lock() + defer dialsLock.Unlock() + if dials == nil { + dials = make(map[string]DialContextFunc) + } + dials[net] = dial +} + +// RegisterDial registers a custom dial function. It can then be used by the +// network address mynet(addr), where mynet is the registered new network. +// addr is passed as a parameter to the dial function. 
+// +// Deprecated: users should call RegisterDialContext instead +func RegisterDial(network string, dial DialFunc) { + RegisterDialContext(network, func(_ context.Context, addr string) (net.Conn, error) { + return dial(addr) + }) +} + +// Open new Connection. +// See https://github.com/go-sql-driver/mysql#dsn-data-source-name for how +// the DSN string is formatted +func (d MySQLDriver) Open(dsn string) (driver.Conn, error) { + cfg, err := ParseDSN(dsn) + if err != nil { + return nil, err + } + c := &connector{ + cfg: cfg, + } + return c.Connect(context.Background()) +} + +func init() { + sql.Register("mysql", &MySQLDriver{}) +} + +// NewConnector returns new driver.Connector. +func NewConnector(cfg *Config) (driver.Connector, error) { + cfg = cfg.Clone() + // normalize the contents of cfg so calls to NewConnector have the same + // behavior as MySQLDriver.OpenConnector + if err := cfg.normalize(); err != nil { + return nil, err + } + return &connector{cfg: cfg}, nil +} + +// OpenConnector implements driver.DriverContext. +func (d MySQLDriver) OpenConnector(dsn string) (driver.Connector, error) { + cfg, err := ParseDSN(dsn) + if err != nil { + return nil, err + } + return &connector{ + cfg: cfg, + }, nil +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/dsn.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/dsn.go new file mode 100644 index 00000000..93f3548c --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/dsn.go @@ -0,0 +1,560 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2016 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "bytes" + "crypto/rsa" + "crypto/tls" + "errors" + "fmt" + "math/big" + "net" + "net/url" + "sort" + "strconv" + "strings" + "time" +) + +var ( + errInvalidDSNUnescaped = errors.New("invalid DSN: did you forget to escape a param value?") + errInvalidDSNAddr = errors.New("invalid DSN: network address not terminated (missing closing brace)") + errInvalidDSNNoSlash = errors.New("invalid DSN: missing the slash separating the database name") + errInvalidDSNUnsafeCollation = errors.New("invalid DSN: interpolateParams can not be used with unsafe collations") +) + +// Config is a configuration parsed from a DSN string. +// If a new Config is created instead of being parsed from a DSN string, +// the NewConfig function should be used, which sets default values. +type Config struct { + User string // Username + Passwd string // Password (requires User) + Net string // Network type + Addr string // Network address (requires Net) + DBName string // Database name + Params map[string]string // Connection parameters + Collation string // Connection collation + Loc *time.Location // Location for time.Time values + MaxAllowedPacket int // Max packet size allowed + ServerPubKey string // Server public key name + pubKey *rsa.PublicKey // Server public key + TLSConfig string // TLS configuration name + tls *tls.Config // TLS configuration + Timeout time.Duration // Dial timeout + ReadTimeout time.Duration // I/O read timeout + WriteTimeout time.Duration // I/O write timeout + + AllowAllFiles bool // Allow all files to be used with LOAD DATA LOCAL INFILE + AllowCleartextPasswords bool // Allows the cleartext client side plugin + AllowNativePasswords bool // Allows the native password authentication method + AllowOldPasswords bool // Allows the old insecure password method + CheckConnLiveness bool // Check connections for liveness before using them + ClientFoundRows bool // Return number of matching rows instead of rows changed + ColumnsWithAlias bool // 
Prepend table alias to column names + InterpolateParams bool // Interpolate placeholders into query string + MultiStatements bool // Allow multiple statements in one query + ParseTime bool // Parse time values to time.Time + RejectReadOnly bool // Reject read-only connections +} + +// NewConfig creates a new Config and sets default values. +func NewConfig() *Config { + return &Config{ + Collation: defaultCollation, + Loc: time.UTC, + MaxAllowedPacket: defaultMaxAllowedPacket, + AllowNativePasswords: true, + CheckConnLiveness: true, + } +} + +func (cfg *Config) Clone() *Config { + cp := *cfg + if cp.tls != nil { + cp.tls = cfg.tls.Clone() + } + if len(cp.Params) > 0 { + cp.Params = make(map[string]string, len(cfg.Params)) + for k, v := range cfg.Params { + cp.Params[k] = v + } + } + if cfg.pubKey != nil { + cp.pubKey = &rsa.PublicKey{ + N: new(big.Int).Set(cfg.pubKey.N), + E: cfg.pubKey.E, + } + } + return &cp +} + +func (cfg *Config) normalize() error { + if cfg.InterpolateParams && unsafeCollations[cfg.Collation] { + return errInvalidDSNUnsafeCollation + } + + // Set default network if empty + if cfg.Net == "" { + cfg.Net = "tcp" + } + + // Set default address if empty + if cfg.Addr == "" { + switch cfg.Net { + case "tcp": + cfg.Addr = "127.0.0.1:3306" + case "unix": + cfg.Addr = "/tmp/mysql.sock" + default: + return errors.New("default addr for network '" + cfg.Net + "' unknown") + } + } else if cfg.Net == "tcp" { + cfg.Addr = ensureHavePort(cfg.Addr) + } + + switch cfg.TLSConfig { + case "false", "": + // don't set anything + case "true": + cfg.tls = &tls.Config{} + case "skip-verify", "preferred": + cfg.tls = &tls.Config{InsecureSkipVerify: true} + default: + cfg.tls = getTLSConfigClone(cfg.TLSConfig) + if cfg.tls == nil { + return errors.New("invalid value / unknown config name: " + cfg.TLSConfig) + } + } + + if cfg.tls != nil && cfg.tls.ServerName == "" && !cfg.tls.InsecureSkipVerify { + host, _, err := net.SplitHostPort(cfg.Addr) + if err == nil { + 
cfg.tls.ServerName = host + } + } + + if cfg.ServerPubKey != "" { + cfg.pubKey = getServerPubKey(cfg.ServerPubKey) + if cfg.pubKey == nil { + return errors.New("invalid value / unknown server pub key name: " + cfg.ServerPubKey) + } + } + + return nil +} + +func writeDSNParam(buf *bytes.Buffer, hasParam *bool, name, value string) { + buf.Grow(1 + len(name) + 1 + len(value)) + if !*hasParam { + *hasParam = true + buf.WriteByte('?') + } else { + buf.WriteByte('&') + } + buf.WriteString(name) + buf.WriteByte('=') + buf.WriteString(value) +} + +// FormatDSN formats the given Config into a DSN string which can be passed to +// the driver. +func (cfg *Config) FormatDSN() string { + var buf bytes.Buffer + + // [username[:password]@] + if len(cfg.User) > 0 { + buf.WriteString(cfg.User) + if len(cfg.Passwd) > 0 { + buf.WriteByte(':') + buf.WriteString(cfg.Passwd) + } + buf.WriteByte('@') + } + + // [protocol[(address)]] + if len(cfg.Net) > 0 { + buf.WriteString(cfg.Net) + if len(cfg.Addr) > 0 { + buf.WriteByte('(') + buf.WriteString(cfg.Addr) + buf.WriteByte(')') + } + } + + // /dbname + buf.WriteByte('/') + buf.WriteString(cfg.DBName) + + // [?param1=value1&...¶mN=valueN] + hasParam := false + + if cfg.AllowAllFiles { + hasParam = true + buf.WriteString("?allowAllFiles=true") + } + + if cfg.AllowCleartextPasswords { + writeDSNParam(&buf, &hasParam, "allowCleartextPasswords", "true") + } + + if !cfg.AllowNativePasswords { + writeDSNParam(&buf, &hasParam, "allowNativePasswords", "false") + } + + if cfg.AllowOldPasswords { + writeDSNParam(&buf, &hasParam, "allowOldPasswords", "true") + } + + if !cfg.CheckConnLiveness { + writeDSNParam(&buf, &hasParam, "checkConnLiveness", "false") + } + + if cfg.ClientFoundRows { + writeDSNParam(&buf, &hasParam, "clientFoundRows", "true") + } + + if col := cfg.Collation; col != defaultCollation && len(col) > 0 { + writeDSNParam(&buf, &hasParam, "collation", col) + } + + if cfg.ColumnsWithAlias { + writeDSNParam(&buf, &hasParam, 
"columnsWithAlias", "true") + } + + if cfg.InterpolateParams { + writeDSNParam(&buf, &hasParam, "interpolateParams", "true") + } + + if cfg.Loc != time.UTC && cfg.Loc != nil { + writeDSNParam(&buf, &hasParam, "loc", url.QueryEscape(cfg.Loc.String())) + } + + if cfg.MultiStatements { + writeDSNParam(&buf, &hasParam, "multiStatements", "true") + } + + if cfg.ParseTime { + writeDSNParam(&buf, &hasParam, "parseTime", "true") + } + + if cfg.ReadTimeout > 0 { + writeDSNParam(&buf, &hasParam, "readTimeout", cfg.ReadTimeout.String()) + } + + if cfg.RejectReadOnly { + writeDSNParam(&buf, &hasParam, "rejectReadOnly", "true") + } + + if len(cfg.ServerPubKey) > 0 { + writeDSNParam(&buf, &hasParam, "serverPubKey", url.QueryEscape(cfg.ServerPubKey)) + } + + if cfg.Timeout > 0 { + writeDSNParam(&buf, &hasParam, "timeout", cfg.Timeout.String()) + } + + if len(cfg.TLSConfig) > 0 { + writeDSNParam(&buf, &hasParam, "tls", url.QueryEscape(cfg.TLSConfig)) + } + + if cfg.WriteTimeout > 0 { + writeDSNParam(&buf, &hasParam, "writeTimeout", cfg.WriteTimeout.String()) + } + + if cfg.MaxAllowedPacket != defaultMaxAllowedPacket { + writeDSNParam(&buf, &hasParam, "maxAllowedPacket", strconv.Itoa(cfg.MaxAllowedPacket)) + } + + // other params + if cfg.Params != nil { + var params []string + for param := range cfg.Params { + params = append(params, param) + } + sort.Strings(params) + for _, param := range params { + writeDSNParam(&buf, &hasParam, param, url.QueryEscape(cfg.Params[param])) + } + } + + return buf.String() +} + +// ParseDSN parses the DSN string to a Config +func ParseDSN(dsn string) (cfg *Config, err error) { + // New config with some default values + cfg = NewConfig() + + // [user[:password]@][net[(addr)]]/dbname[?param1=value1¶mN=valueN] + // Find the last '/' (since the password or the net addr might contain a '/') + foundSlash := false + for i := len(dsn) - 1; i >= 0; i-- { + if dsn[i] == '/' { + foundSlash = true + var j, k int + + // left part is empty if i <= 0 + if i > 0 { 
+ // [username[:password]@][protocol[(address)]] + // Find the last '@' in dsn[:i] + for j = i; j >= 0; j-- { + if dsn[j] == '@' { + // username[:password] + // Find the first ':' in dsn[:j] + for k = 0; k < j; k++ { + if dsn[k] == ':' { + cfg.Passwd = dsn[k+1 : j] + break + } + } + cfg.User = dsn[:k] + + break + } + } + + // [protocol[(address)]] + // Find the first '(' in dsn[j+1:i] + for k = j + 1; k < i; k++ { + if dsn[k] == '(' { + // dsn[i-1] must be == ')' if an address is specified + if dsn[i-1] != ')' { + if strings.ContainsRune(dsn[k+1:i], ')') { + return nil, errInvalidDSNUnescaped + } + return nil, errInvalidDSNAddr + } + cfg.Addr = dsn[k+1 : i-1] + break + } + } + cfg.Net = dsn[j+1 : k] + } + + // dbname[?param1=value1&...¶mN=valueN] + // Find the first '?' in dsn[i+1:] + for j = i + 1; j < len(dsn); j++ { + if dsn[j] == '?' { + if err = parseDSNParams(cfg, dsn[j+1:]); err != nil { + return + } + break + } + } + cfg.DBName = dsn[i+1 : j] + + break + } + } + + if !foundSlash && len(dsn) > 0 { + return nil, errInvalidDSNNoSlash + } + + if err = cfg.normalize(); err != nil { + return nil, err + } + return +} + +// parseDSNParams parses the DSN "query string" +// Values must be url.QueryEscape'ed +func parseDSNParams(cfg *Config, params string) (err error) { + for _, v := range strings.Split(params, "&") { + param := strings.SplitN(v, "=", 2) + if len(param) != 2 { + continue + } + + // cfg params + switch value := param[1]; param[0] { + // Disable INFILE allowlist / enable all files + case "allowAllFiles": + var isBool bool + cfg.AllowAllFiles, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use cleartext authentication mode (MySQL 5.5.10+) + case "allowCleartextPasswords": + var isBool bool + cfg.AllowCleartextPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use native password authentication + case "allowNativePasswords": + var isBool 
bool + cfg.AllowNativePasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Use old authentication mode (pre MySQL 4.1) + case "allowOldPasswords": + var isBool bool + cfg.AllowOldPasswords, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Check connections for Liveness before using them + case "checkConnLiveness": + var isBool bool + cfg.CheckConnLiveness, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Switch "rowsAffected" mode + case "clientFoundRows": + var isBool bool + cfg.ClientFoundRows, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Collation + case "collation": + cfg.Collation = value + break + + case "columnsWithAlias": + var isBool bool + cfg.ColumnsWithAlias, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Compression + case "compress": + return errors.New("compression not implemented yet") + + // Enable client side placeholder substitution + case "interpolateParams": + var isBool bool + cfg.InterpolateParams, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Time Location + case "loc": + if value, err = url.QueryUnescape(value); err != nil { + return + } + cfg.Loc, err = time.LoadLocation(value) + if err != nil { + return + } + + // multiple statements in one query + case "multiStatements": + var isBool bool + cfg.MultiStatements, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // time.Time parsing + case "parseTime": + var isBool bool + cfg.ParseTime, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // I/O read Timeout + case "readTimeout": + cfg.ReadTimeout, err = time.ParseDuration(value) + if err != nil { + return + } 
+ + // Reject read-only connections + case "rejectReadOnly": + var isBool bool + cfg.RejectReadOnly, isBool = readBool(value) + if !isBool { + return errors.New("invalid bool value: " + value) + } + + // Server public key + case "serverPubKey": + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for server pub key name: %v", err) + } + cfg.ServerPubKey = name + + // Strict mode + case "strict": + panic("strict mode has been removed. See https://github.com/go-sql-driver/mysql/wiki/strict-mode") + + // Dial Timeout + case "timeout": + cfg.Timeout, err = time.ParseDuration(value) + if err != nil { + return + } + + // TLS-Encryption + case "tls": + boolValue, isBool := readBool(value) + if isBool { + if boolValue { + cfg.TLSConfig = "true" + } else { + cfg.TLSConfig = "false" + } + } else if vl := strings.ToLower(value); vl == "skip-verify" || vl == "preferred" { + cfg.TLSConfig = vl + } else { + name, err := url.QueryUnescape(value) + if err != nil { + return fmt.Errorf("invalid value for TLS config name: %v", err) + } + cfg.TLSConfig = name + } + + // I/O write Timeout + case "writeTimeout": + cfg.WriteTimeout, err = time.ParseDuration(value) + if err != nil { + return + } + case "maxAllowedPacket": + cfg.MaxAllowedPacket, err = strconv.Atoi(value) + if err != nil { + return + } + default: + // lazy init + if cfg.Params == nil { + cfg.Params = make(map[string]string) + } + + if cfg.Params[param[0]], err = url.QueryUnescape(value); err != nil { + return + } + } + } + + return +} + +func ensureHavePort(addr string) string { + if _, _, err := net.SplitHostPort(addr); err != nil { + return net.JoinHostPort(addr, "3306") + } + return addr +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/errors.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/errors.go new file mode 100644 index 00000000..760782ff --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/errors.go @@ -0,0 +1,65 @@ 
+// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "errors" + "fmt" + "log" + "os" +) + +// Various errors the driver might return. Can change between driver versions. +var ( + ErrInvalidConn = errors.New("invalid connection") + ErrMalformPkt = errors.New("malformed packet") + ErrNoTLS = errors.New("TLS requested but server does not support TLS") + ErrCleartextPassword = errors.New("this user requires clear text authentication. If you still want to use it, please add 'allowCleartextPasswords=1' to your DSN") + ErrNativePassword = errors.New("this user requires mysql native password authentication.") + ErrOldPassword = errors.New("this user requires old password authentication. If you still want to use it, please add 'allowOldPasswords=1' to your DSN. See also https://github.com/go-sql-driver/mysql/wiki/old_passwords") + ErrUnknownPlugin = errors.New("this authentication plugin is not supported") + ErrOldProtocol = errors.New("MySQL server does not support required protocol 41+") + ErrPktSync = errors.New("commands out of sync. You can't run this command now") + ErrPktSyncMul = errors.New("commands out of sync. Did you run multiple statements at once?") + ErrPktTooLarge = errors.New("packet for query is too large. Try adjusting the 'max_allowed_packet' variable on the server") + ErrBusyBuffer = errors.New("busy buffer") + + // errBadConnNoWrite is used for connection errors where nothing was sent to the database yet. + // If this happens first in a function starting a database interaction, it should be replaced by driver.ErrBadConn + // to trigger a resend. 
+ // See https://github.com/go-sql-driver/mysql/pull/302 + errBadConnNoWrite = errors.New("bad connection") +) + +var errLog = Logger(log.New(os.Stderr, "[mysql] ", log.Ldate|log.Ltime|log.Lshortfile)) + +// Logger is used to log critical error messages. +type Logger interface { + Print(v ...interface{}) +} + +// SetLogger is used to set the logger for critical errors. +// The initial logger is os.Stderr. +func SetLogger(logger Logger) error { + if logger == nil { + return errors.New("logger is nil") + } + errLog = logger + return nil +} + +// MySQLError is an error type which represents a single MySQL error +type MySQLError struct { + Number uint16 + Message string +} + +func (me *MySQLError) Error() string { + return fmt.Sprintf("Error %d: %s", me.Number, me.Message) +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/fields.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/fields.go new file mode 100644 index 00000000..ed6c7a37 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/fields.go @@ -0,0 +1,194 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2017 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +import ( + "database/sql" + "reflect" +) + +func (mf *mysqlField) typeDatabaseName() string { + switch mf.fieldType { + case fieldTypeBit: + return "BIT" + case fieldTypeBLOB: + if mf.charSet != collations[binaryCollation] { + return "TEXT" + } + return "BLOB" + case fieldTypeDate: + return "DATE" + case fieldTypeDateTime: + return "DATETIME" + case fieldTypeDecimal: + return "DECIMAL" + case fieldTypeDouble: + return "DOUBLE" + case fieldTypeEnum: + return "ENUM" + case fieldTypeFloat: + return "FLOAT" + case fieldTypeGeometry: + return "GEOMETRY" + case fieldTypeInt24: + return "MEDIUMINT" + case fieldTypeJSON: + return "JSON" + case fieldTypeLong: + return "INT" + case fieldTypeLongBLOB: + if mf.charSet != collations[binaryCollation] { + return "LONGTEXT" + } + return "LONGBLOB" + case fieldTypeLongLong: + return "BIGINT" + case fieldTypeMediumBLOB: + if mf.charSet != collations[binaryCollation] { + return "MEDIUMTEXT" + } + return "MEDIUMBLOB" + case fieldTypeNewDate: + return "DATE" + case fieldTypeNewDecimal: + return "DECIMAL" + case fieldTypeNULL: + return "NULL" + case fieldTypeSet: + return "SET" + case fieldTypeShort: + return "SMALLINT" + case fieldTypeString: + if mf.charSet == collations[binaryCollation] { + return "BINARY" + } + return "CHAR" + case fieldTypeTime: + return "TIME" + case fieldTypeTimestamp: + return "TIMESTAMP" + case fieldTypeTiny: + return "TINYINT" + case fieldTypeTinyBLOB: + if mf.charSet != collations[binaryCollation] { + return "TINYTEXT" + } + return "TINYBLOB" + case fieldTypeVarChar: + if mf.charSet == collations[binaryCollation] { + return "VARBINARY" + } + return "VARCHAR" + case fieldTypeVarString: + if mf.charSet == collations[binaryCollation] { + return "VARBINARY" + } + return "VARCHAR" + case fieldTypeYear: + return "YEAR" + default: + return "" + } +} + +var ( + scanTypeFloat32 = reflect.TypeOf(float32(0)) + scanTypeFloat64 = reflect.TypeOf(float64(0)) + scanTypeInt8 = reflect.TypeOf(int8(0)) + 
scanTypeInt16 = reflect.TypeOf(int16(0)) + scanTypeInt32 = reflect.TypeOf(int32(0)) + scanTypeInt64 = reflect.TypeOf(int64(0)) + scanTypeNullFloat = reflect.TypeOf(sql.NullFloat64{}) + scanTypeNullInt = reflect.TypeOf(sql.NullInt64{}) + scanTypeNullTime = reflect.TypeOf(nullTime{}) + scanTypeUint8 = reflect.TypeOf(uint8(0)) + scanTypeUint16 = reflect.TypeOf(uint16(0)) + scanTypeUint32 = reflect.TypeOf(uint32(0)) + scanTypeUint64 = reflect.TypeOf(uint64(0)) + scanTypeRawBytes = reflect.TypeOf(sql.RawBytes{}) + scanTypeUnknown = reflect.TypeOf(new(interface{})) +) + +type mysqlField struct { + tableName string + name string + length uint32 + flags fieldFlag + fieldType fieldType + decimals byte + charSet uint8 +} + +func (mf *mysqlField) scanType() reflect.Type { + switch mf.fieldType { + case fieldTypeTiny: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint8 + } + return scanTypeInt8 + } + return scanTypeNullInt + + case fieldTypeShort, fieldTypeYear: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint16 + } + return scanTypeInt16 + } + return scanTypeNullInt + + case fieldTypeInt24, fieldTypeLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint32 + } + return scanTypeInt32 + } + return scanTypeNullInt + + case fieldTypeLongLong: + if mf.flags&flagNotNULL != 0 { + if mf.flags&flagUnsigned != 0 { + return scanTypeUint64 + } + return scanTypeInt64 + } + return scanTypeNullInt + + case fieldTypeFloat: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat32 + } + return scanTypeNullFloat + + case fieldTypeDouble: + if mf.flags&flagNotNULL != 0 { + return scanTypeFloat64 + } + return scanTypeNullFloat + + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, 
fieldTypeJSON, + fieldTypeTime: + return scanTypeRawBytes + + case fieldTypeDate, fieldTypeNewDate, + fieldTypeTimestamp, fieldTypeDateTime: + // NullTime is always returned for more consistent behavior as it can + // handle both cases of parseTime regardless if the field is nullable. + return scanTypeNullTime + + default: + return scanTypeUnknown + } +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/fuzz.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/fuzz.go new file mode 100644 index 00000000..fa75adf6 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/fuzz.go @@ -0,0 +1,24 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package. +// +// Copyright 2020 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build gofuzz + +package mysql + +import ( + "database/sql" +) + +func Fuzz(data []byte) int { + db, err := sql.Open("mysql", string(data)) + if err != nil { + return 0 + } + db.Close() + return 1 +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/go.mod b/terraform-server/vendor/github.com/go-sql-driver/mysql/go.mod new file mode 100644 index 00000000..fffbf6a9 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/go.mod @@ -0,0 +1,3 @@ +module github.com/go-sql-driver/mysql + +go 1.10 diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/infile.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/infile.go new file mode 100644 index 00000000..60effdfc --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/infile.go @@ -0,0 +1,182 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "fmt" + "io" + "os" + "strings" + "sync" +) + +var ( + fileRegister map[string]bool + fileRegisterLock sync.RWMutex + readerRegister map[string]func() io.Reader + readerRegisterLock sync.RWMutex +) + +// RegisterLocalFile adds the given file to the file allowlist, +// so that it can be used by "LOAD DATA LOCAL INFILE ". +// Alternatively you can allow the use of all local files with +// the DSN parameter 'allowAllFiles=true' +// +// filePath := "/home/gopher/data.csv" +// mysql.RegisterLocalFile(filePath) +// err := db.Exec("LOAD DATA LOCAL INFILE '" + filePath + "' INTO TABLE foo") +// if err != nil { +// ... +// +func RegisterLocalFile(filePath string) { + fileRegisterLock.Lock() + // lazy map init + if fileRegister == nil { + fileRegister = make(map[string]bool) + } + + fileRegister[strings.Trim(filePath, `"`)] = true + fileRegisterLock.Unlock() +} + +// DeregisterLocalFile removes the given filepath from the allowlist. +func DeregisterLocalFile(filePath string) { + fileRegisterLock.Lock() + delete(fileRegister, strings.Trim(filePath, `"`)) + fileRegisterLock.Unlock() +} + +// RegisterReaderHandler registers a handler function which is used +// to receive a io.Reader. +// The Reader can be used by "LOAD DATA LOCAL INFILE Reader::". +// If the handler returns a io.ReadCloser Close() is called when the +// request is finished. +// +// mysql.RegisterReaderHandler("data", func() io.Reader { +// var csvReader io.Reader // Some Reader that returns CSV data +// ... // Open Reader here +// return csvReader +// }) +// err := db.Exec("LOAD DATA LOCAL INFILE 'Reader::data' INTO TABLE foo") +// if err != nil { +// ... 
+// +func RegisterReaderHandler(name string, handler func() io.Reader) { + readerRegisterLock.Lock() + // lazy map init + if readerRegister == nil { + readerRegister = make(map[string]func() io.Reader) + } + + readerRegister[name] = handler + readerRegisterLock.Unlock() +} + +// DeregisterReaderHandler removes the ReaderHandler function with +// the given name from the registry. +func DeregisterReaderHandler(name string) { + readerRegisterLock.Lock() + delete(readerRegister, name) + readerRegisterLock.Unlock() +} + +func deferredClose(err *error, closer io.Closer) { + closeErr := closer.Close() + if *err == nil { + *err = closeErr + } +} + +func (mc *mysqlConn) handleInFileRequest(name string) (err error) { + var rdr io.Reader + var data []byte + packetSize := 16 * 1024 // 16KB is small enough for disk readahead and large enough for TCP + if mc.maxWriteSize < packetSize { + packetSize = mc.maxWriteSize + } + + if idx := strings.Index(name, "Reader::"); idx == 0 || (idx > 0 && name[idx-1] == '/') { // io.Reader + // The server might return an an absolute path. See issue #355. 
+ name = name[idx+8:] + + readerRegisterLock.RLock() + handler, inMap := readerRegister[name] + readerRegisterLock.RUnlock() + + if inMap { + rdr = handler() + if rdr != nil { + if cl, ok := rdr.(io.Closer); ok { + defer deferredClose(&err, cl) + } + } else { + err = fmt.Errorf("Reader '%s' is ", name) + } + } else { + err = fmt.Errorf("Reader '%s' is not registered", name) + } + } else { // File + name = strings.Trim(name, `"`) + fileRegisterLock.RLock() + fr := fileRegister[name] + fileRegisterLock.RUnlock() + if mc.cfg.AllowAllFiles || fr { + var file *os.File + var fi os.FileInfo + + if file, err = os.Open(name); err == nil { + defer deferredClose(&err, file) + + // get file size + if fi, err = file.Stat(); err == nil { + rdr = file + if fileSize := int(fi.Size()); fileSize < packetSize { + packetSize = fileSize + } + } + } + } else { + err = fmt.Errorf("local file '%s' is not registered", name) + } + } + + // send content packets + // if packetSize == 0, the Reader contains no data + if err == nil && packetSize > 0 { + data := make([]byte, 4+packetSize) + var n int + for err == nil { + n, err = rdr.Read(data[4:]) + if n > 0 { + if ioErr := mc.writePacket(data[:4+n]); ioErr != nil { + return ioErr + } + } + } + if err == io.EOF { + err = nil + } + } + + // send empty packet (termination) + if data == nil { + data = make([]byte, 4) + } + if ioErr := mc.writePacket(data[:4]); ioErr != nil { + return ioErr + } + + // read OK packet + if err == nil { + return mc.readResultOK() + } + + mc.readPacket() + return err +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime.go new file mode 100644 index 00000000..651723a9 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime.go @@ -0,0 +1,50 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. 
+// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "fmt" + "time" +) + +// Scan implements the Scanner interface. +// The value type must be time.Time or string / []byte (formatted time-string), +// otherwise Scan fails. +func (nt *NullTime) Scan(value interface{}) (err error) { + if value == nil { + nt.Time, nt.Valid = time.Time{}, false + return + } + + switch v := value.(type) { + case time.Time: + nt.Time, nt.Valid = v, true + return + case []byte: + nt.Time, err = parseDateTime(v, time.UTC) + nt.Valid = (err == nil) + return + case string: + nt.Time, err = parseDateTime([]byte(v), time.UTC) + nt.Valid = (err == nil) + return + } + + nt.Valid = false + return fmt.Errorf("Can't convert %T to time.Time", value) +} + +// Value implements the driver Valuer interface. +func (nt NullTime) Value() (driver.Value, error) { + if !nt.Valid { + return nil, nil + } + return nt.Time, nil +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go new file mode 100644 index 00000000..453b4b39 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime_go113.go @@ -0,0 +1,40 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build go1.13 + +package mysql + +import ( + "database/sql" +) + +// NullTime represents a time.Time that may be NULL. 
+// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +// +// Deprecated: NullTime doesn't honor the loc DSN parameter. +// NullTime.Scan interprets a time as UTC, not the loc DSN parameter. +// Use sql.NullTime instead. +type NullTime sql.NullTime + +// for internal use. +// the mysql package uses sql.NullTime if it is available. +// if not, the package uses mysql.NullTime. +type nullTime = sql.NullTime // sql.NullTime is available diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go new file mode 100644 index 00000000..9f7ae27a --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/nulltime_legacy.go @@ -0,0 +1,39 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2013 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +// +build !go1.13 + +package mysql + +import ( + "time" +) + +// NullTime represents a time.Time that may be NULL. +// NullTime implements the Scanner interface so +// it can be used as a scan destination: +// +// var nt NullTime +// err := db.QueryRow("SELECT time FROM foo WHERE id=?", id).Scan(&nt) +// ... +// if nt.Valid { +// // use nt.Time +// } else { +// // NULL value +// } +// +// This NullTime implementation is not driver-specific +type NullTime struct { + Time time.Time + Valid bool // Valid is true if Time is not NULL +} + +// for internal use. 
+// the mysql package uses sql.NullTime if it is available. +// if not, the package uses mysql.NullTime. +type nullTime = NullTime // sql.NullTime is not available diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/packets.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/packets.go new file mode 100644 index 00000000..6664e5ae --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/packets.go @@ -0,0 +1,1349 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "bytes" + "crypto/tls" + "database/sql/driver" + "encoding/binary" + "encoding/json" + "errors" + "fmt" + "io" + "math" + "time" +) + +// Packets documentation: +// http://dev.mysql.com/doc/internals/en/client-server-protocol.html + +// Read packet to buffer 'data' +func (mc *mysqlConn) readPacket() ([]byte, error) { + var prevData []byte + for { + // read packet header + data, err := mc.buf.readNext(4) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // packet length [24 bit] + pktLen := int(uint32(data[0]) | uint32(data[1])<<8 | uint32(data[2])<<16) + + // check packet sync [8 bit] + if data[3] != mc.sequence { + if data[3] > mc.sequence { + return nil, ErrPktSyncMul + } + return nil, ErrPktSync + } + mc.sequence++ + + // packets with length 0 terminate a previous packet which is a + // multiple of (2^24)-1 bytes long + if pktLen == 0 { + // there was no previous packet + if prevData == nil { + errLog.Print(ErrMalformPkt) + mc.Close() + return nil, ErrInvalidConn + } + + return prevData, nil + } + + // read packet body [pktLen 
bytes] + data, err = mc.buf.readNext(pktLen) + if err != nil { + if cerr := mc.canceled.Value(); cerr != nil { + return nil, cerr + } + errLog.Print(err) + mc.Close() + return nil, ErrInvalidConn + } + + // return data if this was the last packet + if pktLen < maxPacketSize { + // zero allocations for non-split packets + if prevData == nil { + return data, nil + } + + return append(prevData, data...), nil + } + + prevData = append(prevData, data...) + } +} + +// Write packet buffer 'data' +func (mc *mysqlConn) writePacket(data []byte) error { + pktLen := len(data) - 4 + + if pktLen > mc.maxAllowedPacket { + return ErrPktTooLarge + } + + // Perform a stale connection check. We only perform this check for + // the first query on a connection that has been checked out of the + // connection pool: a fresh connection from the pool is more likely + // to be stale, and it has not performed any previous writes that + // could cause data corruption, so it's safe to return ErrBadConn + // if the check fails. 
+ if mc.reset { + mc.reset = false + conn := mc.netConn + if mc.rawConn != nil { + conn = mc.rawConn + } + var err error + // If this connection has a ReadTimeout which we've been setting on + // reads, reset it to its default value before we attempt a non-blocking + // read, otherwise the scheduler will just time us out before we can read + if mc.cfg.ReadTimeout != 0 { + err = conn.SetReadDeadline(time.Time{}) + } + if err == nil && mc.cfg.CheckConnLiveness { + err = connCheck(conn) + } + if err != nil { + errLog.Print("closing bad idle connection: ", err) + mc.Close() + return driver.ErrBadConn + } + } + + for { + var size int + if pktLen >= maxPacketSize { + data[0] = 0xff + data[1] = 0xff + data[2] = 0xff + size = maxPacketSize + } else { + data[0] = byte(pktLen) + data[1] = byte(pktLen >> 8) + data[2] = byte(pktLen >> 16) + size = pktLen + } + data[3] = mc.sequence + + // Write packet + if mc.writeTimeout > 0 { + if err := mc.netConn.SetWriteDeadline(time.Now().Add(mc.writeTimeout)); err != nil { + return err + } + } + + n, err := mc.netConn.Write(data[:4+size]) + if err == nil && n == 4+size { + mc.sequence++ + if size != maxPacketSize { + return nil + } + pktLen -= size + data = data[size:] + continue + } + + // Handle error + if err == nil { // n != len(data) + mc.cleanup() + errLog.Print(ErrMalformPkt) + } else { + if cerr := mc.canceled.Value(); cerr != nil { + return cerr + } + if n == 0 && pktLen == len(data)-4 { + // only for the first loop iteration when nothing was written yet + return errBadConnNoWrite + } + mc.cleanup() + errLog.Print(err) + } + return ErrInvalidConn + } +} + +/****************************************************************************** +* Initialization Process * +******************************************************************************/ + +// Handshake Initialization Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::Handshake +func (mc *mysqlConn) readHandshakePacket() (data 
[]byte, plugin string, err error) { + data, err = mc.readPacket() + if err != nil { + // for init we can rewrite this to ErrBadConn for sql.Driver to retry, since + // in connection initialization we don't risk retrying non-idempotent actions. + if err == ErrInvalidConn { + return nil, "", driver.ErrBadConn + } + return + } + + if data[0] == iERR { + return nil, "", mc.handleErrorPacket(data) + } + + // protocol version [1 byte] + if data[0] < minProtocolVersion { + return nil, "", fmt.Errorf( + "unsupported protocol version %d. Version %d or higher is required", + data[0], + minProtocolVersion, + ) + } + + // server version [null terminated string] + // connection id [4 bytes] + pos := 1 + bytes.IndexByte(data[1:], 0x00) + 1 + 4 + + // first part of the password cipher [8 bytes] + authData := data[pos : pos+8] + + // (filler) always 0x00 [1 byte] + pos += 8 + 1 + + // capability flags (lower 2 bytes) [2 bytes] + mc.flags = clientFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + if mc.flags&clientProtocol41 == 0 { + return nil, "", ErrOldProtocol + } + if mc.flags&clientSSL == 0 && mc.cfg.tls != nil { + if mc.cfg.TLSConfig == "preferred" { + mc.cfg.tls = nil + } else { + return nil, "", ErrNoTLS + } + } + pos += 2 + + if len(data) > pos { + // character set [1 byte] + // status flags [2 bytes] + // capability flags (upper 2 bytes) [2 bytes] + // length of auth-plugin-data [1 byte] + // reserved (all [00]) [10 bytes] + pos += 1 + 2 + 2 + 1 + 10 + + // second part of the password cipher [mininum 13 bytes], + // where len=MAX(13, length of auth-plugin-data - 8) + // + // The web documentation is ambiguous about the length. However, + // according to mysql-5.7/sql/auth/sql_authentication.cc line 538, + // the 13th byte is "\0 byte, terminating the second part of + // a scramble". So the second part of the password cipher is + // a NULL terminated string that's at least 13 bytes with the + // last byte being NULL. 
+ // + // The official Python library uses the fixed length 12 + // which seems to work but technically could have a hidden bug. + authData = append(authData, data[pos:pos+12]...) + pos += 13 + + // EOF if version (>= 5.5.7 and < 5.5.10) or (>= 5.6.0 and < 5.6.2) + // \NUL otherwise + if end := bytes.IndexByte(data[pos:], 0x00); end != -1 { + plugin = string(data[pos : pos+end]) + } else { + plugin = string(data[pos:]) + } + + // make a memory safe copy of the cipher slice + var b [20]byte + copy(b[:], authData) + return b[:], plugin, nil + } + + // make a memory safe copy of the cipher slice + var b [8]byte + copy(b[:], authData) + return b[:], plugin, nil +} + +// Client Authentication Packet +// http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::HandshakeResponse +func (mc *mysqlConn) writeHandshakeResponsePacket(authResp []byte, plugin string) error { + // Adjust client flags based on server support + clientFlags := clientProtocol41 | + clientSecureConn | + clientLongPassword | + clientTransactions | + clientLocalFiles | + clientPluginAuth | + clientMultiResults | + mc.flags&clientLongFlag + + if mc.cfg.ClientFoundRows { + clientFlags |= clientFoundRows + } + + // To enable TLS / SSL + if mc.cfg.tls != nil { + clientFlags |= clientSSL + } + + if mc.cfg.MultiStatements { + clientFlags |= clientMultiStatements + } + + // encode length of the auth plugin data + var authRespLEIBuf [9]byte + authRespLen := len(authResp) + authRespLEI := appendLengthEncodedInteger(authRespLEIBuf[:0], uint64(authRespLen)) + if len(authRespLEI) > 1 { + // if the length can not be written in 1 byte, it must be written as a + // length encoded integer + clientFlags |= clientPluginAuthLenEncClientData + } + + pktLen := 4 + 4 + 1 + 23 + len(mc.cfg.User) + 1 + len(authRespLEI) + len(authResp) + 21 + 1 + + // To specify a db name + if n := len(mc.cfg.DBName); n > 0 { + clientFlags |= clientConnectWithDB + pktLen += n + 1 + } + + // Calculate packet length 
and get buffer with that size + data, err := mc.buf.takeSmallBuffer(pktLen + 4) + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // ClientFlags [32 bit] + data[4] = byte(clientFlags) + data[5] = byte(clientFlags >> 8) + data[6] = byte(clientFlags >> 16) + data[7] = byte(clientFlags >> 24) + + // MaxPacketSize [32 bit] (none) + data[8] = 0x00 + data[9] = 0x00 + data[10] = 0x00 + data[11] = 0x00 + + // Charset [1 byte] + var found bool + data[12], found = collations[mc.cfg.Collation] + if !found { + // Note possibility for false negatives: + // could be triggered although the collation is valid if the + // collations map does not contain entries the server supports. + return errors.New("unknown collation") + } + + // Filler [23 bytes] (all 0x00) + pos := 13 + for ; pos < 13+23; pos++ { + data[pos] = 0 + } + + // SSL Connection Request Packet + // http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::SSLRequest + if mc.cfg.tls != nil { + // Send TLS / SSL request packet + if err := mc.writePacket(data[:(4+4+1+23)+4]); err != nil { + return err + } + + // Switch to TLS + tlsConn := tls.Client(mc.netConn, mc.cfg.tls) + if err := tlsConn.Handshake(); err != nil { + return err + } + mc.rawConn = mc.netConn + mc.netConn = tlsConn + mc.buf.nc = tlsConn + } + + // User [null terminated string] + if len(mc.cfg.User) > 0 { + pos += copy(data[pos:], mc.cfg.User) + } + data[pos] = 0x00 + pos++ + + // Auth Data [length encoded integer] + pos += copy(data[pos:], authRespLEI) + pos += copy(data[pos:], authResp) + + // Databasename [null terminated string] + if len(mc.cfg.DBName) > 0 { + pos += copy(data[pos:], mc.cfg.DBName) + data[pos] = 0x00 + pos++ + } + + pos += copy(data[pos:], plugin) + data[pos] = 0x00 + pos++ + + // Send Auth packet + return mc.writePacket(data[:pos]) +} + +// 
http://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::AuthSwitchResponse +func (mc *mysqlConn) writeAuthSwitchPacket(authData []byte) error { + pktLen := 4 + len(authData) + data, err := mc.buf.takeSmallBuffer(pktLen) + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add the auth data [EOF] + copy(data[4:], authData) + return mc.writePacket(data) +} + +/****************************************************************************** +* Command Packets * +******************************************************************************/ + +func (mc *mysqlConn) writeCommandPacket(command byte) error { + // Reset Packet Sequence + mc.sequence = 0 + + data, err := mc.buf.takeSmallBuffer(4 + 1) + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketStr(command byte, arg string) error { + // Reset Packet Sequence + mc.sequence = 0 + + pktLen := 1 + len(arg) + data, err := mc.buf.takeBuffer(pktLen + 4) + if err != nil { + // cannot take the buffer. Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg + copy(data[5:], arg) + + // Send CMD packet + return mc.writePacket(data) +} + +func (mc *mysqlConn) writeCommandPacketUint32(command byte, arg uint32) error { + // Reset Packet Sequence + mc.sequence = 0 + + data, err := mc.buf.takeSmallBuffer(4 + 1 + 4) + if err != nil { + // cannot take the buffer. 
Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // Add command byte + data[4] = command + + // Add arg [32 bit] + data[5] = byte(arg) + data[6] = byte(arg >> 8) + data[7] = byte(arg >> 16) + data[8] = byte(arg >> 24) + + // Send CMD packet + return mc.writePacket(data) +} + +/****************************************************************************** +* Result Packets * +******************************************************************************/ + +func (mc *mysqlConn) readAuthResult() ([]byte, string, error) { + data, err := mc.readPacket() + if err != nil { + return nil, "", err + } + + // packet indicator + switch data[0] { + + case iOK: + return nil, "", mc.handleOkPacket(data) + + case iAuthMoreData: + return data[1:], "", err + + case iEOF: + if len(data) == 1 { + // https://dev.mysql.com/doc/internals/en/connection-phase-packets.html#packet-Protocol::OldAuthSwitchRequest + return nil, "mysql_old_password", nil + } + pluginEndIndex := bytes.IndexByte(data, 0x00) + if pluginEndIndex < 0 { + return nil, "", ErrMalformPkt + } + plugin := string(data[1:pluginEndIndex]) + authData := data[pluginEndIndex+1:] + return authData, plugin, nil + + default: // Error otherwise + return nil, "", mc.handleErrorPacket(data) + } +} + +// Returns error if Packet is not an 'Result OK'-Packet +func (mc *mysqlConn) readResultOK() error { + data, err := mc.readPacket() + if err != nil { + return err + } + + if data[0] == iOK { + return mc.handleOkPacket(data) + } + return mc.handleErrorPacket(data) +} + +// Result Set Header Packet +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::Resultset +func (mc *mysqlConn) readResultSetHeaderPacket() (int, error) { + data, err := mc.readPacket() + if err == nil { + switch data[0] { + + case iOK: + return 0, mc.handleOkPacket(data) + + case iERR: + return 0, mc.handleErrorPacket(data) + + case iLocalInFile: + return 0, 
mc.handleInFileRequest(string(data[1:])) + } + + // column count + num, _, n := readLengthEncodedInteger(data) + if n-len(data) == 0 { + return int(num), nil + } + + return 0, ErrMalformPkt + } + return 0, err +} + +// Error Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-ERR_Packet +func (mc *mysqlConn) handleErrorPacket(data []byte) error { + if data[0] != iERR { + return ErrMalformPkt + } + + // 0xff [1 byte] + + // Error Number [16 bit uint] + errno := binary.LittleEndian.Uint16(data[1:3]) + + // 1792: ER_CANT_EXECUTE_IN_READ_ONLY_TRANSACTION + // 1290: ER_OPTION_PREVENTS_STATEMENT (returned by Aurora during failover) + if (errno == 1792 || errno == 1290) && mc.cfg.RejectReadOnly { + // Oops; we are connected to a read-only connection, and won't be able + // to issue any write statements. Since RejectReadOnly is configured, + // we throw away this connection hoping this one would have write + // permission. This is specifically for a possible race condition + // during failover (e.g. on AWS Aurora). See README.md for more. + // + // We explicitly close the connection before returning + // driver.ErrBadConn to ensure that `database/sql` purges this + // connection and initiates a new one for next statement next time. 
+ mc.Close() + return driver.ErrBadConn + } + + pos := 3 + + // SQL State [optional: # + 5bytes string] + if data[3] == 0x23 { + //sqlstate := string(data[4 : 4+5]) + pos = 9 + } + + // Error Message [string] + return &MySQLError{ + Number: errno, + Message: string(data[pos:]), + } +} + +func readStatus(b []byte) statusFlag { + return statusFlag(b[0]) | statusFlag(b[1])<<8 +} + +// Ok Packet +// http://dev.mysql.com/doc/internals/en/generic-response-packets.html#packet-OK_Packet +func (mc *mysqlConn) handleOkPacket(data []byte) error { + var n, m int + + // 0x00 [1 byte] + + // Affected rows [Length Coded Binary] + mc.affectedRows, _, n = readLengthEncodedInteger(data[1:]) + + // Insert id [Length Coded Binary] + mc.insertId, _, m = readLengthEncodedInteger(data[1+n:]) + + // server_status [2 bytes] + mc.status = readStatus(data[1+n+m : 1+n+m+2]) + if mc.status&statusMoreResultsExists != 0 { + return nil + } + + // warning count [2 bytes] + + return nil +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-Protocol::ColumnDefinition41 +func (mc *mysqlConn) readColumns(count int) ([]mysqlField, error) { + columns := make([]mysqlField, count) + + for i := 0; ; i++ { + data, err := mc.readPacket() + if err != nil { + return nil, err + } + + // EOF Packet + if data[0] == iEOF && (len(data) == 5 || len(data) == 1) { + if i == count { + return columns, nil + } + return nil, fmt.Errorf("column count mismatch n:%d len:%d", count, len(columns)) + } + + // Catalog + pos, err := skipLengthEncodedString(data) + if err != nil { + return nil, err + } + + // Database [len coded string] + n, err := skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Table [len coded string] + if mc.cfg.ColumnsWithAlias { + tableName, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + columns[i].tableName = 
string(tableName) + } else { + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + } + + // Original table [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Name [len coded string] + name, _, n, err := readLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + columns[i].name = string(name) + pos += n + + // Original name [len coded string] + n, err = skipLengthEncodedString(data[pos:]) + if err != nil { + return nil, err + } + pos += n + + // Filler [uint8] + pos++ + + // Charset [charset, collation uint8] + columns[i].charSet = data[pos] + pos += 2 + + // Length [uint32] + columns[i].length = binary.LittleEndian.Uint32(data[pos : pos+4]) + pos += 4 + + // Field type [uint8] + columns[i].fieldType = fieldType(data[pos]) + pos++ + + // Flags [uint16] + columns[i].flags = fieldFlag(binary.LittleEndian.Uint16(data[pos : pos+2])) + pos += 2 + + // Decimals [uint8] + columns[i].decimals = data[pos] + //pos++ + + // Default value [len coded binary] + //if pos < len(data) { + // defaultVal, _, err = bytesToLengthCodedBinary(data[pos:]) + //} + } +} + +// Read Packets as Field Packets until EOF-Packet or an Error appears +// http://dev.mysql.com/doc/internals/en/com-query-response.html#packet-ProtocolText::ResultsetRow +func (rows *textRows) readRow(dest []driver.Value) error { + mc := rows.mc + + if rows.rs.done { + return io.EOF + } + + data, err := mc.readPacket() + if err != nil { + return err + } + + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + // server_status [2 bytes] + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + if data[0] == iERR { + rows.mc = nil + return mc.handleErrorPacket(data) + } + + // RowSet Packet + var n int + var isNull bool + pos := 0 + + for i := range dest { + // Read bytes and convert to string + dest[i], 
isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + if !mc.parseTime { + continue + } else { + switch rows.rs.columns[i].fieldType { + case fieldTypeTimestamp, fieldTypeDateTime, + fieldTypeDate, fieldTypeNewDate: + dest[i], err = parseDateTime( + dest[i].([]byte), + mc.cfg.Loc, + ) + if err == nil { + continue + } + default: + continue + } + } + + } else { + dest[i] = nil + continue + } + } + return err // err != nil + } + + return nil +} + +// Reads Packets until EOF-Packet or an Error appears. Returns count of Packets read +func (mc *mysqlConn) readUntilEOF() error { + for { + data, err := mc.readPacket() + if err != nil { + return err + } + + switch data[0] { + case iERR: + return mc.handleErrorPacket(data) + case iEOF: + if len(data) == 5 { + mc.status = readStatus(data[3:]) + } + return nil + } + } +} + +/****************************************************************************** +* Prepared Statements * +******************************************************************************/ + +// Prepare Result Packets +// http://dev.mysql.com/doc/internals/en/com-stmt-prepare-response.html +func (stmt *mysqlStmt) readPrepareResultPacket() (uint16, error) { + data, err := stmt.mc.readPacket() + if err == nil { + // packet indicator [1 byte] + if data[0] != iOK { + return 0, stmt.mc.handleErrorPacket(data) + } + + // statement id [4 bytes] + stmt.id = binary.LittleEndian.Uint32(data[1:5]) + + // Column count [16 bit uint] + columnCount := binary.LittleEndian.Uint16(data[5:7]) + + // Param count [16 bit uint] + stmt.paramCount = int(binary.LittleEndian.Uint16(data[7:9])) + + // Reserved [8 bit] + + // Warning count [16 bit uint] + + return columnCount, nil + } + return 0, err +} + +// http://dev.mysql.com/doc/internals/en/com-stmt-send-long-data.html +func (stmt *mysqlStmt) writeCommandLongData(paramID int, arg []byte) error { + maxLen := stmt.mc.maxAllowedPacket - 1 + pktLen := maxLen + + // After the header (bytes 
0-3) follows before the data: + // 1 byte command + // 4 bytes stmtID + // 2 bytes paramID + const dataOffset = 1 + 4 + 2 + + // Cannot use the write buffer since + // a) the buffer is too small + // b) it is in use + data := make([]byte, 4+1+4+2+len(arg)) + + copy(data[4+dataOffset:], arg) + + for argLen := len(arg); argLen > 0; argLen -= pktLen - dataOffset { + if dataOffset+argLen < maxLen { + pktLen = dataOffset + argLen + } + + stmt.mc.sequence = 0 + // Add command byte [1 byte] + data[4] = comStmtSendLongData + + // Add stmtID [32 bit] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // Add paramID [16 bit] + data[9] = byte(paramID) + data[10] = byte(paramID >> 8) + + // Send CMD packet + err := stmt.mc.writePacket(data[:4+pktLen]) + if err == nil { + data = data[pktLen-dataOffset:] + continue + } + return err + + } + + // Reset Packet Sequence + stmt.mc.sequence = 0 + return nil +} + +// Execute Prepared Statement +// http://dev.mysql.com/doc/internals/en/com-stmt-execute.html +func (stmt *mysqlStmt) writeExecutePacket(args []driver.Value) error { + if len(args) != stmt.paramCount { + return fmt.Errorf( + "argument count mismatch (got: %d; has: %d)", + len(args), + stmt.paramCount, + ) + } + + const minPktLen = 4 + 1 + 4 + 1 + 4 + mc := stmt.mc + + // Determine threshold dynamically to avoid packet size shortage. + longDataSize := mc.maxAllowedPacket / (stmt.paramCount + 1) + if longDataSize < 64 { + longDataSize = 64 + } + + // Reset packet-sequence + mc.sequence = 0 + + var data []byte + var err error + + if len(args) == 0 { + data, err = mc.buf.takeBuffer(minPktLen) + } else { + data, err = mc.buf.takeCompleteBuffer() + // In this case the len(data) == cap(data) which is used to optimise the flow below. + } + if err != nil { + // cannot take the buffer. 
Something must be wrong with the connection + errLog.Print(err) + return errBadConnNoWrite + } + + // command [1 byte] + data[4] = comStmtExecute + + // statement_id [4 bytes] + data[5] = byte(stmt.id) + data[6] = byte(stmt.id >> 8) + data[7] = byte(stmt.id >> 16) + data[8] = byte(stmt.id >> 24) + + // flags (0: CURSOR_TYPE_NO_CURSOR) [1 byte] + data[9] = 0x00 + + // iteration_count (uint32(1)) [4 bytes] + data[10] = 0x01 + data[11] = 0x00 + data[12] = 0x00 + data[13] = 0x00 + + if len(args) > 0 { + pos := minPktLen + + var nullMask []byte + if maskLen, typesLen := (len(args)+7)/8, 1+2*len(args); pos+maskLen+typesLen >= cap(data) { + // buffer has to be extended but we don't know by how much so + // we depend on append after all data with known sizes fit. + // We stop at that because we deal with a lot of columns here + // which makes the required allocation size hard to guess. + tmp := make([]byte, pos+maskLen+typesLen) + copy(tmp[:pos], data[:pos]) + data = tmp + nullMask = data[pos : pos+maskLen] + // No need to clean nullMask as make ensures that. 
+ pos += maskLen + } else { + nullMask = data[pos : pos+maskLen] + for i := range nullMask { + nullMask[i] = 0 + } + pos += maskLen + } + + // newParameterBoundFlag 1 [1 byte] + data[pos] = 0x01 + pos++ + + // type of each parameter [len(args)*2 bytes] + paramTypes := data[pos:] + pos += len(args) * 2 + + // value of each parameter [n bytes] + paramValues := data[pos:pos] + valuesCap := cap(paramValues) + + for i, arg := range args { + // build NULL-bitmap + if arg == nil { + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + continue + } + + if v, ok := arg.(json.RawMessage); ok { + arg = []byte(v) + } + // cache types and values + switch v := arg.(type) { + case int64: + paramTypes[i+i] = byte(fieldTypeLongLong) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case uint64: + paramTypes[i+i] = byte(fieldTypeLongLong) + paramTypes[i+i+1] = 0x80 // type is unsigned + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + uint64(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(uint64(v))..., + ) + } + + case float64: + paramTypes[i+i] = byte(fieldTypeDouble) + paramTypes[i+i+1] = 0x00 + + if cap(paramValues)-len(paramValues)-8 >= 0 { + paramValues = paramValues[:len(paramValues)+8] + binary.LittleEndian.PutUint64( + paramValues[len(paramValues)-8:], + math.Float64bits(v), + ) + } else { + paramValues = append(paramValues, + uint64ToBytes(math.Float64bits(v))..., + ) + } + + case bool: + paramTypes[i+i] = byte(fieldTypeTiny) + paramTypes[i+i+1] = 0x00 + + if v { + paramValues = append(paramValues, 0x01) + } else { + paramValues = 
append(paramValues, 0x00) + } + + case []byte: + // Common case (non-nil value) first + if v != nil { + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < longDataSize { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, v); err != nil { + return err + } + } + continue + } + + // Handle []byte(nil) as a NULL value + nullMask[i/8] |= 1 << (uint(i) & 7) + paramTypes[i+i] = byte(fieldTypeNULL) + paramTypes[i+i+1] = 0x00 + + case string: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + if len(v) < longDataSize { + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(v)), + ) + paramValues = append(paramValues, v...) + } else { + if err := stmt.writeCommandLongData(i, []byte(v)); err != nil { + return err + } + } + + case time.Time: + paramTypes[i+i] = byte(fieldTypeString) + paramTypes[i+i+1] = 0x00 + + var a [64]byte + var b = a[:0] + + if v.IsZero() { + b = append(b, "0000-00-00"...) + } else { + b, err = appendDateTime(b, v.In(mc.cfg.Loc)) + if err != nil { + return err + } + } + + paramValues = appendLengthEncodedInteger(paramValues, + uint64(len(b)), + ) + paramValues = append(paramValues, b...) + + default: + return fmt.Errorf("cannot convert type: %T", arg) + } + } + + // Check if param values exceeded the available buffer + // In that case we must build the data packet with the new values buffer + if valuesCap != cap(paramValues) { + data = append(data[:pos], paramValues...) 
+ if err = mc.buf.store(data); err != nil { + errLog.Print(err) + return errBadConnNoWrite + } + } + + pos += len(paramValues) + data = data[:pos] + } + + return mc.writePacket(data) +} + +func (mc *mysqlConn) discardResults() error { + for mc.status&statusMoreResultsExists != 0 { + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return err + } + if resLen > 0 { + // columns + if err := mc.readUntilEOF(); err != nil { + return err + } + // rows + if err := mc.readUntilEOF(); err != nil { + return err + } + } + } + return nil +} + +// http://dev.mysql.com/doc/internals/en/binary-protocol-resultset-row.html +func (rows *binaryRows) readRow(dest []driver.Value) error { + data, err := rows.mc.readPacket() + if err != nil { + return err + } + + // packet indicator [1 byte] + if data[0] != iOK { + // EOF Packet + if data[0] == iEOF && len(data) == 5 { + rows.mc.status = readStatus(data[3:]) + rows.rs.done = true + if !rows.HasNextResultSet() { + rows.mc = nil + } + return io.EOF + } + mc := rows.mc + rows.mc = nil + + // Error otherwise + return mc.handleErrorPacket(data) + } + + // NULL-bitmap, [(column-count + 7 + 2) / 8 bytes] + pos := 1 + (len(dest)+7+2)>>3 + nullMask := data[1:pos] + + for i := range dest { + // Field is NULL + // (byte >> bit-pos) % 2 == 1 + if ((nullMask[(i+2)>>3] >> uint((i+2)&7)) & 1) == 1 { + dest[i] = nil + continue + } + + // Convert to byte-coded string + switch rows.rs.columns[i].fieldType { + case fieldTypeNULL: + dest[i] = nil + continue + + // Numeric Types + case fieldTypeTiny: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(data[pos]) + } else { + dest[i] = int64(int8(data[pos])) + } + pos++ + continue + + case fieldTypeShort, fieldTypeYear: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint16(data[pos : pos+2])) + } else { + dest[i] = int64(int16(binary.LittleEndian.Uint16(data[pos : pos+2]))) + } + pos += 2 + continue + + case fieldTypeInt24, 
fieldTypeLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + dest[i] = int64(binary.LittleEndian.Uint32(data[pos : pos+4])) + } else { + dest[i] = int64(int32(binary.LittleEndian.Uint32(data[pos : pos+4]))) + } + pos += 4 + continue + + case fieldTypeLongLong: + if rows.rs.columns[i].flags&flagUnsigned != 0 { + val := binary.LittleEndian.Uint64(data[pos : pos+8]) + if val > math.MaxInt64 { + dest[i] = uint64ToString(val) + } else { + dest[i] = int64(val) + } + } else { + dest[i] = int64(binary.LittleEndian.Uint64(data[pos : pos+8])) + } + pos += 8 + continue + + case fieldTypeFloat: + dest[i] = math.Float32frombits(binary.LittleEndian.Uint32(data[pos : pos+4])) + pos += 4 + continue + + case fieldTypeDouble: + dest[i] = math.Float64frombits(binary.LittleEndian.Uint64(data[pos : pos+8])) + pos += 8 + continue + + // Length coded Binary Strings + case fieldTypeDecimal, fieldTypeNewDecimal, fieldTypeVarChar, + fieldTypeBit, fieldTypeEnum, fieldTypeSet, fieldTypeTinyBLOB, + fieldTypeMediumBLOB, fieldTypeLongBLOB, fieldTypeBLOB, + fieldTypeVarString, fieldTypeString, fieldTypeGeometry, fieldTypeJSON: + var isNull bool + var n int + dest[i], isNull, n, err = readLengthEncodedString(data[pos:]) + pos += n + if err == nil { + if !isNull { + continue + } else { + dest[i] = nil + continue + } + } + return err + + case + fieldTypeDate, fieldTypeNewDate, // Date YYYY-MM-DD + fieldTypeTime, // Time [-][H]HH:MM:SS[.fractal] + fieldTypeTimestamp, fieldTypeDateTime: // Timestamp YYYY-MM-DD HH:MM:SS[.fractal] + + num, isNull, n := readLengthEncodedInteger(data[pos:]) + pos += n + + switch { + case isNull: + dest[i] = nil + continue + case rows.rs.columns[i].fieldType == fieldTypeTime: + // database/sql does not support an equivalent to TIME, return a string + var dstlen uint8 + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 8 + case 1, 2, 3, 4, 5, 6: + dstlen = 8 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, 
illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + dest[i], err = formatBinaryTime(data[pos:pos+int(num)], dstlen) + case rows.mc.parseTime: + dest[i], err = parseBinaryDateTime(num, data[pos:], rows.mc.cfg.Loc) + default: + var dstlen uint8 + if rows.rs.columns[i].fieldType == fieldTypeDate { + dstlen = 10 + } else { + switch decimals := rows.rs.columns[i].decimals; decimals { + case 0x00, 0x1f: + dstlen = 19 + case 1, 2, 3, 4, 5, 6: + dstlen = 19 + 1 + decimals + default: + return fmt.Errorf( + "protocol error, illegal decimals value %d", + rows.rs.columns[i].decimals, + ) + } + } + dest[i], err = formatBinaryDateTime(data[pos:pos+int(num)], dstlen) + } + + if err == nil { + pos += int(num) + continue + } else { + return err + } + + // Please report if this happens! + default: + return fmt.Errorf("unknown field type %d", rows.rs.columns[i].fieldType) + } + } + + return nil +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/result.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/result.go new file mode 100644 index 00000000..c6438d03 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/result.go @@ -0,0 +1,22 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +type mysqlResult struct { + affectedRows int64 + insertId int64 +} + +func (res *mysqlResult) LastInsertId() (int64, error) { + return res.insertId, nil +} + +func (res *mysqlResult) RowsAffected() (int64, error) { + return res.affectedRows, nil +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/rows.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/rows.go new file mode 100644 index 00000000..888bdb5f --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/rows.go @@ -0,0 +1,223 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "io" + "math" + "reflect" +) + +type resultSet struct { + columns []mysqlField + columnNames []string + done bool +} + +type mysqlRows struct { + mc *mysqlConn + rs resultSet + finish func() +} + +type binaryRows struct { + mysqlRows +} + +type textRows struct { + mysqlRows +} + +func (rows *mysqlRows) Columns() []string { + if rows.rs.columnNames != nil { + return rows.rs.columnNames + } + + columns := make([]string, len(rows.rs.columns)) + if rows.mc != nil && rows.mc.cfg.ColumnsWithAlias { + for i := range columns { + if tableName := rows.rs.columns[i].tableName; len(tableName) > 0 { + columns[i] = tableName + "." 
+ rows.rs.columns[i].name + } else { + columns[i] = rows.rs.columns[i].name + } + } + } else { + for i := range columns { + columns[i] = rows.rs.columns[i].name + } + } + + rows.rs.columnNames = columns + return columns +} + +func (rows *mysqlRows) ColumnTypeDatabaseTypeName(i int) string { + return rows.rs.columns[i].typeDatabaseName() +} + +// func (rows *mysqlRows) ColumnTypeLength(i int) (length int64, ok bool) { +// return int64(rows.rs.columns[i].length), true +// } + +func (rows *mysqlRows) ColumnTypeNullable(i int) (nullable, ok bool) { + return rows.rs.columns[i].flags&flagNotNULL == 0, true +} + +func (rows *mysqlRows) ColumnTypePrecisionScale(i int) (int64, int64, bool) { + column := rows.rs.columns[i] + decimals := int64(column.decimals) + + switch column.fieldType { + case fieldTypeDecimal, fieldTypeNewDecimal: + if decimals > 0 { + return int64(column.length) - 2, decimals, true + } + return int64(column.length) - 1, decimals, true + case fieldTypeTimestamp, fieldTypeDateTime, fieldTypeTime: + return decimals, decimals, true + case fieldTypeFloat, fieldTypeDouble: + if decimals == 0x1f { + return math.MaxInt64, math.MaxInt64, true + } + return math.MaxInt64, decimals, true + } + + return 0, 0, false +} + +func (rows *mysqlRows) ColumnTypeScanType(i int) reflect.Type { + return rows.rs.columns[i].scanType() +} + +func (rows *mysqlRows) Close() (err error) { + if f := rows.finish; f != nil { + f() + rows.finish = nil + } + + mc := rows.mc + if mc == nil { + return nil + } + if err := mc.error(); err != nil { + return err + } + + // flip the buffer for this connection if we need to drain it. + // note that for a successful query (i.e. 
one where rows.next() + // has been called until it returns false), `rows.mc` will be nil + // by the time the user calls `(*Rows).Close`, so we won't reach this + // see: https://github.com/golang/go/commit/651ddbdb5056ded455f47f9c494c67b389622a47 + mc.buf.flip() + + // Remove unread packets from stream + if !rows.rs.done { + err = mc.readUntilEOF() + } + if err == nil { + if err = mc.discardResults(); err != nil { + return err + } + } + + rows.mc = nil + return err +} + +func (rows *mysqlRows) HasNextResultSet() (b bool) { + if rows.mc == nil { + return false + } + return rows.mc.status&statusMoreResultsExists != 0 +} + +func (rows *mysqlRows) nextResultSet() (int, error) { + if rows.mc == nil { + return 0, io.EOF + } + if err := rows.mc.error(); err != nil { + return 0, err + } + + // Remove unread packets from stream + if !rows.rs.done { + if err := rows.mc.readUntilEOF(); err != nil { + return 0, err + } + rows.rs.done = true + } + + if !rows.HasNextResultSet() { + rows.mc = nil + return 0, io.EOF + } + rows.rs = resultSet{} + return rows.mc.readResultSetHeaderPacket() +} + +func (rows *mysqlRows) nextNotEmptyResultSet() (int, error) { + for { + resLen, err := rows.nextResultSet() + if err != nil { + return 0, err + } + + if resLen > 0 { + return resLen, nil + } + + rows.rs.done = true + } +} + +func (rows *binaryRows) NextResultSet() error { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *binaryRows) Next(dest []driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} + +func (rows *textRows) NextResultSet() (err error) { + resLen, err := rows.nextNotEmptyResultSet() + if err != nil { + return err + } + + rows.rs.columns, err = rows.mc.readColumns(resLen) + return err +} + +func (rows *textRows) Next(dest 
[]driver.Value) error { + if mc := rows.mc; mc != nil { + if err := mc.error(); err != nil { + return err + } + + // Fetch next row from stream + return rows.readRow(dest) + } + return io.EOF +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/statement.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/statement.go new file mode 100644 index 00000000..18a3ae49 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/statement.go @@ -0,0 +1,220 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "database/sql/driver" + "encoding/json" + "fmt" + "io" + "reflect" +) + +type mysqlStmt struct { + mc *mysqlConn + id uint32 + paramCount int +} + +func (stmt *mysqlStmt) Close() error { + if stmt.mc == nil || stmt.mc.closed.IsSet() { + // driver.Stmt.Close can be called more than once, thus this function + // has to be idempotent. + // See also Issue #450 and golang/go#16019. 
+ //errLog.Print(ErrInvalidConn) + return driver.ErrBadConn + } + + err := stmt.mc.writeCommandPacketUint32(comStmtClose, stmt.id) + stmt.mc = nil + return err +} + +func (stmt *mysqlStmt) NumInput() int { + return stmt.paramCount +} + +func (stmt *mysqlStmt) ColumnConverter(idx int) driver.ValueConverter { + return converter{} +} + +func (stmt *mysqlStmt) CheckNamedValue(nv *driver.NamedValue) (err error) { + nv.Value, err = converter{}.ConvertValue(nv.Value) + return +} + +func (stmt *mysqlStmt) Exec(args []driver.Value) (driver.Result, error) { + if stmt.mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, stmt.mc.markBadConn(err) + } + + mc := stmt.mc + + mc.affectedRows = 0 + mc.insertId = 0 + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + if resLen > 0 { + // Columns + if err = mc.readUntilEOF(); err != nil { + return nil, err + } + + // Rows + if err := mc.readUntilEOF(); err != nil { + return nil, err + } + } + + if err := mc.discardResults(); err != nil { + return nil, err + } + + return &mysqlResult{ + affectedRows: int64(mc.affectedRows), + insertId: int64(mc.insertId), + }, nil +} + +func (stmt *mysqlStmt) Query(args []driver.Value) (driver.Rows, error) { + return stmt.query(args) +} + +func (stmt *mysqlStmt) query(args []driver.Value) (*binaryRows, error) { + if stmt.mc.closed.IsSet() { + errLog.Print(ErrInvalidConn) + return nil, driver.ErrBadConn + } + // Send command + err := stmt.writeExecutePacket(args) + if err != nil { + return nil, stmt.mc.markBadConn(err) + } + + mc := stmt.mc + + // Read Result + resLen, err := mc.readResultSetHeaderPacket() + if err != nil { + return nil, err + } + + rows := new(binaryRows) + + if resLen > 0 { + rows.mc = mc + rows.rs.columns, err = mc.readColumns(resLen) + } else { + rows.rs.done = true + + switch err := rows.NextResultSet(); 
err { + case nil, io.EOF: + return rows, nil + default: + return nil, err + } + } + + return rows, err +} + +var jsonType = reflect.TypeOf(json.RawMessage{}) + +type converter struct{} + +// ConvertValue mirrors the reference/default converter in database/sql/driver +// with _one_ exception. We support uint64 with their high bit and the default +// implementation does not. This function should be kept in sync with +// database/sql/driver defaultConverter.ConvertValue() except for that +// deliberate difference. +func (c converter) ConvertValue(v interface{}) (driver.Value, error) { + if driver.IsValue(v) { + return v, nil + } + + if vr, ok := v.(driver.Valuer); ok { + sv, err := callValuerValue(vr) + if err != nil { + return nil, err + } + if driver.IsValue(sv) { + return sv, nil + } + // A value returend from the Valuer interface can be "a type handled by + // a database driver's NamedValueChecker interface" so we should accept + // uint64 here as well. + if u, ok := sv.(uint64); ok { + return u, nil + } + return nil, fmt.Errorf("non-Value type %T returned from Value", sv) + } + rv := reflect.ValueOf(v) + switch rv.Kind() { + case reflect.Ptr: + // indirect pointers + if rv.IsNil() { + return nil, nil + } else { + return c.ConvertValue(rv.Elem().Interface()) + } + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64: + return rv.Int(), nil + case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + return rv.Uint(), nil + case reflect.Float32, reflect.Float64: + return rv.Float(), nil + case reflect.Bool: + return rv.Bool(), nil + case reflect.Slice: + switch t := rv.Type(); { + case t == jsonType: + return v, nil + case t.Elem().Kind() == reflect.Uint8: + return rv.Bytes(), nil + default: + return nil, fmt.Errorf("unsupported type %T, a slice of %s", v, t.Elem().Kind()) + } + case reflect.String: + return rv.String(), nil + } + return nil, fmt.Errorf("unsupported type %T, a %s", v, rv.Kind()) +} + +var 
valuerReflectType = reflect.TypeOf((*driver.Valuer)(nil)).Elem() + +// callValuerValue returns vr.Value(), with one exception: +// If vr.Value is an auto-generated method on a pointer type and the +// pointer is nil, it would panic at runtime in the panicwrap +// method. Treat it like nil instead. +// +// This is so people can implement driver.Value on value types and +// still use nil pointers to those types to mean nil/NULL, just like +// string/*string. +// +// This is an exact copy of the same-named unexported function from the +// database/sql package. +func callValuerValue(vr driver.Valuer) (v driver.Value, err error) { + if rv := reflect.ValueOf(vr); rv.Kind() == reflect.Ptr && + rv.IsNil() && + rv.Type().Elem().Implements(valuerReflectType) { + return nil, nil + } + return vr.Value() +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/transaction.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/transaction.go new file mode 100644 index 00000000..417d7279 --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/transaction.go @@ -0,0 +1,31 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. 
+ +package mysql + +type mysqlTx struct { + mc *mysqlConn +} + +func (tx *mysqlTx) Commit() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("COMMIT") + tx.mc = nil + return +} + +func (tx *mysqlTx) Rollback() (err error) { + if tx.mc == nil || tx.mc.closed.IsSet() { + return ErrInvalidConn + } + err = tx.mc.exec("ROLLBACK") + tx.mc = nil + return +} diff --git a/terraform-server/vendor/github.com/go-sql-driver/mysql/utils.go b/terraform-server/vendor/github.com/go-sql-driver/mysql/utils.go new file mode 100644 index 00000000..d6545f5b --- /dev/null +++ b/terraform-server/vendor/github.com/go-sql-driver/mysql/utils.go @@ -0,0 +1,868 @@ +// Go MySQL Driver - A MySQL-Driver for Go's database/sql package +// +// Copyright 2012 The Go-MySQL-Driver Authors. All rights reserved. +// +// This Source Code Form is subject to the terms of the Mozilla Public +// License, v. 2.0. If a copy of the MPL was not distributed with this file, +// You can obtain one at http://mozilla.org/MPL/2.0/. + +package mysql + +import ( + "crypto/tls" + "database/sql" + "database/sql/driver" + "encoding/binary" + "errors" + "fmt" + "io" + "strconv" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Registry for custom tls.Configs +var ( + tlsConfigLock sync.RWMutex + tlsConfigRegistry map[string]*tls.Config +) + +// RegisterTLSConfig registers a custom tls.Config to be used with sql.Open. +// Use the key as a value in the DSN where tls=value. +// +// Note: The provided tls.Config is exclusively owned by the driver after +// registering it. 
+// +// rootCertPool := x509.NewCertPool() +// pem, err := ioutil.ReadFile("/path/ca-cert.pem") +// if err != nil { +// log.Fatal(err) +// } +// if ok := rootCertPool.AppendCertsFromPEM(pem); !ok { +// log.Fatal("Failed to append PEM.") +// } +// clientCert := make([]tls.Certificate, 0, 1) +// certs, err := tls.LoadX509KeyPair("/path/client-cert.pem", "/path/client-key.pem") +// if err != nil { +// log.Fatal(err) +// } +// clientCert = append(clientCert, certs) +// mysql.RegisterTLSConfig("custom", &tls.Config{ +// RootCAs: rootCertPool, +// Certificates: clientCert, +// }) +// db, err := sql.Open("mysql", "user@tcp(localhost:3306)/test?tls=custom") +// +func RegisterTLSConfig(key string, config *tls.Config) error { + if _, isBool := readBool(key); isBool || strings.ToLower(key) == "skip-verify" || strings.ToLower(key) == "preferred" { + return fmt.Errorf("key '%s' is reserved", key) + } + + tlsConfigLock.Lock() + if tlsConfigRegistry == nil { + tlsConfigRegistry = make(map[string]*tls.Config) + } + + tlsConfigRegistry[key] = config + tlsConfigLock.Unlock() + return nil +} + +// DeregisterTLSConfig removes the tls.Config associated with key. +func DeregisterTLSConfig(key string) { + tlsConfigLock.Lock() + if tlsConfigRegistry != nil { + delete(tlsConfigRegistry, key) + } + tlsConfigLock.Unlock() +} + +func getTLSConfigClone(key string) (config *tls.Config) { + tlsConfigLock.RLock() + if v, ok := tlsConfigRegistry[key]; ok { + config = v.Clone() + } + tlsConfigLock.RUnlock() + return +} + +// Returns the bool value of the input. 
+// The 2nd return value indicates if the input was a valid bool value +func readBool(input string) (value bool, valid bool) { + switch input { + case "1", "true", "TRUE", "True": + return true, true + case "0", "false", "FALSE", "False": + return false, true + } + + // Not a valid bool value + return +} + +/****************************************************************************** +* Time related utils * +******************************************************************************/ + +func parseDateTime(b []byte, loc *time.Location) (time.Time, error) { + const base = "0000-00-00 00:00:00.000000" + switch len(b) { + case 10, 19, 21, 22, 23, 24, 25, 26: // up to "YYYY-MM-DD HH:MM:SS.MMMMMM" + if string(b) == base[:len(b)] { + return time.Time{}, nil + } + + year, err := parseByteYear(b) + if err != nil { + return time.Time{}, err + } + if year <= 0 { + year = 1 + } + + if b[4] != '-' { + return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[4]) + } + + m, err := parseByte2Digits(b[5], b[6]) + if err != nil { + return time.Time{}, err + } + if m <= 0 { + m = 1 + } + month := time.Month(m) + + if b[7] != '-' { + return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[7]) + } + + day, err := parseByte2Digits(b[8], b[9]) + if err != nil { + return time.Time{}, err + } + if day <= 0 { + day = 1 + } + if len(b) == 10 { + return time.Date(year, month, day, 0, 0, 0, 0, loc), nil + } + + if b[10] != ' ' { + return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[10]) + } + + hour, err := parseByte2Digits(b[11], b[12]) + if err != nil { + return time.Time{}, err + } + if b[13] != ':' { + return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[13]) + } + + min, err := parseByte2Digits(b[14], b[15]) + if err != nil { + return time.Time{}, err + } + if b[16] != ':' { + return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[16]) + } + + sec, err := parseByte2Digits(b[17], b[18]) + if err != nil { + return time.Time{}, err + } + if len(b) == 
19 { + return time.Date(year, month, day, hour, min, sec, 0, loc), nil + } + + if b[19] != '.' { + return time.Time{}, fmt.Errorf("bad value for field: `%c`", b[19]) + } + nsec, err := parseByteNanoSec(b[20:]) + if err != nil { + return time.Time{}, err + } + return time.Date(year, month, day, hour, min, sec, nsec, loc), nil + default: + return time.Time{}, fmt.Errorf("invalid time bytes: %s", b) + } +} + +func parseByteYear(b []byte) (int, error) { + year, n := 0, 1000 + for i := 0; i < 4; i++ { + v, err := bToi(b[i]) + if err != nil { + return 0, err + } + year += v * n + n = n / 10 + } + return year, nil +} + +func parseByte2Digits(b1, b2 byte) (int, error) { + d1, err := bToi(b1) + if err != nil { + return 0, err + } + d2, err := bToi(b2) + if err != nil { + return 0, err + } + return d1*10 + d2, nil +} + +func parseByteNanoSec(b []byte) (int, error) { + ns, digit := 0, 100000 // max is 6-digits + for i := 0; i < len(b); i++ { + v, err := bToi(b[i]) + if err != nil { + return 0, err + } + ns += v * digit + digit /= 10 + } + // nanoseconds has 10-digits. (needs to scale digits) + // 10 - 6 = 4, so we have to multiple 1000. 
+ return ns * 1000, nil +} + +func bToi(b byte) (int, error) { + if b < '0' || b > '9' { + return 0, errors.New("not [0-9]") + } + return int(b - '0'), nil +} + +func parseBinaryDateTime(num uint64, data []byte, loc *time.Location) (driver.Value, error) { + switch num { + case 0: + return time.Time{}, nil + case 4: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + 0, 0, 0, 0, + loc, + ), nil + case 7: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + 0, + loc, + ), nil + case 11: + return time.Date( + int(binary.LittleEndian.Uint16(data[:2])), // year + time.Month(data[2]), // month + int(data[3]), // day + int(data[4]), // hour + int(data[5]), // minutes + int(data[6]), // seconds + int(binary.LittleEndian.Uint32(data[7:11]))*1000, // nanoseconds + loc, + ), nil + } + return nil, fmt.Errorf("invalid DATETIME packet length %d", num) +} + +func appendDateTime(buf []byte, t time.Time) ([]byte, error) { + year, month, day := t.Date() + hour, min, sec := t.Clock() + nsec := t.Nanosecond() + + if year < 1 || year > 9999 { + return buf, errors.New("year is not in the range [1, 9999]: " + strconv.Itoa(year)) // use errors.New instead of fmt.Errorf to avoid year escape to heap + } + year100 := year / 100 + year1 := year % 100 + + var localBuf [len("2006-01-02T15:04:05.999999999")]byte // does not escape + localBuf[0], localBuf[1], localBuf[2], localBuf[3] = digits10[year100], digits01[year100], digits10[year1], digits01[year1] + localBuf[4] = '-' + localBuf[5], localBuf[6] = digits10[month], digits01[month] + localBuf[7] = '-' + localBuf[8], localBuf[9] = digits10[day], digits01[day] + + if hour == 0 && min == 0 && sec == 0 && nsec == 0 { + return append(buf, localBuf[:10]...), nil + } + + localBuf[10] = ' ' + localBuf[11], localBuf[12] = 
digits10[hour], digits01[hour] + localBuf[13] = ':' + localBuf[14], localBuf[15] = digits10[min], digits01[min] + localBuf[16] = ':' + localBuf[17], localBuf[18] = digits10[sec], digits01[sec] + + if nsec == 0 { + return append(buf, localBuf[:19]...), nil + } + nsec100000000 := nsec / 100000000 + nsec1000000 := (nsec / 1000000) % 100 + nsec10000 := (nsec / 10000) % 100 + nsec100 := (nsec / 100) % 100 + nsec1 := nsec % 100 + localBuf[19] = '.' + + // milli second + localBuf[20], localBuf[21], localBuf[22] = + digits01[nsec100000000], digits10[nsec1000000], digits01[nsec1000000] + // micro second + localBuf[23], localBuf[24], localBuf[25] = + digits10[nsec10000], digits01[nsec10000], digits10[nsec100] + // nano second + localBuf[26], localBuf[27], localBuf[28] = + digits01[nsec100], digits10[nsec1], digits01[nsec1] + + // trim trailing zeros + n := len(localBuf) + for n > 0 && localBuf[n-1] == '0' { + n-- + } + + return append(buf, localBuf[:n]...), nil +} + +// zeroDateTime is used in formatBinaryDateTime to avoid an allocation +// if the DATE or DATETIME has the zero value. +// It must never be changed. +// The current behavior depends on database/sql copying the result. +var zeroDateTime = []byte("0000-00-00 00:00:00.000000") + +const digits01 = "0123456789012345678901234567890123456789012345678901234567890123456789012345678901234567890123456789" +const digits10 = "0000000000111111111122222222223333333333444444444455555555556666666666777777777788888888889999999999" + +func appendMicrosecs(dst, src []byte, decimals int) []byte { + if decimals <= 0 { + return dst + } + if len(src) == 0 { + return append(dst, ".000000"[:decimals+1]...) 
+ } + + microsecs := binary.LittleEndian.Uint32(src[:4]) + p1 := byte(microsecs / 10000) + microsecs -= 10000 * uint32(p1) + p2 := byte(microsecs / 100) + microsecs -= 100 * uint32(p2) + p3 := byte(microsecs) + + switch decimals { + default: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], digits01[p3], + ) + case 1: + return append(dst, '.', + digits10[p1], + ) + case 2: + return append(dst, '.', + digits10[p1], digits01[p1], + ) + case 3: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], + ) + case 4: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + ) + case 5: + return append(dst, '.', + digits10[p1], digits01[p1], + digits10[p2], digits01[p2], + digits10[p3], + ) + } +} + +func formatBinaryDateTime(src []byte, length uint8) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + return zeroDateTime[:length], nil + } + var dst []byte // return value + var p1, p2, p3 byte // current digit pair + + switch length { + case 10, 19, 21, 22, 23, 24, 25, 26: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s length %d", t, length) + } + switch len(src) { + case 4, 7, 11: + default: + t := "DATE" + if length > 10 { + t += "TIME" + } + return nil, fmt.Errorf("illegal %s packet length %d", t, len(src)) + } + dst = make([]byte, 0, length) + // start with the date + year := binary.LittleEndian.Uint16(src[:2]) + pt := year / 100 + p1 = byte(year - 100*uint16(pt)) + p2, p3 = src[2], src[3] + dst = append(dst, + digits10[pt], digits01[pt], + digits10[p1], digits01[p1], '-', + digits10[p2], digits01[p2], '-', + digits10[p3], digits01[p3], + ) + if length == 10 { + return dst, nil + } + if len(src) == 4 { + return append(dst, zeroDateTime[10:length]...), nil + } + dst = append(dst, ' ') + p1 = src[4] 
// hour + src = src[5:] + + // p1 is 2-digit hour, src is after hour + p2, p3 = src[0], src[1] + dst = append(dst, + digits10[p1], digits01[p1], ':', + digits10[p2], digits01[p2], ':', + digits10[p3], digits01[p3], + ) + return appendMicrosecs(dst, src[2:], int(length)-20), nil +} + +func formatBinaryTime(src []byte, length uint8) (driver.Value, error) { + // length expects the deterministic length of the zero value, + // negative time and 100+ hours are automatically added if needed + if len(src) == 0 { + return zeroDateTime[11 : 11+length], nil + } + var dst []byte // return value + + switch length { + case + 8, // time (can be up to 10 when negative and 100+ hours) + 10, 11, 12, 13, 14, 15: // time with fractional seconds + default: + return nil, fmt.Errorf("illegal TIME length %d", length) + } + switch len(src) { + case 8, 12: + default: + return nil, fmt.Errorf("invalid TIME packet length %d", len(src)) + } + // +2 to enable negative time and 100+ hours + dst = make([]byte, 0, length+2) + if src[0] == 1 { + dst = append(dst, '-') + } + days := binary.LittleEndian.Uint32(src[1:5]) + hours := int64(days)*24 + int64(src[5]) + + if hours >= 100 { + dst = strconv.AppendInt(dst, hours, 10) + } else { + dst = append(dst, digits10[hours], digits01[hours]) + } + + min, sec := src[6], src[7] + dst = append(dst, ':', + digits10[min], digits01[min], ':', + digits10[sec], digits01[sec], + ) + return appendMicrosecs(dst, src[8:], int(length)-9), nil +} + +/****************************************************************************** +* Convert from and to bytes * +******************************************************************************/ + +func uint64ToBytes(n uint64) []byte { + return []byte{ + byte(n), + byte(n >> 8), + byte(n >> 16), + byte(n >> 24), + byte(n >> 32), + byte(n >> 40), + byte(n >> 48), + byte(n >> 56), + } +} + +func uint64ToString(n uint64) []byte { + var a [20]byte + i := 20 + + // U+0030 = 0 + // ... 
+ // U+0039 = 9 + + var q uint64 + for n >= 10 { + i-- + q = n / 10 + a[i] = uint8(n-q*10) + 0x30 + n = q + } + + i-- + a[i] = uint8(n) + 0x30 + + return a[i:] +} + +// treats string value as unsigned integer representation +func stringToInt(b []byte) int { + val := 0 + for i := range b { + val *= 10 + val += int(b[i] - 0x30) + } + return val +} + +// returns the string read as a bytes slice, wheter the value is NULL, +// the number of bytes read and an error, in case the string is longer than +// the input slice +func readLengthEncodedString(b []byte) ([]byte, bool, int, error) { + // Get length + num, isNull, n := readLengthEncodedInteger(b) + if num < 1 { + return b[n:n], isNull, n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return b[n-int(num) : n : n], false, n, nil + } + return nil, false, n, io.EOF +} + +// returns the number of bytes skipped and an error, in case the string is +// longer than the input slice +func skipLengthEncodedString(b []byte) (int, error) { + // Get length + num, _, n := readLengthEncodedInteger(b) + if num < 1 { + return n, nil + } + + n += int(num) + + // Check data length + if len(b) >= n { + return n, nil + } + return n, io.EOF +} + +// returns the number read, whether the value is NULL and the number of bytes read +func readLengthEncodedInteger(b []byte) (uint64, bool, int) { + // See issue #349 + if len(b) == 0 { + return 0, true, 1 + } + + switch b[0] { + // 251: NULL + case 0xfb: + return 0, true, 1 + + // 252: value of following 2 + case 0xfc: + return uint64(b[1]) | uint64(b[2])<<8, false, 3 + + // 253: value of following 3 + case 0xfd: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16, false, 4 + + // 254: value of following 8 + case 0xfe: + return uint64(b[1]) | uint64(b[2])<<8 | uint64(b[3])<<16 | + uint64(b[4])<<24 | uint64(b[5])<<32 | uint64(b[6])<<40 | + uint64(b[7])<<48 | uint64(b[8])<<56, + false, 9 + } + + // 0-250: value of first byte + return uint64(b[0]), false, 1 +} + +// 
encodes a uint64 value and appends it to the given bytes slice +func appendLengthEncodedInteger(b []byte, n uint64) []byte { + switch { + case n <= 250: + return append(b, byte(n)) + + case n <= 0xffff: + return append(b, 0xfc, byte(n), byte(n>>8)) + + case n <= 0xffffff: + return append(b, 0xfd, byte(n), byte(n>>8), byte(n>>16)) + } + return append(b, 0xfe, byte(n), byte(n>>8), byte(n>>16), byte(n>>24), + byte(n>>32), byte(n>>40), byte(n>>48), byte(n>>56)) +} + +// reserveBuffer checks cap(buf) and expand buffer to len(buf) + appendSize. +// If cap(buf) is not enough, reallocate new buffer. +func reserveBuffer(buf []byte, appendSize int) []byte { + newSize := len(buf) + appendSize + if cap(buf) < newSize { + // Grow buffer exponentially + newBuf := make([]byte, len(buf)*2+appendSize) + copy(newBuf, buf) + buf = newBuf + } + return buf[:newSize] +} + +// escapeBytesBackslash escapes []byte with backslashes (\) +// This escapes the contents of a string (provided as []byte) by adding backslashes before special +// characters, and turning others into specific escape sequences, such as +// turning newlines into \n and null bytes into \0. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L823-L932 +func escapeBytesBackslash(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringBackslash is similar to escapeBytesBackslash but for string. 
+func escapeStringBackslash(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + switch c { + case '\x00': + buf[pos] = '\\' + buf[pos+1] = '0' + pos += 2 + case '\n': + buf[pos] = '\\' + buf[pos+1] = 'n' + pos += 2 + case '\r': + buf[pos] = '\\' + buf[pos+1] = 'r' + pos += 2 + case '\x1a': + buf[pos] = '\\' + buf[pos+1] = 'Z' + pos += 2 + case '\'': + buf[pos] = '\\' + buf[pos+1] = '\'' + pos += 2 + case '"': + buf[pos] = '\\' + buf[pos+1] = '"' + pos += 2 + case '\\': + buf[pos] = '\\' + buf[pos+1] = '\\' + pos += 2 + default: + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeBytesQuotes escapes apostrophes in []byte by doubling them up. +// This escapes the contents of a string by doubling up any apostrophes that +// it contains. This is used when the NO_BACKSLASH_ESCAPES SQL_MODE is in +// effect on the server. +// https://github.com/mysql/mysql-server/blob/mysql-5.7.5/mysys/charset.c#L963-L1038 +func escapeBytesQuotes(buf, v []byte) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for _, c := range v { + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +// escapeStringQuotes is similar to escapeBytesQuotes but for string. +func escapeStringQuotes(buf []byte, v string) []byte { + pos := len(buf) + buf = reserveBuffer(buf, len(v)*2) + + for i := 0; i < len(v); i++ { + c := v[i] + if c == '\'' { + buf[pos] = '\'' + buf[pos+1] = '\'' + pos += 2 + } else { + buf[pos] = c + pos++ + } + } + + return buf[:pos] +} + +/****************************************************************************** +* Sync utils * +******************************************************************************/ + +// noCopy may be embedded into structs which must not be copied +// after the first use. +// +// See https://github.com/golang/go/issues/8005#issuecomment-190753527 +// for details. 
+type noCopy struct{} + +// Lock is a no-op used by -copylocks checker from `go vet`. +func (*noCopy) Lock() {} + +// atomicBool is a wrapper around uint32 for usage as a boolean value with +// atomic access. +type atomicBool struct { + _noCopy noCopy + value uint32 +} + +// IsSet returns whether the current boolean value is true +func (ab *atomicBool) IsSet() bool { + return atomic.LoadUint32(&ab.value) > 0 +} + +// Set sets the value of the bool regardless of the previous value +func (ab *atomicBool) Set(value bool) { + if value { + atomic.StoreUint32(&ab.value, 1) + } else { + atomic.StoreUint32(&ab.value, 0) + } +} + +// TrySet sets the value of the bool and returns whether the value changed +func (ab *atomicBool) TrySet(value bool) bool { + if value { + return atomic.SwapUint32(&ab.value, 1) == 0 + } + return atomic.SwapUint32(&ab.value, 0) > 0 +} + +// atomicError is a wrapper for atomically accessed error values +type atomicError struct { + _noCopy noCopy + value atomic.Value +} + +// Set sets the error value regardless of the previous value. 
+// The value must not be nil +func (ae *atomicError) Set(value error) { + ae.value.Store(value) +} + +// Value returns the current error value +func (ae *atomicError) Value() error { + if v := ae.value.Load(); v != nil { + // this will panic if the value doesn't implement the error interface + return v.(error) + } + return nil +} + +func namedValueToValue(named []driver.NamedValue) ([]driver.Value, error) { + dargs := make([]driver.Value, len(named)) + for n, param := range named { + if len(param.Name) > 0 { + // TODO: support the use of Named Parameters #561 + return nil, errors.New("mysql: driver does not support the use of Named Parameters") + } + dargs[n] = param.Value + } + return dargs, nil +} + +func mapIsolationLevel(level driver.IsolationLevel) (string, error) { + switch sql.IsolationLevel(level) { + case sql.LevelRepeatableRead: + return "REPEATABLE READ", nil + case sql.LevelReadCommitted: + return "READ COMMITTED", nil + case sql.LevelReadUncommitted: + return "READ UNCOMMITTED", nil + case sql.LevelSerializable: + return "SERIALIZABLE", nil + default: + return "", fmt.Errorf("mysql: unsupported isolation level: %v", level) + } +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/AUTHORS b/terraform-server/vendor/github.com/golang/protobuf/AUTHORS new file mode 100644 index 00000000..15167cd7 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/AUTHORS @@ -0,0 +1,3 @@ +# This source code refers to The Go Authors for copyright purposes. +# The master list of authors is in the main Go distribution, +# visible at http://tip.golang.org/AUTHORS. diff --git a/terraform-server/vendor/github.com/golang/protobuf/CONTRIBUTORS b/terraform-server/vendor/github.com/golang/protobuf/CONTRIBUTORS new file mode 100644 index 00000000..1c4577e9 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/CONTRIBUTORS @@ -0,0 +1,3 @@ +# This source code was written by the Go contributors. 
+# The master list of contributors is in the main Go distribution, +# visible at http://tip.golang.org/CONTRIBUTORS. diff --git a/terraform-server/vendor/github.com/golang/protobuf/LICENSE b/terraform-server/vendor/github.com/golang/protobuf/LICENSE new file mode 100644 index 00000000..0f646931 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/LICENSE @@ -0,0 +1,28 @@ +Copyright 2010 The Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. + * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/clone.go b/terraform-server/vendor/github.com/golang/protobuf/proto/clone.go new file mode 100644 index 00000000..3cd3249f --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/clone.go @@ -0,0 +1,253 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// Protocol buffer deep copy and merge. +// TODO: RawMessage. + +package proto + +import ( + "fmt" + "log" + "reflect" + "strings" +) + +// Clone returns a deep copy of a protocol buffer. +func Clone(src Message) Message { + in := reflect.ValueOf(src) + if in.IsNil() { + return src + } + out := reflect.New(in.Type().Elem()) + dst := out.Interface().(Message) + Merge(dst, src) + return dst +} + +// Merger is the interface representing objects that can merge messages of the same type. +type Merger interface { + // Merge merges src into this message. + // Required and optional fields that are set in src will be set to that value in dst. + // Elements of repeated fields will be appended. + // + // Merge may panic if called with a different argument type than the receiver. + Merge(src Message) +} + +// generatedMerger is the custom merge method that generated protos will have. +// We must add this method since a generate Merge method will conflict with +// many existing protos that have a Merge data field already defined. +type generatedMerger interface { + XXX_Merge(src Message) +} + +// Merge merges src into dst. +// Required and optional fields that are set in src will be set to that value in dst. +// Elements of repeated fields will be appended. +// Merge panics if src and dst are not the same type, or if dst is nil. 
+func Merge(dst, src Message) { + if m, ok := dst.(Merger); ok { + m.Merge(src) + return + } + + in := reflect.ValueOf(src) + out := reflect.ValueOf(dst) + if out.IsNil() { + panic("proto: nil destination") + } + if in.Type() != out.Type() { + panic(fmt.Sprintf("proto.Merge(%T, %T) type mismatch", dst, src)) + } + if in.IsNil() { + return // Merge from nil src is a noop + } + if m, ok := dst.(generatedMerger); ok { + m.XXX_Merge(src) + return + } + mergeStruct(out.Elem(), in.Elem()) +} + +func mergeStruct(out, in reflect.Value) { + sprop := GetProperties(in.Type()) + for i := 0; i < in.NumField(); i++ { + f := in.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + mergeAny(out.Field(i), in.Field(i), false, sprop.Prop[i]) + } + + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + uf := in.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return + } + uin := uf.Bytes() + if len(uin) > 0 { + out.FieldByName("XXX_unrecognized").SetBytes(append([]byte(nil), uin...)) + } +} + +// mergeAny performs a merge between two values of the same type. +// viaPtr indicates whether the values were indirected through a pointer (implying proto2). +// prop is set if this is a struct field (it may be nil). 
+func mergeAny(out, in reflect.Value, viaPtr bool, prop *Properties) { + if in.Type() == protoMessageType { + if !in.IsNil() { + if out.IsNil() { + out.Set(reflect.ValueOf(Clone(in.Interface().(Message)))) + } else { + Merge(out.Interface().(Message), in.Interface().(Message)) + } + } + return + } + switch in.Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + if !viaPtr && isProto3Zero(in) { + return + } + out.Set(in) + case reflect.Interface: + // Probably a oneof field; copy non-nil values. + if in.IsNil() { + return + } + // Allocate destination if it is not set, or set to a different type. + // Otherwise we will merge as normal. + if out.IsNil() || out.Elem().Type() != in.Elem().Type() { + out.Set(reflect.New(in.Elem().Elem().Type())) // interface -> *T -> T -> new(T) + } + mergeAny(out.Elem(), in.Elem(), false, nil) + case reflect.Map: + if in.Len() == 0 { + return + } + if out.IsNil() { + out.Set(reflect.MakeMap(in.Type())) + } + // For maps with value types of *T or []byte we need to deep copy each value. + elemKind := in.Type().Elem().Kind() + for _, key := range in.MapKeys() { + var val reflect.Value + switch elemKind { + case reflect.Ptr: + val = reflect.New(in.Type().Elem().Elem()) + mergeAny(val, in.MapIndex(key), false, nil) + case reflect.Slice: + val = in.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + default: + val = in.MapIndex(key) + } + out.SetMapIndex(key, val) + } + case reflect.Ptr: + if in.IsNil() { + return + } + if out.IsNil() { + out.Set(reflect.New(in.Elem().Type())) + } + mergeAny(out.Elem(), in.Elem(), true, nil) + case reflect.Slice: + if in.IsNil() { + return + } + if in.Type().Elem().Kind() == reflect.Uint8 { + // []byte is a scalar bytes field, not a repeated field. + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value, and should not + // be merged. 
+ if prop != nil && prop.proto3 && in.Len() == 0 { + return + } + + // Make a deep copy. + // Append to []byte{} instead of []byte(nil) so that we never end up + // with a nil result. + out.SetBytes(append([]byte{}, in.Bytes()...)) + return + } + n := in.Len() + if out.IsNil() { + out.Set(reflect.MakeSlice(in.Type(), 0, n)) + } + switch in.Type().Elem().Kind() { + case reflect.Bool, reflect.Float32, reflect.Float64, reflect.Int32, reflect.Int64, + reflect.String, reflect.Uint32, reflect.Uint64: + out.Set(reflect.AppendSlice(out, in)) + default: + for i := 0; i < n; i++ { + x := reflect.Indirect(reflect.New(in.Type().Elem())) + mergeAny(x, in.Index(i), false, nil) + out.Set(reflect.Append(out, x)) + } + } + case reflect.Struct: + mergeStruct(out, in) + default: + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to copy %v", in) + } +} + +func mergeExtension(out, in map[int32]Extension) { + for extNum, eIn := range in { + eOut := Extension{desc: eIn.desc} + if eIn.value != nil { + v := reflect.New(reflect.TypeOf(eIn.value)).Elem() + mergeAny(v, reflect.ValueOf(eIn.value), false, nil) + eOut.value = v.Interface() + } + if eIn.enc != nil { + eOut.enc = make([]byte, len(eIn.enc)) + copy(eOut.enc, eIn.enc) + } + + out[extNum] = eOut + } +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/decode.go b/terraform-server/vendor/github.com/golang/protobuf/proto/decode.go new file mode 100644 index 00000000..63b0f08b --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/decode.go @@ -0,0 +1,427 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for decoding protocol buffer data to construct in-memory representations. + */ + +import ( + "errors" + "fmt" + "io" +) + +// errOverflow is returned when an integer is too large to be represented. +var errOverflow = errors.New("proto: integer overflow") + +// ErrInternalBadWireType is returned by generated code when an incorrect +// wire type is encountered. It does not get returned to user code. 
+var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for oneof") + +// DecodeVarint reads a varint-encoded integer from the slice. +// It returns the integer and the number of bytes consumed, or +// zero if there is not enough. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func DecodeVarint(buf []byte) (x uint64, n int) { + for shift := uint(0); shift < 64; shift += 7 { + if n >= len(buf) { + return 0, 0 + } + b := uint64(buf[n]) + n++ + x |= (b & 0x7F) << shift + if (b & 0x80) == 0 { + return x, n + } + } + + // The number is too large to represent in a 64-bit value. + return 0, 0 +} + +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { + i := p.index + l := len(p.buf) + + for shift := uint(0); shift < 64; shift += 7 { + if i >= l { + err = io.ErrUnexpectedEOF + return + } + b := p.buf[i] + i++ + x |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + p.index = i + return + } + } + + // The number is too large to represent in a 64-bit value. + err = errOverflow + return +} + +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. 
+func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + +// DecodeFixed64 reads a 64-bit integer from the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) DecodeFixed64() (x uint64, err error) { + // x, err already 0 + i := p.index + 8 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-8]) + x |= uint64(p.buf[i-7]) << 8 + x |= uint64(p.buf[i-6]) << 16 + x |= uint64(p.buf[i-5]) << 24 + x |= uint64(p.buf[i-4]) << 32 + x |= uint64(p.buf[i-3]) << 40 + x |= uint64(p.buf[i-2]) << 48 + x |= uint64(p.buf[i-1]) << 56 + return +} + +// DecodeFixed32 reads a 32-bit integer from the Buffer. 
+// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. +func (p *Buffer) DecodeFixed32() (x uint64, err error) { + // x, err already 0 + i := p.index + 4 + if i < 0 || i > len(p.buf) { + err = io.ErrUnexpectedEOF + return + } + p.index = i + + x = uint64(p.buf[i-4]) + x |= uint64(p.buf[i-3]) << 8 + x |= uint64(p.buf[i-2]) << 16 + x |= uint64(p.buf[i-1]) << 24 + return +} + +// DecodeZigzag64 reads a zigzag-encoded 64-bit integer +// from the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) DecodeZigzag64() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = (x >> 1) ^ uint64((int64(x&1)<<63)>>63) + return +} + +// DecodeZigzag32 reads a zigzag-encoded 32-bit integer +// from the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) DecodeZigzag32() (x uint64, err error) { + x, err = p.DecodeVarint() + if err != nil { + return + } + x = uint64((uint32(x) >> 1) ^ uint32((int32(x&1)<<31)>>31)) + return +} + +// DecodeRawBytes reads a count-delimited byte buffer from the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) DecodeRawBytes(alloc bool) (buf []byte, err error) { + n, err := p.DecodeVarint() + if err != nil { + return nil, err + } + + nb := int(n) + if nb < 0 { + return nil, fmt.Errorf("proto: bad byte length %d", nb) + } + end := p.index + nb + if end < p.index || end > len(p.buf) { + return nil, io.ErrUnexpectedEOF + } + + if !alloc { + // todo: check if can get more uses of alloc=false + buf = p.buf[p.index:end] + p.index += nb + return + } + + buf = make([]byte, nb) + copy(buf, p.buf[p.index:]) + p.index += nb + return +} + +// DecodeStringBytes reads an encoded string from the Buffer. +// This is the format used for the proto2 string type. 
+func (p *Buffer) DecodeStringBytes() (s string, err error) { + buf, err := p.DecodeRawBytes(false) + if err != nil { + return + } + return string(buf), nil +} + +// Unmarshaler is the interface representing objects that can +// unmarshal themselves. The argument points to data that may be +// overwritten, so implementations should not keep references to the +// buffer. +// Unmarshal implementations should not clear the receiver. +// Any unmarshaled data should be merged into the receiver. +// Callers of Unmarshal that do not want to retain existing data +// should Reset the receiver before calling Unmarshal. +type Unmarshaler interface { + Unmarshal([]byte) error +} + +// newUnmarshaler is the interface representing objects that can +// unmarshal themselves. The semantics are identical to Unmarshaler. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newUnmarshaler interface { + XXX_Unmarshal([]byte) error +} + +// Unmarshal parses the protocol buffer representation in buf and places the +// decoded result in pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// Unmarshal resets pb before starting to unmarshal, so any +// existing data in pb is always removed. Use UnmarshalMerge +// to preserve and append to existing data. +func Unmarshal(buf []byte, pb Message) error { + pb.Reset() + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// UnmarshalMerge parses the protocol buffer representation in buf and +// writes the decoded result to pb. If the struct underlying pb does not match +// the data in buf, the results can be unpredictable. +// +// UnmarshalMerge merges into existing data in pb. +// Most code should use Unmarshal instead. 
+func UnmarshalMerge(buf []byte, pb Message) error { + if u, ok := pb.(newUnmarshaler); ok { + return u.XXX_Unmarshal(buf) + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. + // + // See https://github.com/golang/protobuf/issues/424 + return u.Unmarshal(buf) + } + return NewBuffer(buf).Unmarshal(pb) +} + +// DecodeMessage reads a count-delimited message from the Buffer. +func (p *Buffer) DecodeMessage(pb Message) error { + enc, err := p.DecodeRawBytes(false) + if err != nil { + return err + } + return NewBuffer(enc).Unmarshal(pb) +} + +// DecodeGroup reads a tag-delimited group from the Buffer. +// StartGroup tag is already consumed. This function consumes +// EndGroup tag. +func (p *Buffer) DecodeGroup(pb Message) error { + b := p.buf[p.index:] + x, y := findEndGroup(b) + if x < 0 { + return io.ErrUnexpectedEOF + } + err := Unmarshal(b[:x], pb) + p.index += y + return err +} + +// Unmarshal parses the protocol buffer representation in the +// Buffer and places the decoded result in pb. If the struct +// underlying pb does not match the data in the buffer, the results can be +// unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. +func (p *Buffer) Unmarshal(pb Message) error { + // If the object can unmarshal itself, let it. + if u, ok := pb.(newUnmarshaler); ok { + err := u.XXX_Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + if u, ok := pb.(Unmarshaler); ok { + // NOTE: The history of proto have unfortunately been inconsistent + // whether Unmarshaler should or should not implicitly clear itself. + // Some implementations do, most do not. + // Thus, calling this here may or may not do what people want. 
+ // + // See https://github.com/golang/protobuf/issues/424 + err := u.Unmarshal(p.buf[p.index:]) + p.index = len(p.buf) + return err + } + + // Slow workaround for messages that aren't Unmarshalers. + // This includes some hand-coded .pb.go files and + // bootstrap protos. + // TODO: fix all of those and then add Unmarshal to + // the Message interface. Then: + // The cast above and code below can be deleted. + // The old unmarshaler can be deleted. + // Clients can call Unmarshal directly (can already do that, actually). + var info InternalMessageInfo + err := info.Unmarshal(pb, p.buf[p.index:]) + p.index = len(p.buf) + return err +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/deprecated.go b/terraform-server/vendor/github.com/golang/protobuf/proto/deprecated.go new file mode 100644 index 00000000..35b882c0 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/deprecated.go @@ -0,0 +1,63 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2018 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import "errors" + +// Deprecated: do not use. +type Stats struct{ Emalloc, Dmalloc, Encode, Decode, Chit, Cmiss, Size uint64 } + +// Deprecated: do not use. +func GetStats() Stats { return Stats{} } + +// Deprecated: do not use. +func MarshalMessageSet(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSet([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func MarshalMessageSetJSON(interface{}) ([]byte, error) { + return nil, errors.New("proto: not implemented") +} + +// Deprecated: do not use. +func UnmarshalMessageSetJSON([]byte, interface{}) error { + return errors.New("proto: not implemented") +} + +// Deprecated: do not use. 
+func RegisterMessageSetType(Message, int32, string) {} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/discard.go b/terraform-server/vendor/github.com/golang/protobuf/proto/discard.go new file mode 100644 index 00000000..dea2617c --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/discard.go @@ -0,0 +1,350 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2017 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +type generatedDiscarder interface { + XXX_DiscardUnknown() +} + +// DiscardUnknown recursively discards all unknown fields from this message +// and all embedded messages. +// +// When unmarshaling a message with unrecognized fields, the tags and values +// of such fields are preserved in the Message. This allows a later call to +// marshal to be able to produce a message that continues to have those +// unrecognized fields. To avoid this, DiscardUnknown is used to +// explicitly clear the unknown fields after unmarshaling. +// +// For proto2 messages, the unknown fields of message extensions are only +// discarded from messages that have been accessed via GetExtension. +func DiscardUnknown(m Message) { + if m, ok := m.(generatedDiscarder); ok { + m.XXX_DiscardUnknown() + return + } + // TODO: Dynamically populate a InternalMessageInfo for legacy messages, + // but the master branch has no implementation for InternalMessageInfo, + // so it would be more work to replicate that approach. + discardLegacy(m) +} + +// DiscardUnknown recursively discards all unknown fields. 
+func (a *InternalMessageInfo) DiscardUnknown(m Message) { + di := atomicLoadDiscardInfo(&a.discard) + if di == nil { + di = getDiscardInfo(reflect.TypeOf(m).Elem()) + atomicStoreDiscardInfo(&a.discard, di) + } + di.discard(toPointer(&m)) +} + +type discardInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []discardFieldInfo + unrecognized field +} + +type discardFieldInfo struct { + field field // Offset of field, guaranteed to be valid + discard func(src pointer) +} + +var ( + discardInfoMap = map[reflect.Type]*discardInfo{} + discardInfoLock sync.Mutex +) + +func getDiscardInfo(t reflect.Type) *discardInfo { + discardInfoLock.Lock() + defer discardInfoLock.Unlock() + di := discardInfoMap[t] + if di == nil { + di = &discardInfo{typ: t} + discardInfoMap[t] = di + } + return di +} + +func (di *discardInfo) discard(src pointer) { + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&di.initialized) == 0 { + di.computeDiscardInfo() + } + + for _, fi := range di.fields { + sfp := src.offset(fi.field) + fi.discard(sfp) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(src.asPointerTo(di.typ).Interface()); err == nil { + // Ignore lock since DiscardUnknown is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + DiscardUnknown(m) + } + } + } + + if di.unrecognized.IsValid() { + *src.offset(di.unrecognized).toBytes() = nil + } +} + +func (di *discardInfo) computeDiscardInfo() { + di.lock.Lock() + defer di.lock.Unlock() + if di.initialized != 0 { + return + } + t := di.typ + n := t.NumField() + + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + dfi := discardFieldInfo{field: toField(&f)} + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%v.%s cannot be a slice of pointers to primitive types", t, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%v.%s cannot be a direct struct value", t, f.Name)) + case isSlice: // E.g., []*pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sps := src.getPointerSlice() + for _, sp := range sps { + if !sp.isNil() { + di.discard(sp) + } + } + } + default: // E.g., *pb.T + di := getDiscardInfo(tf) + dfi.discard = func(src pointer) { + sp := src.getPointer() + if !sp.isNil() { + di.discard(sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a map or a slice of map values", t, f.Name)) + default: // E.g., map[K]V + if tf.Elem().Kind() == reflect.Ptr { // Proto struct (e.g., *T) + dfi.discard = func(src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + DiscardUnknown(val.Interface().(Message)) + } + } + } else { + dfi.discard = func(pointer) {} // Noop + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%v.%s cannot be a pointer to a interface or a slice of interface values", t, f.Name)) + default: // E.g., interface{} + // TODO: Make this faster? 
+ dfi.discard = func(src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + DiscardUnknown(sv.Interface().(Message)) + } + } + } + } + default: + continue + } + di.fields = append(di.fields, dfi) + } + + di.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + di.unrecognized = toField(&f) + } + + atomic.StoreInt32(&di.initialized, 1) +} + +func discardLegacy(m Message) { + v := reflect.ValueOf(m) + if v.Kind() != reflect.Ptr || v.IsNil() { + return + } + v = v.Elem() + if v.Kind() != reflect.Struct { + return + } + t := v.Type() + + for i := 0; i < v.NumField(); i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + vf := v.Field(i) + tf := f.Type + + // Unwrap tf to get its most basic type. 
+ var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic(fmt.Sprintf("%T.%s cannot be a slice of pointers to primitive types", m, f.Name)) + } + + switch tf.Kind() { + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("%T.%s cannot be a direct struct value", m, f.Name)) + case isSlice: // E.g., []*pb.T + for j := 0; j < vf.Len(); j++ { + discardLegacy(vf.Index(j).Interface().(Message)) + } + default: // E.g., *pb.T + discardLegacy(vf.Interface().(Message)) + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a map or a slice of map values", m, f.Name)) + default: // E.g., map[K]V + tv := vf.Type().Elem() + if tv.Kind() == reflect.Ptr && tv.Implements(protoMessageType) { // Proto struct (e.g., *T) + for _, key := range vf.MapKeys() { + val := vf.MapIndex(key) + discardLegacy(val.Interface().(Message)) + } + } + } + case reflect.Interface: + // Must be oneof field. 
+ switch { + case isPointer || isSlice: + panic(fmt.Sprintf("%T.%s cannot be a pointer to a interface or a slice of interface values", m, f.Name)) + default: // E.g., test_proto.isCommunique_Union interface + if !vf.IsNil() && f.Tag.Get("protobuf_oneof") != "" { + vf = vf.Elem() // E.g., *test_proto.Communique_Msg + if !vf.IsNil() { + vf = vf.Elem() // E.g., test_proto.Communique_Msg + vf = vf.Field(0) // E.g., Proto struct (e.g., *T) or primitive value + if vf.Kind() == reflect.Ptr { + discardLegacy(vf.Interface().(Message)) + } + } + } + } + } + } + + if vf := v.FieldByName("XXX_unrecognized"); vf.IsValid() { + if vf.Type() != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + vf.Set(reflect.ValueOf([]byte(nil))) + } + + // For proto2 messages, only discard unknown fields in message extensions + // that have been accessed via GetExtension. + if em, err := extendable(m); err == nil { + // Ignore lock since discardLegacy is not concurrency safe. + emm, _ := em.extensionsRead() + for _, mx := range emm { + if m, ok := mx.value.(Message); ok { + discardLegacy(m) + } + } + } +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/encode.go b/terraform-server/vendor/github.com/golang/protobuf/proto/encode.go new file mode 100644 index 00000000..3abfed2c --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/encode.go @@ -0,0 +1,203 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "errors" + "reflect" +) + +var ( + // errRepeatedHasNil is the error returned if Marshal is called with + // a struct with a repeated field containing a nil element. + errRepeatedHasNil = errors.New("proto: repeated field has nil element") + + // errOneofHasNil is the error returned if Marshal is called with + // a struct with a oneof field containing a nil element. + errOneofHasNil = errors.New("proto: oneof field has nil value") + + // ErrNil is the error returned if Marshal is called with nil. + ErrNil = errors.New("proto: Marshal called with nil") + + // ErrTooLarge is the error returned if Marshal is called with a + // message that encodes to >2GB. 
+ ErrTooLarge = errors.New("proto: message encodes to over 2 GB") +) + +// The fundamental encoders that put bytes on the wire. +// Those that take integer types all accept uint64 and are +// therefore of type valueEncoder. + +const maxVarintBytes = 10 // maximum length of a varint + +// EncodeVarint returns the varint encoding of x. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +// Not used by the package itself, but helpful to clients +// wishing to use the same encoding. +func EncodeVarint(x uint64) []byte { + var buf [maxVarintBytes]byte + var n int + for n = 0; x > 127; n++ { + buf[n] = 0x80 | uint8(x&0x7F) + x >>= 7 + } + buf[n] = uint8(x) + n++ + return buf[0:n] +} + +// EncodeVarint writes a varint-encoded integer to the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) EncodeVarint(x uint64) error { + for x >= 1<<7 { + p.buf = append(p.buf, uint8(x&0x7f|0x80)) + x >>= 7 + } + p.buf = append(p.buf, uint8(x)) + return nil +} + +// SizeVarint returns the varint encoding size of an integer. +func SizeVarint(x uint64) int { + switch { + case x < 1<<7: + return 1 + case x < 1<<14: + return 2 + case x < 1<<21: + return 3 + case x < 1<<28: + return 4 + case x < 1<<35: + return 5 + case x < 1<<42: + return 6 + case x < 1<<49: + return 7 + case x < 1<<56: + return 8 + case x < 1<<63: + return 9 + } + return 10 +} + +// EncodeFixed64 writes a 64-bit integer to the Buffer. +// This is the format for the +// fixed64, sfixed64, and double protocol buffer types. +func (p *Buffer) EncodeFixed64(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24), + uint8(x>>32), + uint8(x>>40), + uint8(x>>48), + uint8(x>>56)) + return nil +} + +// EncodeFixed32 writes a 32-bit integer to the Buffer. +// This is the format for the +// fixed32, sfixed32, and float protocol buffer types. 
+func (p *Buffer) EncodeFixed32(x uint64) error { + p.buf = append(p.buf, + uint8(x), + uint8(x>>8), + uint8(x>>16), + uint8(x>>24)) + return nil +} + +// EncodeZigzag64 writes a zigzag-encoded 64-bit integer +// to the Buffer. +// This is the format used for the sint64 protocol buffer type. +func (p *Buffer) EncodeZigzag64(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} + +// EncodeZigzag32 writes a zigzag-encoded 32-bit integer +// to the Buffer. +// This is the format used for the sint32 protocol buffer type. +func (p *Buffer) EncodeZigzag32(x uint64) error { + // use signed number to get arithmetic right shift. + return p.EncodeVarint(uint64((uint32(x) << 1) ^ uint32((int32(x) >> 31)))) +} + +// EncodeRawBytes writes a count-delimited byte buffer to the Buffer. +// This is the format used for the bytes protocol buffer +// type and for embedded messages. +func (p *Buffer) EncodeRawBytes(b []byte) error { + p.EncodeVarint(uint64(len(b))) + p.buf = append(p.buf, b...) + return nil +} + +// EncodeStringBytes writes an encoded string to the Buffer. +// This is the format used for the proto2 string type. +func (p *Buffer) EncodeStringBytes(s string) error { + p.EncodeVarint(uint64(len(s))) + p.buf = append(p.buf, s...) + return nil +} + +// Marshaler is the interface representing objects that can marshal themselves. +type Marshaler interface { + Marshal() ([]byte, error) +} + +// EncodeMessage writes the protocol buffer to the Buffer, +// prefixed by a varint-encoded length. +func (p *Buffer) EncodeMessage(pb Message) error { + siz := Size(pb) + p.EncodeVarint(uint64(siz)) + return p.Marshal(pb) +} + +// All protocol buffer fields are nillable, but be careful. 
+func isNil(v reflect.Value) bool { + switch v.Kind() { + case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + return v.IsNil() + } + return false +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/equal.go b/terraform-server/vendor/github.com/golang/protobuf/proto/equal.go new file mode 100644 index 00000000..f9b6e41b --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/equal.go @@ -0,0 +1,301 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2011 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Protocol buffer comparison. + +package proto + +import ( + "bytes" + "log" + "reflect" + "strings" +) + +/* +Equal returns true iff protocol buffers a and b are equal. +The arguments must both be pointers to protocol buffer structs. + +Equality is defined in this way: + - Two messages are equal iff they are the same type, + corresponding fields are equal, unknown field sets + are equal, and extensions sets are equal. + - Two set scalar fields are equal iff their values are equal. + If the fields are of a floating-point type, remember that + NaN != x for all x, including NaN. If the message is defined + in a proto3 .proto file, fields are not "set"; specifically, + zero length proto3 "bytes" fields are equal (nil == {}). + - Two repeated fields are equal iff their lengths are the same, + and their corresponding elements are equal. Note a "bytes" field, + although represented by []byte, is not a repeated field and the + rule for the scalar fields described above applies. + - Two unset fields are equal. + - Two unknown field sets are equal if their current + encoded state is equal. + - Two extension sets are equal iff they have corresponding + elements that are pairwise equal. + - Two map fields are equal iff their lengths are the same, + and they contain the same set of elements. Zero-length map + fields are equal. + - Every other combination of things are not equal. 
+ +The return value is undefined if a and b are not protocol buffers. +*/ +func Equal(a, b Message) bool { + if a == nil || b == nil { + return a == b + } + v1, v2 := reflect.ValueOf(a), reflect.ValueOf(b) + if v1.Type() != v2.Type() { + return false + } + if v1.Kind() == reflect.Ptr { + if v1.IsNil() { + return v2.IsNil() + } + if v2.IsNil() { + return false + } + v1, v2 = v1.Elem(), v2.Elem() + } + if v1.Kind() != reflect.Struct { + return false + } + return equalStruct(v1, v2) +} + +// v1 and v2 are known to have the same type. +func equalStruct(v1, v2 reflect.Value) bool { + sprop := GetProperties(v1.Type()) + for i := 0; i < v1.NumField(); i++ { + f := v1.Type().Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + f1, f2 := v1.Field(i), v2.Field(i) + if f.Type.Kind() == reflect.Ptr { + if n1, n2 := f1.IsNil(), f2.IsNil(); n1 && n2 { + // both unset + continue + } else if n1 != n2 { + // set/unset mismatch + return false + } + f1, f2 = f1.Elem(), f2.Elem() + } + if !equalAny(f1, f2, sprop.Prop[i]) { + return false + } + } + + if em1 := v1.FieldByName("XXX_InternalExtensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_InternalExtensions") + if !equalExtensions(v1.Type(), em1.Interface().(XXX_InternalExtensions), em2.Interface().(XXX_InternalExtensions)) { + return false + } + } + + if em1 := v1.FieldByName("XXX_extensions"); em1.IsValid() { + em2 := v2.FieldByName("XXX_extensions") + if !equalExtMap(v1.Type(), em1.Interface().(map[int32]Extension), em2.Interface().(map[int32]Extension)) { + return false + } + } + + uf := v1.FieldByName("XXX_unrecognized") + if !uf.IsValid() { + return true + } + + u1 := uf.Bytes() + u2 := v2.FieldByName("XXX_unrecognized").Bytes() + return bytes.Equal(u1, u2) +} + +// v1 and v2 are known to have the same type. +// prop may be nil. 
+func equalAny(v1, v2 reflect.Value, prop *Properties) bool { + if v1.Type() == protoMessageType { + m1, _ := v1.Interface().(Message) + m2, _ := v2.Interface().(Message) + return Equal(m1, m2) + } + switch v1.Kind() { + case reflect.Bool: + return v1.Bool() == v2.Bool() + case reflect.Float32, reflect.Float64: + return v1.Float() == v2.Float() + case reflect.Int32, reflect.Int64: + return v1.Int() == v2.Int() + case reflect.Interface: + // Probably a oneof field; compare the inner values. + n1, n2 := v1.IsNil(), v2.IsNil() + if n1 || n2 { + return n1 == n2 + } + e1, e2 := v1.Elem(), v2.Elem() + if e1.Type() != e2.Type() { + return false + } + return equalAny(e1, e2, nil) + case reflect.Map: + if v1.Len() != v2.Len() { + return false + } + for _, key := range v1.MapKeys() { + val2 := v2.MapIndex(key) + if !val2.IsValid() { + // This key was not found in the second map. + return false + } + if !equalAny(v1.MapIndex(key), val2, nil) { + return false + } + } + return true + case reflect.Ptr: + // Maps may have nil values in them, so check for nil. + if v1.IsNil() && v2.IsNil() { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return equalAny(v1.Elem(), v2.Elem(), prop) + case reflect.Slice: + if v1.Type().Elem().Kind() == reflect.Uint8 { + // short circuit: []byte + + // Edge case: if this is in a proto3 message, a zero length + // bytes field is considered the zero value. 
+ if prop != nil && prop.proto3 && v1.Len() == 0 && v2.Len() == 0 { + return true + } + if v1.IsNil() != v2.IsNil() { + return false + } + return bytes.Equal(v1.Interface().([]byte), v2.Interface().([]byte)) + } + + if v1.Len() != v2.Len() { + return false + } + for i := 0; i < v1.Len(); i++ { + if !equalAny(v1.Index(i), v2.Index(i), prop) { + return false + } + } + return true + case reflect.String: + return v1.Interface().(string) == v2.Interface().(string) + case reflect.Struct: + return equalStruct(v1, v2) + case reflect.Uint32, reflect.Uint64: + return v1.Uint() == v2.Uint() + } + + // unknown type, so not a protocol buffer + log.Printf("proto: don't know how to compare %v", v1) + return false +} + +// base is the struct type that the extensions are based on. +// x1 and x2 are InternalExtensions. +func equalExtensions(base reflect.Type, x1, x2 XXX_InternalExtensions) bool { + em1, _ := x1.extensionsRead() + em2, _ := x2.extensionsRead() + return equalExtMap(base, em1, em2) +} + +func equalExtMap(base reflect.Type, em1, em2 map[int32]Extension) bool { + if len(em1) != len(em2) { + return false + } + + for extNum, e1 := range em1 { + e2, ok := em2[extNum] + if !ok { + return false + } + + m1 := extensionAsLegacyType(e1.value) + m2 := extensionAsLegacyType(e2.value) + + if m1 == nil && m2 == nil { + // Both have only encoded form. + if bytes.Equal(e1.enc, e2.enc) { + continue + } + // The bytes are different, but the extensions might still be + // equal. We need to decode them to compare. + } + + if m1 != nil && m2 != nil { + // Both are unencoded. + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + continue + } + + // At least one is encoded. To do a semantically correct comparison + // we need to unmarshal them first. + var desc *ExtensionDesc + if m := extensionMaps[base]; m != nil { + desc = m[extNum] + } + if desc == nil { + // If both have only encoded form and the bytes are the same, + // it is handled above. 
We get here when the bytes are different. + // We don't know how to decode it, so just compare them as byte + // slices. + log.Printf("proto: don't know how to compare extension %d of %v", extNum, base) + return false + } + var err error + if m1 == nil { + m1, err = decodeExtension(e1.enc, desc) + } + if m2 == nil && err == nil { + m2, err = decodeExtension(e2.enc, desc) + } + if err != nil { + // The encoded form is invalid. + log.Printf("proto: badly encoded extension %d of %v: %v", extNum, base, err) + return false + } + if !equalAny(reflect.ValueOf(m1), reflect.ValueOf(m2), nil) { + return false + } + } + + return true +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/extensions.go b/terraform-server/vendor/github.com/golang/protobuf/proto/extensions.go new file mode 100644 index 00000000..fa88add3 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/extensions.go @@ -0,0 +1,607 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Types and routines for supporting protocol buffer extensions. + */ + +import ( + "errors" + "fmt" + "io" + "reflect" + "strconv" + "sync" +) + +// ErrMissingExtension is the error returned by GetExtension if the named extension is not in the message. +var ErrMissingExtension = errors.New("proto: missing extension") + +// ExtensionRange represents a range of message extensions for a protocol buffer. +// Used in code generated by the protocol compiler. +type ExtensionRange struct { + Start, End int32 // both inclusive +} + +// extendableProto is an interface implemented by any protocol buffer generated by the current +// proto compiler that may be extended. +type extendableProto interface { + Message + ExtensionRangeArray() []ExtensionRange + extensionsWrite() map[int32]Extension + extensionsRead() (map[int32]Extension, sync.Locker) +} + +// extendableProtoV1 is an interface implemented by a protocol buffer generated by the previous +// version of the proto compiler that may be extended. 
+type extendableProtoV1 interface { + Message + ExtensionRangeArray() []ExtensionRange + ExtensionMap() map[int32]Extension +} + +// extensionAdapter is a wrapper around extendableProtoV1 that implements extendableProto. +type extensionAdapter struct { + extendableProtoV1 +} + +func (e extensionAdapter) extensionsWrite() map[int32]Extension { + return e.ExtensionMap() +} + +func (e extensionAdapter) extensionsRead() (map[int32]Extension, sync.Locker) { + return e.ExtensionMap(), notLocker{} +} + +// notLocker is a sync.Locker whose Lock and Unlock methods are nops. +type notLocker struct{} + +func (n notLocker) Lock() {} +func (n notLocker) Unlock() {} + +// extendable returns the extendableProto interface for the given generated proto message. +// If the proto message has the old extension format, it returns a wrapper that implements +// the extendableProto interface. +func extendable(p interface{}) (extendableProto, error) { + switch p := p.(type) { + case extendableProto: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return p, nil + case extendableProtoV1: + if isNilPtr(p) { + return nil, fmt.Errorf("proto: nil %T is not extendable", p) + } + return extensionAdapter{p}, nil + } + // Don't allocate a specific error containing %T: + // this is the hot path for Clone and MarshalText. + return nil, errNotExtendable +} + +var errNotExtendable = errors.New("proto: not an extendable proto.Message") + +func isNilPtr(x interface{}) bool { + v := reflect.ValueOf(x) + return v.Kind() == reflect.Ptr && v.IsNil() +} + +// XXX_InternalExtensions is an internal representation of proto extensions. +// +// Each generated message struct type embeds an anonymous XXX_InternalExtensions field, +// thus gaining the unexported 'extensions' method, which can be called only from the proto package. 
+// +// The methods of XXX_InternalExtensions are not concurrency safe in general, +// but calls to logically read-only methods such as has and get may be executed concurrently. +type XXX_InternalExtensions struct { + // The struct must be indirect so that if a user inadvertently copies a + // generated message and its embedded XXX_InternalExtensions, they + // avoid the mayhem of a copied mutex. + // + // The mutex serializes all logically read-only operations to p.extensionMap. + // It is up to the client to ensure that write operations to p.extensionMap are + // mutually exclusive with other accesses. + p *struct { + mu sync.Mutex + extensionMap map[int32]Extension + } +} + +// extensionsWrite returns the extension map, creating it on first use. +func (e *XXX_InternalExtensions) extensionsWrite() map[int32]Extension { + if e.p == nil { + e.p = new(struct { + mu sync.Mutex + extensionMap map[int32]Extension + }) + e.p.extensionMap = make(map[int32]Extension) + } + return e.p.extensionMap +} + +// extensionsRead returns the extensions map for read-only use. It may be nil. +// The caller must hold the returned mutex's lock when accessing Elements within the map. +func (e *XXX_InternalExtensions) extensionsRead() (map[int32]Extension, sync.Locker) { + if e.p == nil { + return nil, nil + } + return e.p.extensionMap, &e.p.mu +} + +// ExtensionDesc represents an extension specification. +// Used in generated code from the protocol compiler. 
+type ExtensionDesc struct { + ExtendedType Message // nil pointer to the type that is being extended + ExtensionType interface{} // nil pointer to the extension type + Field int32 // field number + Name string // fully-qualified name of extension, for text formatting + Tag string // protobuf tag style + Filename string // name of the file in which the extension is defined +} + +func (ed *ExtensionDesc) repeated() bool { + t := reflect.TypeOf(ed.ExtensionType) + return t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 +} + +// Extension represents an extension in a message. +type Extension struct { + // When an extension is stored in a message using SetExtension + // only desc and value are set. When the message is marshaled + // enc will be set to the encoded form of the message. + // + // When a message is unmarshaled and contains extensions, each + // extension will have only enc set. When such an extension is + // accessed using GetExtension (or GetExtensions) desc and value + // will be set. + desc *ExtensionDesc + + // value is a concrete value for the extension field. Let the type of + // desc.ExtensionType be the "API type" and the type of Extension.value + // be the "storage type". The API type and storage type are the same except: + // * For scalars (except []byte), the API type uses *T, + // while the storage type uses T. + // * For repeated fields, the API type uses []T, while the storage type + // uses *[]T. + // + // The reason for the divergence is so that the storage type more naturally + // matches what is expected of when retrieving the values through the + // protobuf reflection APIs. + // + // The value may only be populated if desc is also populated. + value interface{} + + // enc is the raw bytes for the extension field. + enc []byte +} + +// SetRawExtension is for testing only. 
+func SetRawExtension(base Message, id int32, b []byte) { + epb, err := extendable(base) + if err != nil { + return + } + extmap := epb.extensionsWrite() + extmap[id] = Extension{enc: b} +} + +// isExtensionField returns true iff the given field number is in an extension range. +func isExtensionField(pb extendableProto, field int32) bool { + for _, er := range pb.ExtensionRangeArray() { + if er.Start <= field && field <= er.End { + return true + } + } + return false +} + +// checkExtensionTypes checks that the given extension is valid for pb. +func checkExtensionTypes(pb extendableProto, extension *ExtensionDesc) error { + var pbi interface{} = pb + // Check the extended type. + if ea, ok := pbi.(extensionAdapter); ok { + pbi = ea.extendableProtoV1 + } + if a, b := reflect.TypeOf(pbi), reflect.TypeOf(extension.ExtendedType); a != b { + return fmt.Errorf("proto: bad extended type; %v does not extend %v", b, a) + } + // Check the range. + if !isExtensionField(pb, extension.Field) { + return errors.New("proto: bad extension number; not in declared ranges") + } + return nil +} + +// extPropKey is sufficient to uniquely identify an extension. +type extPropKey struct { + base reflect.Type + field int32 +} + +var extProp = struct { + sync.RWMutex + m map[extPropKey]*Properties +}{ + m: make(map[extPropKey]*Properties), +} + +func extensionProperties(ed *ExtensionDesc) *Properties { + key := extPropKey{base: reflect.TypeOf(ed.ExtendedType), field: ed.Field} + + extProp.RLock() + if prop, ok := extProp.m[key]; ok { + extProp.RUnlock() + return prop + } + extProp.RUnlock() + + extProp.Lock() + defer extProp.Unlock() + // Check again. + if prop, ok := extProp.m[key]; ok { + return prop + } + + prop := new(Properties) + prop.Init(reflect.TypeOf(ed.ExtensionType), "unknown_name", ed.Tag, nil) + extProp.m[key] = prop + return prop +} + +// HasExtension returns whether the given extension is present in pb. 
+func HasExtension(pb Message, extension *ExtensionDesc) bool { + // TODO: Check types, field numbers, etc.? + epb, err := extendable(pb) + if err != nil { + return false + } + extmap, mu := epb.extensionsRead() + if extmap == nil { + return false + } + mu.Lock() + _, ok := extmap[extension.Field] + mu.Unlock() + return ok +} + +// ClearExtension removes the given extension from pb. +func ClearExtension(pb Message, extension *ExtensionDesc) { + epb, err := extendable(pb) + if err != nil { + return + } + // TODO: Check types, field numbers, etc.? + extmap := epb.extensionsWrite() + delete(extmap, extension.Field) +} + +// GetExtension retrieves a proto2 extended field from pb. +// +// If the descriptor is type complete (i.e., ExtensionDesc.ExtensionType is non-nil), +// then GetExtension parses the encoded field and returns a Go value of the specified type. +// If the field is not present, then the default value is returned (if one is specified), +// otherwise ErrMissingExtension is reported. +// +// If the descriptor is not type complete (i.e., ExtensionDesc.ExtensionType is nil), +// then GetExtension returns the raw encoded bytes of the field extension. +func GetExtension(pb Message, extension *ExtensionDesc) (interface{}, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + + if extension.ExtendedType != nil { + // can only check type if this is a complete descriptor + if err := checkExtensionTypes(epb, extension); err != nil { + return nil, err + } + } + + emap, mu := epb.extensionsRead() + if emap == nil { + return defaultExtensionValue(extension) + } + mu.Lock() + defer mu.Unlock() + e, ok := emap[extension.Field] + if !ok { + // defaultExtensionValue returns the default value or + // ErrMissingExtension if there is no default. + return defaultExtensionValue(extension) + } + + if e.value != nil { + // Already decoded. Check the descriptor, though. + if e.desc != extension { + // This shouldn't happen. 
If it does, it means that + // GetExtension was called twice with two different + // descriptors with the same field number. + return nil, errors.New("proto: descriptor conflict") + } + return extensionAsLegacyType(e.value), nil + } + + if extension.ExtensionType == nil { + // incomplete descriptor + return e.enc, nil + } + + v, err := decodeExtension(e.enc, extension) + if err != nil { + return nil, err + } + + // Remember the decoded version and drop the encoded version. + // That way it is safe to mutate what we return. + e.value = extensionAsStorageType(v) + e.desc = extension + e.enc = nil + emap[extension.Field] = e + return extensionAsLegacyType(e.value), nil +} + +// defaultExtensionValue returns the default value for extension. +// If no default for an extension is defined ErrMissingExtension is returned. +func defaultExtensionValue(extension *ExtensionDesc) (interface{}, error) { + if extension.ExtensionType == nil { + // incomplete descriptor, so no default + return nil, ErrMissingExtension + } + + t := reflect.TypeOf(extension.ExtensionType) + props := extensionProperties(extension) + + sf, _, err := fieldDefault(t, props) + if err != nil { + return nil, err + } + + if sf == nil || sf.value == nil { + // There is no default value. + return nil, ErrMissingExtension + } + + if t.Kind() != reflect.Ptr { + // We do not need to return a Ptr, we can directly return sf.value. + return sf.value, nil + } + + // We need to return an interface{} that is a pointer to sf.value. + value := reflect.New(t).Elem() + value.Set(reflect.New(value.Type().Elem())) + if sf.kind == reflect.Int32 { + // We may have an int32 or an enum, but the underlying data is int32. + // Since we can't set an int32 into a non int32 reflect.value directly + // set it as a int32. + value.Elem().SetInt(int64(sf.value.(int32))) + } else { + value.Elem().Set(reflect.ValueOf(sf.value)) + } + return value.Interface(), nil +} + +// decodeExtension decodes an extension encoded in b. 
+func decodeExtension(b []byte, extension *ExtensionDesc) (interface{}, error) { + t := reflect.TypeOf(extension.ExtensionType) + unmarshal := typeUnmarshaler(t, extension.Tag) + + // t is a pointer to a struct, pointer to basic type or a slice. + // Allocate space to store the pointer/slice. + value := reflect.New(t).Elem() + + var err error + for { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + wire := int(x) & 7 + + b, err = unmarshal(b, valToPointer(value.Addr()), wire) + if err != nil { + return nil, err + } + + if len(b) == 0 { + break + } + } + return value.Interface(), nil +} + +// GetExtensions returns a slice of the extensions present in pb that are also listed in es. +// The returned slice has the same length as es; missing extensions will appear as nil elements. +func GetExtensions(pb Message, es []*ExtensionDesc) (extensions []interface{}, err error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + extensions = make([]interface{}, len(es)) + for i, e := range es { + extensions[i], err = GetExtension(epb, e) + if err == ErrMissingExtension { + err = nil + } + if err != nil { + return + } + } + return +} + +// ExtensionDescs returns a new slice containing pb's extension descriptors, in undefined order. +// For non-registered extensions, ExtensionDescs returns an incomplete descriptor containing +// just the Field field, which defines the extension's field number. 
+func ExtensionDescs(pb Message) ([]*ExtensionDesc, error) { + epb, err := extendable(pb) + if err != nil { + return nil, err + } + registeredExtensions := RegisteredExtensions(pb) + + emap, mu := epb.extensionsRead() + if emap == nil { + return nil, nil + } + mu.Lock() + defer mu.Unlock() + extensions := make([]*ExtensionDesc, 0, len(emap)) + for extid, e := range emap { + desc := e.desc + if desc == nil { + desc = registeredExtensions[extid] + if desc == nil { + desc = &ExtensionDesc{Field: extid} + } + } + + extensions = append(extensions, desc) + } + return extensions, nil +} + +// SetExtension sets the specified extension of pb to the specified value. +func SetExtension(pb Message, extension *ExtensionDesc, value interface{}) error { + epb, err := extendable(pb) + if err != nil { + return err + } + if err := checkExtensionTypes(epb, extension); err != nil { + return err + } + typ := reflect.TypeOf(extension.ExtensionType) + if typ != reflect.TypeOf(value) { + return fmt.Errorf("proto: bad extension value type. got: %T, want: %T", value, extension.ExtensionType) + } + // nil extension values need to be caught early, because the + // encoder can't distinguish an ErrNil due to a nil extension + // from an ErrNil due to a missing field. Extensions are + // always optional, so the encoder would just swallow the error + // and drop all the extensions from the encoded message. + if reflect.ValueOf(value).IsNil() { + return fmt.Errorf("proto: SetExtension called with nil value of type %T", value) + } + + extmap := epb.extensionsWrite() + extmap[extension.Field] = Extension{desc: extension, value: extensionAsStorageType(value)} + return nil +} + +// ClearAllExtensions clears all extensions from pb. +func ClearAllExtensions(pb Message) { + epb, err := extendable(pb) + if err != nil { + return + } + m := epb.extensionsWrite() + for k := range m { + delete(m, k) + } +} + +// A global registry of extensions. 
+// The generated code will register the generated descriptors by calling RegisterExtension. + +var extensionMaps = make(map[reflect.Type]map[int32]*ExtensionDesc) + +// RegisterExtension is called from the generated code. +func RegisterExtension(desc *ExtensionDesc) { + st := reflect.TypeOf(desc.ExtendedType).Elem() + m := extensionMaps[st] + if m == nil { + m = make(map[int32]*ExtensionDesc) + extensionMaps[st] = m + } + if _, ok := m[desc.Field]; ok { + panic("proto: duplicate extension registered: " + st.String() + " " + strconv.Itoa(int(desc.Field))) + } + m[desc.Field] = desc +} + +// RegisteredExtensions returns a map of the registered extensions of a +// protocol buffer struct, indexed by the extension number. +// The argument pb should be a nil pointer to the struct type. +func RegisteredExtensions(pb Message) map[int32]*ExtensionDesc { + return extensionMaps[reflect.TypeOf(pb).Elem()] +} + +// extensionAsLegacyType converts an value in the storage type as the API type. +// See Extension.value. +func extensionAsLegacyType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + // Represent primitive types as a pointer to the value. + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + case reflect.Ptr: + // Represent slice types as the value itself. + switch rv.Type().Elem().Kind() { + case reflect.Slice: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + } + return v +} + +// extensionAsStorageType converts an value in the API type as the storage type. +// See Extension.value. +func extensionAsStorageType(v interface{}) interface{} { + switch rv := reflect.ValueOf(v); rv.Kind() { + case reflect.Ptr: + // Represent slice types as the value itself. 
+ switch rv.Type().Elem().Kind() { + case reflect.Bool, reflect.Int32, reflect.Int64, reflect.Uint32, reflect.Uint64, reflect.Float32, reflect.Float64, reflect.String: + if rv.IsNil() { + v = reflect.Zero(rv.Type().Elem()).Interface() + } else { + v = rv.Elem().Interface() + } + } + case reflect.Slice: + // Represent slice types as a pointer to the value. + if rv.Type().Elem().Kind() != reflect.Uint8 { + rv2 := reflect.New(rv.Type()) + rv2.Elem().Set(rv) + v = rv2.Interface() + } + } + return v +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/lib.go b/terraform-server/vendor/github.com/golang/protobuf/proto/lib.go new file mode 100644 index 00000000..70fbda53 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/lib.go @@ -0,0 +1,965 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package proto converts data structures to and from the wire format of +protocol buffers. It works in concert with the Go source code generated +for .proto files by the protocol compiler. + +A summary of the properties of the protocol buffer interface +for a protocol buffer variable v: + + - Names are turned from camel_case to CamelCase for export. + - There are no methods on v to set fields; just treat + them as structure fields. + - There are getters that return a field's value if set, + and return the field's default value if unset. + The getters work even if the receiver is a nil message. + - The zero value for a struct is its correct initialization state. + All desired fields must be set before marshaling. + - A Reset() method will restore a protobuf struct to its zero state. + - Non-repeated fields are pointers to the values; nil means unset. + That is, optional or required field int32 f becomes F *int32. + - Repeated fields are slices. + - Helper functions are available to aid the setting of fields. + msg.Foo = proto.String("hello") // set field + - Constants are defined to hold the default values of all fields that + have them. They have the form Default_StructName_FieldName. + Because the getter methods handle defaulted values, + direct use of these constants should be rare. + - Enums are given type names and maps from names to values. 
+ Enum values are prefixed by the enclosing message's name, or by the + enum's type name if it is a top-level enum. Enum types have a String + method, and a Enum method to assist in message construction. + - Nested messages, groups and enums have type names prefixed with the name of + the surrounding message type. + - Extensions are given descriptor names that start with E_, + followed by an underscore-delimited list of the nested messages + that contain it (if any) followed by the CamelCased name of the + extension field itself. HasExtension, ClearExtension, GetExtension + and SetExtension are functions for manipulating extensions. + - Oneof field sets are given a single field in their message, + with distinguished wrapper types for each possible field value. + - Marshal and Unmarshal are functions to encode and decode the wire format. + +When the .proto file specifies `syntax="proto3"`, there are some differences: + + - Non-repeated fields of non-message type are values instead of pointers. + - Enum types do not get an Enum method. + +The simplest way to describe this is to see an example. 
+Given file test.proto, containing + + package example; + + enum FOO { X = 17; } + + message Test { + required string label = 1; + optional int32 type = 2 [default=77]; + repeated int64 reps = 3; + optional group OptionalGroup = 4 { + required string RequiredField = 5; + } + oneof union { + int32 number = 6; + string name = 7; + } + } + +The resulting file, test.pb.go, is: + + package example + + import proto "github.com/golang/protobuf/proto" + import math "math" + + type FOO int32 + const ( + FOO_X FOO = 17 + ) + var FOO_name = map[int32]string{ + 17: "X", + } + var FOO_value = map[string]int32{ + "X": 17, + } + + func (x FOO) Enum() *FOO { + p := new(FOO) + *p = x + return p + } + func (x FOO) String() string { + return proto.EnumName(FOO_name, int32(x)) + } + func (x *FOO) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(FOO_value, data) + if err != nil { + return err + } + *x = FOO(value) + return nil + } + + type Test struct { + Label *string `protobuf:"bytes,1,req,name=label" json:"label,omitempty"` + Type *int32 `protobuf:"varint,2,opt,name=type,def=77" json:"type,omitempty"` + Reps []int64 `protobuf:"varint,3,rep,name=reps" json:"reps,omitempty"` + Optionalgroup *Test_OptionalGroup `protobuf:"group,4,opt,name=OptionalGroup" json:"optionalgroup,omitempty"` + // Types that are valid to be assigned to Union: + // *Test_Number + // *Test_Name + Union isTest_Union `protobuf_oneof:"union"` + XXX_unrecognized []byte `json:"-"` + } + func (m *Test) Reset() { *m = Test{} } + func (m *Test) String() string { return proto.CompactTextString(m) } + func (*Test) ProtoMessage() {} + + type isTest_Union interface { + isTest_Union() + } + + type Test_Number struct { + Number int32 `protobuf:"varint,6,opt,name=number"` + } + type Test_Name struct { + Name string `protobuf:"bytes,7,opt,name=name"` + } + + func (*Test_Number) isTest_Union() {} + func (*Test_Name) isTest_Union() {} + + func (m *Test) GetUnion() isTest_Union { + if m != nil { + return 
m.Union + } + return nil + } + const Default_Test_Type int32 = 77 + + func (m *Test) GetLabel() string { + if m != nil && m.Label != nil { + return *m.Label + } + return "" + } + + func (m *Test) GetType() int32 { + if m != nil && m.Type != nil { + return *m.Type + } + return Default_Test_Type + } + + func (m *Test) GetOptionalgroup() *Test_OptionalGroup { + if m != nil { + return m.Optionalgroup + } + return nil + } + + type Test_OptionalGroup struct { + RequiredField *string `protobuf:"bytes,5,req" json:"RequiredField,omitempty"` + } + func (m *Test_OptionalGroup) Reset() { *m = Test_OptionalGroup{} } + func (m *Test_OptionalGroup) String() string { return proto.CompactTextString(m) } + + func (m *Test_OptionalGroup) GetRequiredField() string { + if m != nil && m.RequiredField != nil { + return *m.RequiredField + } + return "" + } + + func (m *Test) GetNumber() int32 { + if x, ok := m.GetUnion().(*Test_Number); ok { + return x.Number + } + return 0 + } + + func (m *Test) GetName() string { + if x, ok := m.GetUnion().(*Test_Name); ok { + return x.Name + } + return "" + } + + func init() { + proto.RegisterEnum("example.FOO", FOO_name, FOO_value) + } + +To create and play with a Test object: + + package main + + import ( + "log" + + "github.com/golang/protobuf/proto" + pb "./example.pb" + ) + + func main() { + test := &pb.Test{ + Label: proto.String("hello"), + Type: proto.Int32(17), + Reps: []int64{1, 2, 3}, + Optionalgroup: &pb.Test_OptionalGroup{ + RequiredField: proto.String("good bye"), + }, + Union: &pb.Test_Name{"fred"}, + } + data, err := proto.Marshal(test) + if err != nil { + log.Fatal("marshaling error: ", err) + } + newTest := &pb.Test{} + err = proto.Unmarshal(data, newTest) + if err != nil { + log.Fatal("unmarshaling error: ", err) + } + // Now test and newTest contain the same data. 
+ if test.GetLabel() != newTest.GetLabel() { + log.Fatalf("data mismatch %q != %q", test.GetLabel(), newTest.GetLabel()) + } + // Use a type switch to determine which oneof was set. + switch u := test.Union.(type) { + case *pb.Test_Number: // u.Number contains the number. + case *pb.Test_Name: // u.Name contains the string. + } + // etc. + } +*/ +package proto + +import ( + "encoding/json" + "fmt" + "log" + "reflect" + "sort" + "strconv" + "sync" +) + +// RequiredNotSetError is an error type returned by either Marshal or Unmarshal. +// Marshal reports this when a required field is not initialized. +// Unmarshal reports this when a required field is missing from the wire data. +type RequiredNotSetError struct{ field string } + +func (e *RequiredNotSetError) Error() string { + if e.field == "" { + return fmt.Sprintf("proto: required field not set") + } + return fmt.Sprintf("proto: required field %q not set", e.field) +} +func (e *RequiredNotSetError) RequiredNotSet() bool { + return true +} + +type invalidUTF8Error struct{ field string } + +func (e *invalidUTF8Error) Error() string { + if e.field == "" { + return "proto: invalid UTF-8 detected" + } + return fmt.Sprintf("proto: field %q contains invalid UTF-8", e.field) +} +func (e *invalidUTF8Error) InvalidUTF8() bool { + return true +} + +// errInvalidUTF8 is a sentinel error to identify fields with invalid UTF-8. +// This error should not be exposed to the external API as such errors should +// be recreated with the field information. +var errInvalidUTF8 = &invalidUTF8Error{} + +// isNonFatal reports whether the error is either a RequiredNotSet error +// or a InvalidUTF8 error. 
+func isNonFatal(err error) bool { + if re, ok := err.(interface{ RequiredNotSet() bool }); ok && re.RequiredNotSet() { + return true + } + if re, ok := err.(interface{ InvalidUTF8() bool }); ok && re.InvalidUTF8() { + return true + } + return false +} + +type nonFatal struct{ E error } + +// Merge merges err into nf and reports whether it was successful. +// Otherwise it returns false for any fatal non-nil errors. +func (nf *nonFatal) Merge(err error) (ok bool) { + if err == nil { + return true // not an error + } + if !isNonFatal(err) { + return false // fatal error + } + if nf.E == nil { + nf.E = err // store first instance of non-fatal error + } + return true +} + +// Message is implemented by generated protocol buffer messages. +type Message interface { + Reset() + String() string + ProtoMessage() +} + +// A Buffer is a buffer manager for marshaling and unmarshaling +// protocol buffers. It may be reused between invocations to +// reduce memory usage. It is not necessary to use a Buffer; +// the global functions Marshal and Unmarshal create a +// temporary Buffer and are fine for most applications. +type Buffer struct { + buf []byte // encode/decode byte stream + index int // read point + + deterministic bool +} + +// NewBuffer allocates a new Buffer and initializes its internal data to +// the contents of the argument slice. +func NewBuffer(e []byte) *Buffer { + return &Buffer{buf: e} +} + +// Reset resets the Buffer, ready for marshaling a new protocol buffer. +func (p *Buffer) Reset() { + p.buf = p.buf[0:0] // for reading/writing + p.index = 0 // for reading +} + +// SetBuf replaces the internal buffer with the slice, +// ready for unmarshaling the contents of the slice. +func (p *Buffer) SetBuf(s []byte) { + p.buf = s + p.index = 0 +} + +// Bytes returns the contents of the Buffer. +func (p *Buffer) Bytes() []byte { return p.buf } + +// SetDeterministic sets whether to use deterministic serialization. 
+// +// Deterministic serialization guarantees that for a given binary, equal +// messages will always be serialized to the same bytes. This implies: +// +// - Repeated serialization of a message will return the same bytes. +// - Different processes of the same binary (which may be executing on +// different machines) will serialize equal messages to the same bytes. +// +// Note that the deterministic serialization is NOT canonical across +// languages. It is not guaranteed to remain stable over time. It is unstable +// across different builds with schema changes due to unknown fields. +// Users who need canonical serialization (e.g., persistent storage in a +// canonical form, fingerprinting, etc.) should define their own +// canonicalization specification and implement their own serializer rather +// than relying on this API. +// +// If deterministic serialization is requested, map entries will be sorted +// by keys in lexicographical order. This is an implementation detail and +// subject to change. +func (p *Buffer) SetDeterministic(deterministic bool) { + p.deterministic = deterministic +} + +/* + * Helper routines for simplifying the creation of optional fields of basic type. + */ + +// Bool is a helper routine that allocates a new bool value +// to store v and returns a pointer to it. +func Bool(v bool) *bool { + return &v +} + +// Int32 is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it. +func Int32(v int32) *int32 { + return &v +} + +// Int is a helper routine that allocates a new int32 value +// to store v and returns a pointer to it, but unlike Int32 +// its argument value is an int. +func Int(v int) *int32 { + p := new(int32) + *p = int32(v) + return p +} + +// Int64 is a helper routine that allocates a new int64 value +// to store v and returns a pointer to it. 
+func Int64(v int64) *int64 { + return &v +} + +// Float32 is a helper routine that allocates a new float32 value +// to store v and returns a pointer to it. +func Float32(v float32) *float32 { + return &v +} + +// Float64 is a helper routine that allocates a new float64 value +// to store v and returns a pointer to it. +func Float64(v float64) *float64 { + return &v +} + +// Uint32 is a helper routine that allocates a new uint32 value +// to store v and returns a pointer to it. +func Uint32(v uint32) *uint32 { + return &v +} + +// Uint64 is a helper routine that allocates a new uint64 value +// to store v and returns a pointer to it. +func Uint64(v uint64) *uint64 { + return &v +} + +// String is a helper routine that allocates a new string value +// to store v and returns a pointer to it. +func String(v string) *string { + return &v +} + +// EnumName is a helper function to simplify printing protocol buffer enums +// by name. Given an enum map and a value, it returns a useful string. +func EnumName(m map[int32]string, v int32) string { + s, ok := m[v] + if ok { + return s + } + return strconv.Itoa(int(v)) +} + +// UnmarshalJSONEnum is a helper function to simplify recovering enum int values +// from their JSON-encoded representation. Given a map from the enum's symbolic +// names to its int values, and a byte buffer containing the JSON-encoded +// value, it returns an int32 that can be cast to the enum type by the caller. +// +// The function can deal with both JSON representations, numeric and symbolic. +func UnmarshalJSONEnum(m map[string]int32, data []byte, enumName string) (int32, error) { + if data[0] == '"' { + // New style: enums are strings. + var repr string + if err := json.Unmarshal(data, &repr); err != nil { + return -1, err + } + val, ok := m[repr] + if !ok { + return 0, fmt.Errorf("unrecognized enum %s value %q", enumName, repr) + } + return val, nil + } + // Old style: enums are ints. 
+ var val int32 + if err := json.Unmarshal(data, &val); err != nil { + return 0, fmt.Errorf("cannot unmarshal %#q into enum %s", data, enumName) + } + return val, nil +} + +// DebugPrint dumps the encoded data in b in a debugging format with a header +// including the string s. Used in testing but made available for general debugging. +func (p *Buffer) DebugPrint(s string, b []byte) { + var u uint64 + + obuf := p.buf + index := p.index + p.buf = b + p.index = 0 + depth := 0 + + fmt.Printf("\n--- %s ---\n", s) + +out: + for { + for i := 0; i < depth; i++ { + fmt.Print(" ") + } + + index := p.index + if index == len(p.buf) { + break + } + + op, err := p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: fetching op err %v\n", index, err) + break out + } + tag := op >> 3 + wire := op & 7 + + switch wire { + default: + fmt.Printf("%3d: t=%3d unknown wire=%d\n", + index, tag, wire) + break out + + case WireBytes: + var r []byte + + r, err = p.DecodeRawBytes(false) + if err != nil { + break out + } + fmt.Printf("%3d: t=%3d bytes [%d]", index, tag, len(r)) + if len(r) <= 6 { + for i := 0; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } else { + for i := 0; i < 3; i++ { + fmt.Printf(" %.2x", r[i]) + } + fmt.Printf(" ..") + for i := len(r) - 3; i < len(r); i++ { + fmt.Printf(" %.2x", r[i]) + } + } + fmt.Printf("\n") + + case WireFixed32: + u, err = p.DecodeFixed32() + if err != nil { + fmt.Printf("%3d: t=%3d fix32 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix32 %d\n", index, tag, u) + + case WireFixed64: + u, err = p.DecodeFixed64() + if err != nil { + fmt.Printf("%3d: t=%3d fix64 err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d fix64 %d\n", index, tag, u) + + case WireVarint: + u, err = p.DecodeVarint() + if err != nil { + fmt.Printf("%3d: t=%3d varint err %v\n", index, tag, err) + break out + } + fmt.Printf("%3d: t=%3d varint %d\n", index, tag, u) + + case WireStartGroup: + fmt.Printf("%3d: t=%3d start\n", index, tag) 
+ depth++ + + case WireEndGroup: + depth-- + fmt.Printf("%3d: t=%3d end\n", index, tag) + } + } + + if depth != 0 { + fmt.Printf("%3d: start-end not balanced %d\n", p.index, depth) + } + fmt.Printf("\n") + + p.buf = obuf + p.index = index +} + +// SetDefaults sets unset protocol buffer fields to their default values. +// It only modifies fields that are both unset and have defined defaults. +// It recursively sets default values in any non-nil sub-messages. +func SetDefaults(pb Message) { + setDefaults(reflect.ValueOf(pb), true, false) +} + +// v is a pointer to a struct. +func setDefaults(v reflect.Value, recur, zeros bool) { + v = v.Elem() + + defaultMu.RLock() + dm, ok := defaults[v.Type()] + defaultMu.RUnlock() + if !ok { + dm = buildDefaultMessage(v.Type()) + defaultMu.Lock() + defaults[v.Type()] = dm + defaultMu.Unlock() + } + + for _, sf := range dm.scalars { + f := v.Field(sf.index) + if !f.IsNil() { + // field already set + continue + } + dv := sf.value + if dv == nil && !zeros { + // no explicit default, and don't want to set zeros + continue + } + fptr := f.Addr().Interface() // **T + // TODO: Consider batching the allocations we do here. 
+ switch sf.kind { + case reflect.Bool: + b := new(bool) + if dv != nil { + *b = dv.(bool) + } + *(fptr.(**bool)) = b + case reflect.Float32: + f := new(float32) + if dv != nil { + *f = dv.(float32) + } + *(fptr.(**float32)) = f + case reflect.Float64: + f := new(float64) + if dv != nil { + *f = dv.(float64) + } + *(fptr.(**float64)) = f + case reflect.Int32: + // might be an enum + if ft := f.Type(); ft != int32PtrType { + // enum + f.Set(reflect.New(ft.Elem())) + if dv != nil { + f.Elem().SetInt(int64(dv.(int32))) + } + } else { + // int32 field + i := new(int32) + if dv != nil { + *i = dv.(int32) + } + *(fptr.(**int32)) = i + } + case reflect.Int64: + i := new(int64) + if dv != nil { + *i = dv.(int64) + } + *(fptr.(**int64)) = i + case reflect.String: + s := new(string) + if dv != nil { + *s = dv.(string) + } + *(fptr.(**string)) = s + case reflect.Uint8: + // exceptional case: []byte + var b []byte + if dv != nil { + db := dv.([]byte) + b = make([]byte, len(db)) + copy(b, db) + } else { + b = []byte{} + } + *(fptr.(*[]byte)) = b + case reflect.Uint32: + u := new(uint32) + if dv != nil { + *u = dv.(uint32) + } + *(fptr.(**uint32)) = u + case reflect.Uint64: + u := new(uint64) + if dv != nil { + *u = dv.(uint64) + } + *(fptr.(**uint64)) = u + default: + log.Printf("proto: can't set default for field %v (sf.kind=%v)", f, sf.kind) + } + } + + for _, ni := range dm.nested { + f := v.Field(ni) + // f is *T or []*T or map[T]*T + switch f.Kind() { + case reflect.Ptr: + if f.IsNil() { + continue + } + setDefaults(f, recur, zeros) + + case reflect.Slice: + for i := 0; i < f.Len(); i++ { + e := f.Index(i) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + + case reflect.Map: + for _, k := range f.MapKeys() { + e := f.MapIndex(k) + if e.IsNil() { + continue + } + setDefaults(e, recur, zeros) + } + } + } +} + +var ( + // defaults maps a protocol buffer struct type to a slice of the fields, + // with its scalar fields set to their proto-declared non-zero 
default values. + defaultMu sync.RWMutex + defaults = make(map[reflect.Type]defaultMessage) + + int32PtrType = reflect.TypeOf((*int32)(nil)) +) + +// defaultMessage represents information about the default values of a message. +type defaultMessage struct { + scalars []scalarField + nested []int // struct field index of nested messages +} + +type scalarField struct { + index int // struct field index + kind reflect.Kind // element type (the T in *T or []T) + value interface{} // the proto-declared default value, or nil +} + +// t is a struct type. +func buildDefaultMessage(t reflect.Type) (dm defaultMessage) { + sprop := GetProperties(t) + for _, prop := range sprop.Prop { + fi, ok := sprop.decoderTags.get(prop.Tag) + if !ok { + // XXX_unrecognized + continue + } + ft := t.Field(fi).Type + + sf, nested, err := fieldDefault(ft, prop) + switch { + case err != nil: + log.Print(err) + case nested: + dm.nested = append(dm.nested, fi) + case sf != nil: + sf.index = fi + dm.scalars = append(dm.scalars, *sf) + } + } + + return dm +} + +// fieldDefault returns the scalarField for field type ft. +// sf will be nil if the field can not have a default. +// nestedMessage will be true if this is a nested message. +// Note that sf.index is not set on return. 
+func fieldDefault(ft reflect.Type, prop *Properties) (sf *scalarField, nestedMessage bool, err error) { + var canHaveDefault bool + switch ft.Kind() { + case reflect.Ptr: + if ft.Elem().Kind() == reflect.Struct { + nestedMessage = true + } else { + canHaveDefault = true // proto2 scalar field + } + + case reflect.Slice: + switch ft.Elem().Kind() { + case reflect.Ptr: + nestedMessage = true // repeated message + case reflect.Uint8: + canHaveDefault = true // bytes field + } + + case reflect.Map: + if ft.Elem().Kind() == reflect.Ptr { + nestedMessage = true // map with message values + } + } + + if !canHaveDefault { + if nestedMessage { + return nil, true, nil + } + return nil, false, nil + } + + // We now know that ft is a pointer or slice. + sf = &scalarField{kind: ft.Elem().Kind()} + + // scalar fields without defaults + if !prop.HasDefault { + return sf, false, nil + } + + // a scalar field: either *T or []byte + switch ft.Elem().Kind() { + case reflect.Bool: + x, err := strconv.ParseBool(prop.Default) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default bool %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Float32: + x, err := strconv.ParseFloat(prop.Default, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float32 %q: %v", prop.Default, err) + } + sf.value = float32(x) + case reflect.Float64: + x, err := strconv.ParseFloat(prop.Default, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default float64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.Int32: + x, err := strconv.ParseInt(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int32 %q: %v", prop.Default, err) + } + sf.value = int32(x) + case reflect.Int64: + x, err := strconv.ParseInt(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default int64 %q: %v", prop.Default, err) + } + sf.value = x + case reflect.String: + sf.value = prop.Default + 
case reflect.Uint8: + // []byte (not *uint8) + sf.value = []byte(prop.Default) + case reflect.Uint32: + x, err := strconv.ParseUint(prop.Default, 10, 32) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint32 %q: %v", prop.Default, err) + } + sf.value = uint32(x) + case reflect.Uint64: + x, err := strconv.ParseUint(prop.Default, 10, 64) + if err != nil { + return nil, false, fmt.Errorf("proto: bad default uint64 %q: %v", prop.Default, err) + } + sf.value = x + default: + return nil, false, fmt.Errorf("proto: unhandled def kind %v", ft.Elem().Kind()) + } + + return sf, false, nil +} + +// mapKeys returns a sort.Interface to be used for sorting the map keys. +// Map fields may have key types of non-float scalars, strings and enums. +func mapKeys(vs []reflect.Value) sort.Interface { + s := mapKeySorter{vs: vs} + + // Type specialization per https://developers.google.com/protocol-buffers/docs/proto#maps. + if len(vs) == 0 { + return s + } + switch vs[0].Kind() { + case reflect.Int32, reflect.Int64: + s.less = func(a, b reflect.Value) bool { return a.Int() < b.Int() } + case reflect.Uint32, reflect.Uint64: + s.less = func(a, b reflect.Value) bool { return a.Uint() < b.Uint() } + case reflect.Bool: + s.less = func(a, b reflect.Value) bool { return !a.Bool() && b.Bool() } // false < true + case reflect.String: + s.less = func(a, b reflect.Value) bool { return a.String() < b.String() } + default: + panic(fmt.Sprintf("unsupported map key type: %v", vs[0].Kind())) + } + + return s +} + +type mapKeySorter struct { + vs []reflect.Value + less func(a, b reflect.Value) bool +} + +func (s mapKeySorter) Len() int { return len(s.vs) } +func (s mapKeySorter) Swap(i, j int) { s.vs[i], s.vs[j] = s.vs[j], s.vs[i] } +func (s mapKeySorter) Less(i, j int) bool { + return s.less(s.vs[i], s.vs[j]) +} + +// isProto3Zero reports whether v is a zero proto3 value. 
+func isProto3Zero(v reflect.Value) bool { + switch v.Kind() { + case reflect.Bool: + return !v.Bool() + case reflect.Int32, reflect.Int64: + return v.Int() == 0 + case reflect.Uint32, reflect.Uint64: + return v.Uint() == 0 + case reflect.Float32, reflect.Float64: + return v.Float() == 0 + case reflect.String: + return v.String() == "" + } + return false +} + +const ( + // ProtoPackageIsVersion3 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion3 = true + + // ProtoPackageIsVersion2 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion2 = true + + // ProtoPackageIsVersion1 is referenced from generated protocol buffer files + // to assert that that code is compatible with this version of the proto package. + ProtoPackageIsVersion1 = true +) + +// InternalMessageInfo is a type used internally by generated .pb.go files. +// This type is not intended to be used by non-generated code. +// This type is not subject to any compatibility guarantee. +type InternalMessageInfo struct { + marshal *marshalInfo + unmarshal *unmarshalInfo + merge *mergeInfo + discard *discardInfo +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/message_set.go b/terraform-server/vendor/github.com/golang/protobuf/proto/message_set.go new file mode 100644 index 00000000..f48a7567 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/message_set.go @@ -0,0 +1,181 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Support for message sets. + */ + +import ( + "errors" +) + +// errNoMessageTypeID occurs when a protocol buffer does not have a message type ID. +// A message type ID is required for storing a protocol buffer in a message set. 
+var errNoMessageTypeID = errors.New("proto does not have a message type ID") + +// The first two types (_MessageSet_Item and messageSet) +// model what the protocol compiler produces for the following protocol message: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } +// That is the MessageSet wire format. We can't use a proto to generate these +// because that would introduce a circular dependency between it and this package. + +type _MessageSet_Item struct { + TypeId *int32 `protobuf:"varint,2,req,name=type_id"` + Message []byte `protobuf:"bytes,3,req,name=message"` +} + +type messageSet struct { + Item []*_MessageSet_Item `protobuf:"group,1,rep"` + XXX_unrecognized []byte + // TODO: caching? +} + +// Make sure messageSet is a Message. +var _ Message = (*messageSet)(nil) + +// messageTypeIder is an interface satisfied by a protocol buffer type +// that may be stored in a MessageSet. +type messageTypeIder interface { + MessageTypeId() int32 +} + +func (ms *messageSet) find(pb Message) *_MessageSet_Item { + mti, ok := pb.(messageTypeIder) + if !ok { + return nil + } + id := mti.MessageTypeId() + for _, item := range ms.Item { + if *item.TypeId == id { + return item + } + } + return nil +} + +func (ms *messageSet) Has(pb Message) bool { + return ms.find(pb) != nil +} + +func (ms *messageSet) Unmarshal(pb Message) error { + if item := ms.find(pb); item != nil { + return Unmarshal(item.Message, pb) + } + if _, ok := pb.(messageTypeIder); !ok { + return errNoMessageTypeID + } + return nil // TODO: return error instead? 
+} + +func (ms *messageSet) Marshal(pb Message) error { + msg, err := Marshal(pb) + if err != nil { + return err + } + if item := ms.find(pb); item != nil { + // reuse existing item + item.Message = msg + return nil + } + + mti, ok := pb.(messageTypeIder) + if !ok { + return errNoMessageTypeID + } + + mtid := mti.MessageTypeId() + ms.Item = append(ms.Item, &_MessageSet_Item{ + TypeId: &mtid, + Message: msg, + }) + return nil +} + +func (ms *messageSet) Reset() { *ms = messageSet{} } +func (ms *messageSet) String() string { return CompactTextString(ms) } +func (*messageSet) ProtoMessage() {} + +// Support for the message_set_wire_format message option. + +func skipVarint(buf []byte) []byte { + i := 0 + for ; buf[i]&0x80 != 0; i++ { + } + return buf[i+1:] +} + +// unmarshalMessageSet decodes the extension map encoded in buf in the message set wire format. +// It is called by Unmarshal methods on protocol buffer messages with the message_set_wire_format option. +func unmarshalMessageSet(buf []byte, exts interface{}) error { + var m map[int32]Extension + switch exts := exts.(type) { + case *XXX_InternalExtensions: + m = exts.extensionsWrite() + case map[int32]Extension: + m = exts + default: + return errors.New("proto: not an extension map") + } + + ms := new(messageSet) + if err := Unmarshal(buf, ms); err != nil { + return err + } + for _, item := range ms.Item { + id := *item.TypeId + msg := item.Message + + // Restore wire type and field number varint, plus length varint. + // Be careful to preserve duplicate items. + b := EncodeVarint(uint64(id)<<3 | WireBytes) + if ext, ok := m[id]; ok { + // Existing data; rip off the tag and length varint + // so we join the new data correctly. + // We can assume that ext.enc is set because we are unmarshaling. + o := ext.enc[len(b):] // skip wire type and field number + _, n := DecodeVarint(o) // calculate length of length varint + o = o[n:] // skip length varint + msg = append(o, msg...) 
// join old data and new data + } + b = append(b, EncodeVarint(uint64(len(msg)))...) + b = append(b, msg...) + + m[id] = Extension{enc: b} + } + return nil +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go b/terraform-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go new file mode 100644 index 00000000..94fa9194 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/pointer_reflect.go @@ -0,0 +1,360 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build purego appengine js + +// This file contains an implementation of proto field accesses using package reflect. +// It is slower than the code in pointer_unsafe.go but it avoids package unsafe and can +// be used on App Engine. + +package proto + +import ( + "reflect" + "sync" +) + +const unsafeAllowed = false + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by the sequence of field indices +// passed to reflect's FieldByIndex. +type field []int + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return f.Index +} + +// invalidField is an invalid field identifier. +var invalidField = field(nil) + +// zeroField is a noop when calling pointer.offset. +var zeroField = field([]int{}) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { return f != nil } + +// The pointer type is for the table-driven decoder. +// The implementation here uses a reflect.Value of pointer type to +// create a generic pointer. In pointer_unsafe.go we use unsafe +// instead of reflect to implement the same (but faster) interface. +type pointer struct { + v reflect.Value +} + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. 
+func toPointer(i *Message) pointer { + return pointer{v: reflect.ValueOf(*i)} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) pointer { + v := reflect.ValueOf(*i) + u := reflect.New(v.Type()) + u.Elem().Set(v) + if deref { + u = u.Elem() + } + return pointer{v: u} +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{v: v} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. +func (p pointer) offset(f field) pointer { + return pointer{v: p.v.Elem().FieldByIndex(f).Addr()} +} + +func (p pointer) isNil() bool { + return p.v.IsNil() +} + +// grow updates the slice s in place to make it one element longer. +// s must be addressable. +// Returns the (addressable) new element. +func grow(s reflect.Value) reflect.Value { + n, m := s.Len(), s.Cap() + if n < m { + s.SetLen(n + 1) + } else { + s.Set(reflect.Append(s, reflect.Zero(s.Type().Elem()))) + } + return s.Index(n) +} + +func (p pointer) toInt64() *int64 { + return p.v.Interface().(*int64) +} +func (p pointer) toInt64Ptr() **int64 { + return p.v.Interface().(**int64) +} +func (p pointer) toInt64Slice() *[]int64 { + return p.v.Interface().(*[]int64) +} + +var int32ptr = reflect.TypeOf((*int32)(nil)) + +func (p pointer) toInt32() *int32 { + return p.v.Convert(int32ptr).Interface().(*int32) +} + +// The toInt32Ptr/Slice methods don't work because of enums. +// Instead, we must use set/get methods for the int32ptr/slice case. 
+/* + func (p pointer) toInt32Ptr() **int32 { + return p.v.Interface().(**int32) +} + func (p pointer) toInt32Slice() *[]int32 { + return p.v.Interface().(*[]int32) +} +*/ +func (p pointer) getInt32Ptr() *int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().(*int32) + } + // an enum + return p.v.Elem().Convert(int32PtrType).Interface().(*int32) +} +func (p pointer) setInt32Ptr(v int32) { + // Allocate value in a *int32. Possibly convert that to a *enum. + // Then assign it to a **int32 or **enum. + // Note: we can convert *int32 to *enum, but we can't convert + // **int32 to **enum! + p.v.Elem().Set(reflect.ValueOf(&v).Convert(p.v.Type().Elem())) +} + +// getInt32Slice copies []int32 from p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getInt32Slice() []int32 { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + return p.v.Elem().Interface().([]int32) + } + // an enum + // Allocate a []int32, then assign []enum's values into it. + // Note: we can't convert []enum to []int32. + slice := p.v.Elem() + s := make([]int32, slice.Len()) + for i := 0; i < slice.Len(); i++ { + s[i] = int32(slice.Index(i).Int()) + } + return s +} + +// setInt32Slice copies []int32 into p as a new slice. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setInt32Slice(v []int32) { + if p.v.Type().Elem().Elem() == reflect.TypeOf(int32(0)) { + // raw int32 type + p.v.Elem().Set(reflect.ValueOf(v)) + return + } + // an enum + // Allocate a []enum, then assign []int32's values into it. + // Note: we can't convert []enum to []int32. 
+ slice := reflect.MakeSlice(p.v.Type().Elem(), len(v), cap(v)) + for i, x := range v { + slice.Index(i).SetInt(int64(x)) + } + p.v.Elem().Set(slice) +} +func (p pointer) appendInt32Slice(v int32) { + grow(p.v.Elem()).SetInt(int64(v)) +} + +func (p pointer) toUint64() *uint64 { + return p.v.Interface().(*uint64) +} +func (p pointer) toUint64Ptr() **uint64 { + return p.v.Interface().(**uint64) +} +func (p pointer) toUint64Slice() *[]uint64 { + return p.v.Interface().(*[]uint64) +} +func (p pointer) toUint32() *uint32 { + return p.v.Interface().(*uint32) +} +func (p pointer) toUint32Ptr() **uint32 { + return p.v.Interface().(**uint32) +} +func (p pointer) toUint32Slice() *[]uint32 { + return p.v.Interface().(*[]uint32) +} +func (p pointer) toBool() *bool { + return p.v.Interface().(*bool) +} +func (p pointer) toBoolPtr() **bool { + return p.v.Interface().(**bool) +} +func (p pointer) toBoolSlice() *[]bool { + return p.v.Interface().(*[]bool) +} +func (p pointer) toFloat64() *float64 { + return p.v.Interface().(*float64) +} +func (p pointer) toFloat64Ptr() **float64 { + return p.v.Interface().(**float64) +} +func (p pointer) toFloat64Slice() *[]float64 { + return p.v.Interface().(*[]float64) +} +func (p pointer) toFloat32() *float32 { + return p.v.Interface().(*float32) +} +func (p pointer) toFloat32Ptr() **float32 { + return p.v.Interface().(**float32) +} +func (p pointer) toFloat32Slice() *[]float32 { + return p.v.Interface().(*[]float32) +} +func (p pointer) toString() *string { + return p.v.Interface().(*string) +} +func (p pointer) toStringPtr() **string { + return p.v.Interface().(**string) +} +func (p pointer) toStringSlice() *[]string { + return p.v.Interface().(*[]string) +} +func (p pointer) toBytes() *[]byte { + return p.v.Interface().(*[]byte) +} +func (p pointer) toBytesSlice() *[][]byte { + return p.v.Interface().(*[][]byte) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return p.v.Interface().(*XXX_InternalExtensions) +} +func (p 
pointer) toOldExtensions() *map[int32]Extension { + return p.v.Interface().(*map[int32]Extension) +} +func (p pointer) getPointer() pointer { + return pointer{v: p.v.Elem()} +} +func (p pointer) setPointer(q pointer) { + p.v.Elem().Set(q.v) +} +func (p pointer) appendPointer(q pointer) { + grow(p.v.Elem()).Set(q.v) +} + +// getPointerSlice copies []*T from p as a new []pointer. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) getPointerSlice() []pointer { + if p.v.IsNil() { + return nil + } + n := p.v.Elem().Len() + s := make([]pointer, n) + for i := 0; i < n; i++ { + s[i] = pointer{v: p.v.Elem().Index(i)} + } + return s +} + +// setPointerSlice copies []pointer into p as a new []*T. +// This behavior differs from the implementation in pointer_unsafe.go. +func (p pointer) setPointerSlice(v []pointer) { + if v == nil { + p.v.Elem().Set(reflect.New(p.v.Elem().Type()).Elem()) + return + } + s := reflect.MakeSlice(p.v.Elem().Type(), 0, len(v)) + for _, p := range v { + s = reflect.Append(s, p.v) + } + p.v.Elem().Set(s) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + if p.v.Elem().IsNil() { + return pointer{v: p.v.Elem()} + } + return pointer{v: p.v.Elem().Elem().Elem().Field(0).Addr()} // *interface -> interface -> *struct -> struct +} + +func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + // TODO: check that p.v.Type().Elem() == t? 
+ return p.v +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + atomicLock.Lock() + defer atomicLock.Unlock() + return *p +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomicLock.Lock() + defer atomicLock.Unlock() + *p = v +} + +var atomicLock sync.Mutex diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go b/terraform-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go new file mode 100644 index 00000000..dbfffe07 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/pointer_unsafe.go @@ -0,0 +1,313 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2012 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build !purego,!appengine,!js + +// This file contains the implementation of the proto field accesses using package unsafe. + +package proto + +import ( + "reflect" + "sync/atomic" + "unsafe" +) + +const unsafeAllowed = true + +// A field identifies a field in a struct, accessible from a pointer. +// In this implementation, a field is identified by its byte offset from the start of the struct. +type field uintptr + +// toField returns a field equivalent to the given reflect field. +func toField(f *reflect.StructField) field { + return field(f.Offset) +} + +// invalidField is an invalid field identifier. +const invalidField = ^field(0) + +// zeroField is a noop when calling pointer.offset. 
+const zeroField = field(0) + +// IsValid reports whether the field identifier is valid. +func (f field) IsValid() bool { + return f != invalidField +} + +// The pointer type below is for the new table-driven encoder/decoder. +// The implementation here uses unsafe.Pointer to create a generic pointer. +// In pointer_reflect.go we use reflect instead of unsafe to implement +// the same (but slower) interface. +type pointer struct { + p unsafe.Pointer +} + +// size of pointer +var ptrSize = unsafe.Sizeof(uintptr(0)) + +// toPointer converts an interface of pointer type to a pointer +// that points to the same target. +func toPointer(i *Message) pointer { + // Super-tricky - read pointer out of data word of interface value. + // Saves ~25ns over the equivalent: + // return valToPointer(reflect.ValueOf(*i)) + return pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} +} + +// toAddrPointer converts an interface to a pointer that points to +// the interface data. +func toAddrPointer(i *interface{}, isptr, deref bool) (p pointer) { + // Super-tricky - read or get the address of data word of interface value. + if isptr { + // The interface is of pointer type, thus it is a direct interface. + // The data word is the pointer data itself. We take its address. + p = pointer{p: unsafe.Pointer(uintptr(unsafe.Pointer(i)) + ptrSize)} + } else { + // The interface is not of pointer type. The data word is the pointer + // to the data. + p = pointer{p: (*[2]unsafe.Pointer)(unsafe.Pointer(i))[1]} + } + if deref { + p.p = *(*unsafe.Pointer)(p.p) + } + return p +} + +// valToPointer converts v to a pointer. v must be of pointer type. +func valToPointer(v reflect.Value) pointer { + return pointer{p: unsafe.Pointer(v.Pointer())} +} + +// offset converts from a pointer to a structure to a pointer to +// one of its fields. 
+func (p pointer) offset(f field) pointer { + // For safety, we should panic if !f.IsValid, however calling panic causes + // this to no longer be inlineable, which is a serious performance cost. + /* + if !f.IsValid() { + panic("invalid field") + } + */ + return pointer{p: unsafe.Pointer(uintptr(p.p) + uintptr(f))} +} + +func (p pointer) isNil() bool { + return p.p == nil +} + +func (p pointer) toInt64() *int64 { + return (*int64)(p.p) +} +func (p pointer) toInt64Ptr() **int64 { + return (**int64)(p.p) +} +func (p pointer) toInt64Slice() *[]int64 { + return (*[]int64)(p.p) +} +func (p pointer) toInt32() *int32 { + return (*int32)(p.p) +} + +// See pointer_reflect.go for why toInt32Ptr/Slice doesn't exist. +/* + func (p pointer) toInt32Ptr() **int32 { + return (**int32)(p.p) + } + func (p pointer) toInt32Slice() *[]int32 { + return (*[]int32)(p.p) + } +*/ +func (p pointer) getInt32Ptr() *int32 { + return *(**int32)(p.p) +} +func (p pointer) setInt32Ptr(v int32) { + *(**int32)(p.p) = &v +} + +// getInt32Slice loads a []int32 from p. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) getInt32Slice() []int32 { + return *(*[]int32)(p.p) +} + +// setInt32Slice stores a []int32 to p. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setInt32Slice(v []int32) { + *(*[]int32)(p.p) = v +} + +// TODO: Can we get rid of appendInt32Slice and use setInt32Slice instead? 
+func (p pointer) appendInt32Slice(v int32) { + s := (*[]int32)(p.p) + *s = append(*s, v) +} + +func (p pointer) toUint64() *uint64 { + return (*uint64)(p.p) +} +func (p pointer) toUint64Ptr() **uint64 { + return (**uint64)(p.p) +} +func (p pointer) toUint64Slice() *[]uint64 { + return (*[]uint64)(p.p) +} +func (p pointer) toUint32() *uint32 { + return (*uint32)(p.p) +} +func (p pointer) toUint32Ptr() **uint32 { + return (**uint32)(p.p) +} +func (p pointer) toUint32Slice() *[]uint32 { + return (*[]uint32)(p.p) +} +func (p pointer) toBool() *bool { + return (*bool)(p.p) +} +func (p pointer) toBoolPtr() **bool { + return (**bool)(p.p) +} +func (p pointer) toBoolSlice() *[]bool { + return (*[]bool)(p.p) +} +func (p pointer) toFloat64() *float64 { + return (*float64)(p.p) +} +func (p pointer) toFloat64Ptr() **float64 { + return (**float64)(p.p) +} +func (p pointer) toFloat64Slice() *[]float64 { + return (*[]float64)(p.p) +} +func (p pointer) toFloat32() *float32 { + return (*float32)(p.p) +} +func (p pointer) toFloat32Ptr() **float32 { + return (**float32)(p.p) +} +func (p pointer) toFloat32Slice() *[]float32 { + return (*[]float32)(p.p) +} +func (p pointer) toString() *string { + return (*string)(p.p) +} +func (p pointer) toStringPtr() **string { + return (**string)(p.p) +} +func (p pointer) toStringSlice() *[]string { + return (*[]string)(p.p) +} +func (p pointer) toBytes() *[]byte { + return (*[]byte)(p.p) +} +func (p pointer) toBytesSlice() *[][]byte { + return (*[][]byte)(p.p) +} +func (p pointer) toExtensions() *XXX_InternalExtensions { + return (*XXX_InternalExtensions)(p.p) +} +func (p pointer) toOldExtensions() *map[int32]Extension { + return (*map[int32]Extension)(p.p) +} + +// getPointerSlice loads []*T from p as a []pointer. +// The value returned is aliased with the original slice. +// This behavior differs from the implementation in pointer_reflect.go. 
+func (p pointer) getPointerSlice() []pointer { + // Super-tricky - p should point to a []*T where T is a + // message type. We load it as []pointer. + return *(*[]pointer)(p.p) +} + +// setPointerSlice stores []pointer into p as a []*T. +// The value set is aliased with the input slice. +// This behavior differs from the implementation in pointer_reflect.go. +func (p pointer) setPointerSlice(v []pointer) { + // Super-tricky - p should point to a []*T where T is a + // message type. We store it as []pointer. + *(*[]pointer)(p.p) = v +} + +// getPointer loads the pointer at p and returns it. +func (p pointer) getPointer() pointer { + return pointer{p: *(*unsafe.Pointer)(p.p)} +} + +// setPointer stores the pointer q at p. +func (p pointer) setPointer(q pointer) { + *(*unsafe.Pointer)(p.p) = q.p +} + +// append q to the slice pointed to by p. +func (p pointer) appendPointer(q pointer) { + s := (*[]unsafe.Pointer)(p.p) + *s = append(*s, q.p) +} + +// getInterfacePointer returns a pointer that points to the +// interface data of the interface pointed by p. +func (p pointer) getInterfacePointer() pointer { + // Super-tricky - read pointer out of data word of interface value. + return pointer{p: (*(*[2]unsafe.Pointer)(p.p))[1]} +} + +// asPointerTo returns a reflect.Value that is a pointer to an +// object of type t stored at p. 
+func (p pointer) asPointerTo(t reflect.Type) reflect.Value { + return reflect.NewAt(t, p.p) +} + +func atomicLoadUnmarshalInfo(p **unmarshalInfo) *unmarshalInfo { + return (*unmarshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreUnmarshalInfo(p **unmarshalInfo, v *unmarshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMarshalInfo(p **marshalInfo) *marshalInfo { + return (*marshalInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMarshalInfo(p **marshalInfo, v *marshalInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadMergeInfo(p **mergeInfo) *mergeInfo { + return (*mergeInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreMergeInfo(p **mergeInfo, v *mergeInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} +func atomicLoadDiscardInfo(p **discardInfo) *discardInfo { + return (*discardInfo)(atomic.LoadPointer((*unsafe.Pointer)(unsafe.Pointer(p)))) +} +func atomicStoreDiscardInfo(p **discardInfo, v *discardInfo) { + atomic.StorePointer((*unsafe.Pointer)(unsafe.Pointer(p)), unsafe.Pointer(v)) +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/properties.go b/terraform-server/vendor/github.com/golang/protobuf/proto/properties.go new file mode 100644 index 00000000..a4b8c0cd --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/properties.go @@ -0,0 +1,544 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +/* + * Routines for encoding data into the wire format for protocol buffers. + */ + +import ( + "fmt" + "log" + "reflect" + "sort" + "strconv" + "strings" + "sync" +) + +const debug bool = false + +// Constants that identify the encoding of a value on the wire. 
+const ( + WireVarint = 0 + WireFixed64 = 1 + WireBytes = 2 + WireStartGroup = 3 + WireEndGroup = 4 + WireFixed32 = 5 +) + +// tagMap is an optimization over map[int]int for typical protocol buffer +// use-cases. Encoded protocol buffers are often in tag order with small tag +// numbers. +type tagMap struct { + fastTags []int + slowTags map[int]int +} + +// tagMapFastLimit is the upper bound on the tag number that will be stored in +// the tagMap slice rather than its map. +const tagMapFastLimit = 1024 + +func (p *tagMap) get(t int) (int, bool) { + if t > 0 && t < tagMapFastLimit { + if t >= len(p.fastTags) { + return 0, false + } + fi := p.fastTags[t] + return fi, fi >= 0 + } + fi, ok := p.slowTags[t] + return fi, ok +} + +func (p *tagMap) put(t int, fi int) { + if t > 0 && t < tagMapFastLimit { + for len(p.fastTags) < t+1 { + p.fastTags = append(p.fastTags, -1) + } + p.fastTags[t] = fi + return + } + if p.slowTags == nil { + p.slowTags = make(map[int]int) + } + p.slowTags[t] = fi +} + +// StructProperties represents properties for all the fields of a struct. +// decoderTags and decoderOrigNames should only be used by the decoder. +type StructProperties struct { + Prop []*Properties // properties for each field + reqCount int // required count + decoderTags tagMap // map from proto tag to struct field number + decoderOrigNames map[string]int // map from original name to struct field number + order []int // list of struct field numbers in tag order + + // OneofTypes contains information about the oneof fields in this message. + // It is keyed by the original name of a field. + OneofTypes map[string]*OneofProperties +} + +// OneofProperties represents information about a specific field in a oneof. 
+type OneofProperties struct { + Type reflect.Type // pointer to generated struct type for this oneof field + Field int // struct field number of the containing oneof in the message + Prop *Properties +} + +// Implement the sorting interface so we can sort the fields in tag order, as recommended by the spec. +// See encode.go, (*Buffer).enc_struct. + +func (sp *StructProperties) Len() int { return len(sp.order) } +func (sp *StructProperties) Less(i, j int) bool { + return sp.Prop[sp.order[i]].Tag < sp.Prop[sp.order[j]].Tag +} +func (sp *StructProperties) Swap(i, j int) { sp.order[i], sp.order[j] = sp.order[j], sp.order[i] } + +// Properties represents the protocol-specific behavior of a single struct field. +type Properties struct { + Name string // name of the field, for error messages + OrigName string // original name before protocol compiler (always set) + JSONName string // name to use for JSON; determined by protoc + Wire string + WireType int + Tag int + Required bool + Optional bool + Repeated bool + Packed bool // relevant for repeated primitives only + Enum string // set for enum types only + proto3 bool // whether this is known to be a proto3 field + oneof bool // whether this is a oneof field + + Default string // default value + HasDefault bool // whether an explicit default was provided + + stype reflect.Type // set for struct types only + sprop *StructProperties // set for struct types only + + mtype reflect.Type // set for map types only + MapKeyProp *Properties // set for map types only + MapValProp *Properties // set for map types only +} + +// String formats the properties in the protobuf struct field tag style. 
+func (p *Properties) String() string { + s := p.Wire + s += "," + s += strconv.Itoa(p.Tag) + if p.Required { + s += ",req" + } + if p.Optional { + s += ",opt" + } + if p.Repeated { + s += ",rep" + } + if p.Packed { + s += ",packed" + } + s += ",name=" + p.OrigName + if p.JSONName != p.OrigName { + s += ",json=" + p.JSONName + } + if p.proto3 { + s += ",proto3" + } + if p.oneof { + s += ",oneof" + } + if len(p.Enum) > 0 { + s += ",enum=" + p.Enum + } + if p.HasDefault { + s += ",def=" + p.Default + } + return s +} + +// Parse populates p by parsing a string in the protobuf struct field tag style. +func (p *Properties) Parse(s string) { + // "bytes,49,opt,name=foo,def=hello!" + fields := strings.Split(s, ",") // breaks def=, but handled below. + if len(fields) < 2 { + log.Printf("proto: tag has too few fields: %q", s) + return + } + + p.Wire = fields[0] + switch p.Wire { + case "varint": + p.WireType = WireVarint + case "fixed32": + p.WireType = WireFixed32 + case "fixed64": + p.WireType = WireFixed64 + case "zigzag32": + p.WireType = WireVarint + case "zigzag64": + p.WireType = WireVarint + case "bytes", "group": + p.WireType = WireBytes + // no numeric converter for non-numeric types + default: + log.Printf("proto: tag has unknown wire type: %q", s) + return + } + + var err error + p.Tag, err = strconv.Atoi(fields[1]) + if err != nil { + return + } + +outer: + for i := 2; i < len(fields); i++ { + f := fields[i] + switch { + case f == "req": + p.Required = true + case f == "opt": + p.Optional = true + case f == "rep": + p.Repeated = true + case f == "packed": + p.Packed = true + case strings.HasPrefix(f, "name="): + p.OrigName = f[5:] + case strings.HasPrefix(f, "json="): + p.JSONName = f[5:] + case strings.HasPrefix(f, "enum="): + p.Enum = f[5:] + case f == "proto3": + p.proto3 = true + case f == "oneof": + p.oneof = true + case strings.HasPrefix(f, "def="): + p.HasDefault = true + p.Default = f[4:] // rest of string + if i+1 < len(fields) { + // Commas aren't 
escaped, and def is always last. + p.Default += "," + strings.Join(fields[i+1:], ",") + break outer + } + } + } +} + +var protoMessageType = reflect.TypeOf((*Message)(nil)).Elem() + +// setFieldProps initializes the field properties for submessages and maps. +func (p *Properties) setFieldProps(typ reflect.Type, f *reflect.StructField, lockGetProp bool) { + switch t1 := typ; t1.Kind() { + case reflect.Ptr: + if t1.Elem().Kind() == reflect.Struct { + p.stype = t1.Elem() + } + + case reflect.Slice: + if t2 := t1.Elem(); t2.Kind() == reflect.Ptr && t2.Elem().Kind() == reflect.Struct { + p.stype = t2.Elem() + } + + case reflect.Map: + p.mtype = t1 + p.MapKeyProp = &Properties{} + p.MapKeyProp.init(reflect.PtrTo(p.mtype.Key()), "Key", f.Tag.Get("protobuf_key"), nil, lockGetProp) + p.MapValProp = &Properties{} + vtype := p.mtype.Elem() + if vtype.Kind() != reflect.Ptr && vtype.Kind() != reflect.Slice { + // The value type is not a message (*T) or bytes ([]byte), + // so we need encoders for the pointer to this type. + vtype = reflect.PtrTo(vtype) + } + p.MapValProp.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) + } + + if p.stype != nil { + if lockGetProp { + p.sprop = GetProperties(p.stype) + } else { + p.sprop = getPropertiesLocked(p.stype) + } + } +} + +var ( + marshalerType = reflect.TypeOf((*Marshaler)(nil)).Elem() +) + +// Init populates the properties from a protocol buffer struct tag. +func (p *Properties) Init(typ reflect.Type, name, tag string, f *reflect.StructField) { + p.init(typ, name, tag, f, true) +} + +func (p *Properties) init(typ reflect.Type, name, tag string, f *reflect.StructField, lockGetProp bool) { + // "bytes,49,opt,def=hello!" + p.Name = name + p.OrigName = name + if tag == "" { + return + } + p.Parse(tag) + p.setFieldProps(typ, f, lockGetProp) +} + +var ( + propertiesMu sync.RWMutex + propertiesMap = make(map[reflect.Type]*StructProperties) +) + +// GetProperties returns the list of properties for the type represented by t. 
+// t must represent a generated struct type of a protocol message. +func GetProperties(t reflect.Type) *StructProperties { + if t.Kind() != reflect.Struct { + panic("proto: type must have kind struct") + } + + // Most calls to GetProperties in a long-running program will be + // retrieving details for types we have seen before. + propertiesMu.RLock() + sprop, ok := propertiesMap[t] + propertiesMu.RUnlock() + if ok { + return sprop + } + + propertiesMu.Lock() + sprop = getPropertiesLocked(t) + propertiesMu.Unlock() + return sprop +} + +type ( + oneofFuncsIface interface { + XXX_OneofFuncs() (func(Message, *Buffer) error, func(Message, int, int, *Buffer) (bool, error), func(Message) int, []interface{}) + } + oneofWrappersIface interface { + XXX_OneofWrappers() []interface{} + } +) + +// getPropertiesLocked requires that propertiesMu is held. +func getPropertiesLocked(t reflect.Type) *StructProperties { + if prop, ok := propertiesMap[t]; ok { + return prop + } + + prop := new(StructProperties) + // in case of recursive protos, fill this in now. + propertiesMap[t] = prop + + // build properties + prop.Prop = make([]*Properties, t.NumField()) + prop.order = make([]int, t.NumField()) + + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + p := new(Properties) + name := f.Name + p.init(f.Type, name, f.Tag.Get("protobuf"), &f, false) + + oneof := f.Tag.Get("protobuf_oneof") // special case + if oneof != "" { + // Oneof fields don't use the traditional protobuf tag. + p.OrigName = oneof + } + prop.Prop[i] = p + prop.order[i] = i + if debug { + print(i, " ", f.Name, " ", t.String(), " ") + if p.Tag > 0 { + print(p.String()) + } + print("\n") + } + } + + // Re-order prop.order. + sort.Sort(prop) + + var oots []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oots = m.XXX_OneofFuncs() + case oneofWrappersIface: + oots = m.XXX_OneofWrappers() + } + if len(oots) > 0 { + // Interpret oneof metadata. 
+ prop.OneofTypes = make(map[string]*OneofProperties) + for _, oot := range oots { + oop := &OneofProperties{ + Type: reflect.ValueOf(oot).Type(), // *T + Prop: new(Properties), + } + sft := oop.Type.Elem().Field(0) + oop.Prop.Name = sft.Name + oop.Prop.Parse(sft.Tag.Get("protobuf")) + // There will be exactly one interface field that + // this new value is assignable to. + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if f.Type.Kind() != reflect.Interface { + continue + } + if !oop.Type.AssignableTo(f.Type) { + continue + } + oop.Field = i + break + } + prop.OneofTypes[oop.Prop.OrigName] = oop + } + } + + // build required counts + // build tags + reqCount := 0 + prop.decoderOrigNames = make(map[string]int) + for i, p := range prop.Prop { + if strings.HasPrefix(p.Name, "XXX_") { + // Internal fields should not appear in tags/origNames maps. + // They are handled specially when encoding and decoding. + continue + } + if p.Required { + reqCount++ + } + prop.decoderTags.put(p.Tag, i) + prop.decoderOrigNames[p.OrigName] = i + } + prop.reqCount = reqCount + + return prop +} + +// A global registry of enum types. +// The generated code will register the generated maps by calling RegisterEnum. + +var enumValueMaps = make(map[string]map[string]int32) + +// RegisterEnum is called from the generated code to install the enum descriptor +// maps into the global table to aid parsing text format protocol buffers. +func RegisterEnum(typeName string, unusedNameMap map[int32]string, valueMap map[string]int32) { + if _, ok := enumValueMaps[typeName]; ok { + panic("proto: duplicate enum registered: " + typeName) + } + enumValueMaps[typeName] = valueMap +} + +// EnumValueMap returns the mapping from names to integers of the +// enum type enumType, or a nil if not found. +func EnumValueMap(enumType string) map[string]int32 { + return enumValueMaps[enumType] +} + +// A registry of all linked message types. +// The string is a fully-qualified proto name ("pkg.Message"). 
+var ( + protoTypedNils = make(map[string]Message) // a map from proto names to typed nil pointers + protoMapTypes = make(map[string]reflect.Type) // a map from proto names to map types + revProtoTypes = make(map[reflect.Type]string) +) + +// RegisterType is called from generated code and maps from the fully qualified +// proto name to the type (pointer to struct) of the protocol buffer. +func RegisterType(x Message, name string) { + if _, ok := protoTypedNils[name]; ok { + // TODO: Some day, make this a panic. + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + if v := reflect.ValueOf(x); v.Kind() == reflect.Ptr && v.Pointer() == 0 { + // Generated code always calls RegisterType with nil x. + // This check is just for extra safety. + protoTypedNils[name] = x + } else { + protoTypedNils[name] = reflect.Zero(t).Interface().(Message) + } + revProtoTypes[t] = name +} + +// RegisterMapType is called from generated code and maps from the fully qualified +// proto name to the native map type of the proto map definition. +func RegisterMapType(x interface{}, name string) { + if reflect.TypeOf(x).Kind() != reflect.Map { + panic(fmt.Sprintf("RegisterMapType(%T, %q); want map", x, name)) + } + if _, ok := protoMapTypes[name]; ok { + log.Printf("proto: duplicate proto type registered: %s", name) + return + } + t := reflect.TypeOf(x) + protoMapTypes[name] = t + revProtoTypes[t] = name +} + +// MessageName returns the fully-qualified proto name for the given message type. +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} + +// MessageType returns the message type (pointer to struct) for a named message. +// The type is not guaranteed to implement proto.Message if the name refers to a +// map entry. 
+func MessageType(name string) reflect.Type { + if t, ok := protoTypedNils[name]; ok { + return reflect.TypeOf(t) + } + return protoMapTypes[name] +} + +// A registry of all linked proto files. +var ( + protoFiles = make(map[string][]byte) // file name => fileDescriptor +) + +// RegisterFile is called from generated code and maps from the +// full file name of a .proto file to its compressed FileDescriptorProto. +func RegisterFile(filename string, fileDescriptor []byte) { + protoFiles[filename] = fileDescriptor +} + +// FileDescriptor returns the compressed FileDescriptorProto for a .proto file. +func FileDescriptor(filename string) []byte { return protoFiles[filename] } diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/table_marshal.go b/terraform-server/vendor/github.com/golang/protobuf/proto/table_marshal.go new file mode 100644 index 00000000..5cb11fa9 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/table_marshal.go @@ -0,0 +1,2776 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "math" + "reflect" + "sort" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// a sizer takes a pointer to a field and the size of its tag, computes the size of +// the encoded data. +type sizer func(pointer, int) int + +// a marshaler takes a byte slice, a pointer to a field, and its tag (in wire format), +// marshals the field to the end of the slice, returns the slice and error (if any). +type marshaler func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) + +// marshalInfo is the information used for marshaling a message. 
+type marshalInfo struct { + typ reflect.Type + fields []*marshalFieldInfo + unrecognized field // offset of XXX_unrecognized + extensions field // offset of XXX_InternalExtensions + v1extensions field // offset of XXX_extensions + sizecache field // offset of XXX_sizecache + initialized int32 // 0 -- only typ is set, 1 -- fully initialized + messageset bool // uses message set wire format + hasmarshaler bool // has custom marshaler + sync.RWMutex // protect extElems map, also for initialization + extElems map[int32]*marshalElemInfo // info of extension elements +} + +// marshalFieldInfo is the information used for marshaling a field of a message. +type marshalFieldInfo struct { + field field + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isPointer bool + required bool // field is required + name string // name of the field, for error reporting + oneofElems map[reflect.Type]*marshalElemInfo // info of oneof elements +} + +// marshalElemInfo is the information used for marshaling an extension or oneof element. +type marshalElemInfo struct { + wiretag uint64 // tag in wire format + tagsize int // size of tag in wire format + sizer sizer + marshaler marshaler + isptr bool // elem is pointer typed, thus interface of this type is a direct interface (extension only) + deref bool // dereference the pointer before operating on it; implies isptr +} + +var ( + marshalInfoMap = map[reflect.Type]*marshalInfo{} + marshalInfoLock sync.Mutex +) + +// getMarshalInfo returns the information to marshal a given type of message. +// The info it returns may not necessarily initialized. +// t is the type of the message (NOT the pointer to it). 
+func getMarshalInfo(t reflect.Type) *marshalInfo { + marshalInfoLock.Lock() + u, ok := marshalInfoMap[t] + if !ok { + u = &marshalInfo{typ: t} + marshalInfoMap[t] = u + } + marshalInfoLock.Unlock() + return u +} + +// Size is the entry point from generated code, +// and should be ONLY called by generated code. +// It computes the size of encoded data of msg. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Size(msg Message) int { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return 0 + } + return u.size(ptr) +} + +// Marshal is the entry point from generated code, +// and should be ONLY called by generated code. +// It marshals msg to the end of b. +// a is a pointer to a place to store cached marshal info. +func (a *InternalMessageInfo) Marshal(b []byte, msg Message, deterministic bool) ([]byte, error) { + u := getMessageMarshalInfo(msg, a) + ptr := toPointer(&msg) + if ptr.isNil() { + // We get here if msg is a typed nil ((*SomeMessage)(nil)), + // so it satisfies the interface, and msg == nil wouldn't + // catch it. We don't want crash in this case. + return b, ErrNil + } + return u.marshal(b, ptr, deterministic) +} + +func getMessageMarshalInfo(msg interface{}, a *InternalMessageInfo) *marshalInfo { + // u := a.marshal, but atomically. + // We use an atomic here to ensure memory consistency. + u := atomicLoadMarshalInfo(&a.marshal) + if u == nil { + // Get marshal information from type of message. + t := reflect.ValueOf(msg).Type() + if t.Kind() != reflect.Ptr { + panic(fmt.Sprintf("cannot handle non-pointer message type %v", t)) + } + u = getMarshalInfo(t.Elem()) + // Store it in the cache for later users. + // a.marshal = u, but atomically. 
+ atomicStoreMarshalInfo(&a.marshal, u) + } + return u +} + +// size is the main function to compute the size of the encoded data of a message. +// ptr is the pointer to the message. +func (u *marshalInfo) size(ptr pointer) int { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b, _ := m.Marshal() + return len(b) + } + + n := 0 + for _, f := range u.fields { + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + n += f.sizer(ptr.offset(f.field), f.tagsize) + } + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + n += u.sizeMessageSet(e) + } else { + n += u.sizeExtensions(e) + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + n += u.sizeV1Extensions(m) + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + n += len(s) + } + // cache the result for use in marshal + if u.sizecache.IsValid() { + atomic.StoreInt32(ptr.offset(u.sizecache).toInt32(), int32(n)) + } + return n +} + +// cachedsize gets the size from cache. If there is no cache (i.e. message is not generated), +// fall back to compute the size. +func (u *marshalInfo) cachedsize(ptr pointer) int { + if u.sizecache.IsValid() { + return int(atomic.LoadInt32(ptr.offset(u.sizecache).toInt32())) + } + return u.size(ptr) +} + +// marshal is the main function to marshal a message. It takes a byte slice and appends +// the encoded data to the end of the slice, returns the slice and error (if any). +// ptr is the pointer to the message. +// If deterministic is true, map is marshaled in deterministic order. 
+func (u *marshalInfo) marshal(b []byte, ptr pointer, deterministic bool) ([]byte, error) { + if atomic.LoadInt32(&u.initialized) == 0 { + u.computeMarshalInfo() + } + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if u.hasmarshaler { + m := ptr.asPointerTo(u.typ).Interface().(Marshaler) + b1, err := m.Marshal() + b = append(b, b1...) + return b, err + } + + var err, errLater error + // The old marshaler encodes extensions at beginning. + if u.extensions.IsValid() { + e := ptr.offset(u.extensions).toExtensions() + if u.messageset { + b, err = u.appendMessageSet(b, e, deterministic) + } else { + b, err = u.appendExtensions(b, e, deterministic) + } + if err != nil { + return b, err + } + } + if u.v1extensions.IsValid() { + m := *ptr.offset(u.v1extensions).toOldExtensions() + b, err = u.appendV1Extensions(b, m, deterministic) + if err != nil { + return b, err + } + } + for _, f := range u.fields { + if f.required { + if ptr.offset(f.field).getPointer().isNil() { + // Required field is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name} + } + continue + } + } + if f.isPointer && ptr.offset(f.field).getPointer().isNil() { + // nil pointer always marshals to nothing + continue + } + b, err = f.marshaler(b, ptr.offset(f.field), f.wiretag, deterministic) + if err != nil { + if err1, ok := err.(*RequiredNotSetError); ok { + // Required field in submessage is not set. + // We record the error but keep going, to give a complete marshaling. + if errLater == nil { + errLater = &RequiredNotSetError{f.name + "." + err1.field} + } + continue + } + if err == errRepeatedHasNil { + err = errors.New("proto: repeated field " + f.name + " has nil element") + } + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." 
+ f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return b, err + } + } + if u.unrecognized.IsValid() { + s := *ptr.offset(u.unrecognized).toBytes() + b = append(b, s...) + } + return b, errLater +} + +// computeMarshalInfo initializes the marshal info. +func (u *marshalInfo) computeMarshalInfo() { + u.Lock() + defer u.Unlock() + if u.initialized != 0 { // non-atomic read is ok as it is protected by the lock + return + } + + t := u.typ + u.unrecognized = invalidField + u.extensions = invalidField + u.v1extensions = invalidField + u.sizecache = invalidField + + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + if reflect.PtrTo(t).Implements(marshalerType) { + u.hasmarshaler = true + atomic.StoreInt32(&u.initialized, 1) + return + } + + // get oneof implementers + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + + n := t.NumField() + + // deal with XXX fields first + for i := 0; i < t.NumField(); i++ { + f := t.Field(i) + if !strings.HasPrefix(f.Name, "XXX_") { + continue + } + switch f.Name { + case "XXX_sizecache": + u.sizecache = toField(&f) + case "XXX_unrecognized": + u.unrecognized = toField(&f) + case "XXX_InternalExtensions": + u.extensions = toField(&f) + u.messageset = f.Tag.Get("protobuf_messageset") == "1" + case "XXX_extensions": + u.v1extensions = toField(&f) + case "XXX_NoUnkeyedLiteral": + // nothing to do + default: + panic("unknown XXX field: " + f.Name) + } + n-- + } + + // normal fields + fields := make([]marshalFieldInfo, n) // batch allocation + u.fields = make([]*marshalFieldInfo, 0, n) + for i, j := 0, 0; i < t.NumField(); i++ { + f := t.Field(i) + + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + field := &fields[j] + j++ + field.name = f.Name + u.fields = 
append(u.fields, field) + if f.Tag.Get("protobuf_oneof") != "" { + field.computeOneofFieldInfo(&f, oneofImplementers) + continue + } + if f.Tag.Get("protobuf") == "" { + // field has no tag (not in generated message), ignore it + u.fields = u.fields[:len(u.fields)-1] + j-- + continue + } + field.computeMarshalFieldInfo(&f) + } + + // fields are marshaled in tag order on the wire. + sort.Sort(byTag(u.fields)) + + atomic.StoreInt32(&u.initialized, 1) +} + +// helper for sorting fields by tag +type byTag []*marshalFieldInfo + +func (a byTag) Len() int { return len(a) } +func (a byTag) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byTag) Less(i, j int) bool { return a[i].wiretag < a[j].wiretag } + +// getExtElemInfo returns the information to marshal an extension element. +// The info it returns is initialized. +func (u *marshalInfo) getExtElemInfo(desc *ExtensionDesc) *marshalElemInfo { + // get from cache first + u.RLock() + e, ok := u.extElems[desc.Field] + u.RUnlock() + if ok { + return e + } + + t := reflect.TypeOf(desc.ExtensionType) // pointer or slice to basic type or struct + tags := strings.Split(desc.Tag, ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if t.Kind() == reflect.Ptr && t.Elem().Kind() != reflect.Struct { + t = t.Elem() + } + sizer, marshaler := typeMarshaler(t, tags, false, false) + var deref bool + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + t = reflect.PtrTo(t) + deref = true + } + e = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + isptr: t.Kind() == reflect.Ptr, + deref: deref, + } + + // update cache + u.Lock() + if u.extElems == nil { + u.extElems = make(map[int32]*marshalElemInfo) + } + u.extElems[desc.Field] = e + u.Unlock() + return e +} + +// computeMarshalFieldInfo fills up the information to marshal a field. 
+func (fi *marshalFieldInfo) computeMarshalFieldInfo(f *reflect.StructField) { + // parse protobuf tag of the field. + // tag has format of "bytes,49,opt,name=foo,def=hello!" + tags := strings.Split(f.Tag.Get("protobuf"), ",") + if tags[0] == "" { + return + } + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + if tags[2] == "req" { + fi.required = true + } + fi.setTag(f, tag, wt) + fi.setMarshaler(f, tags) +} + +func (fi *marshalFieldInfo) computeOneofFieldInfo(f *reflect.StructField, oneofImplementers []interface{}) { + fi.field = toField(f) + fi.wiretag = math.MaxInt32 // Use a large tag number, make oneofs sorted at the end. This tag will not appear on the wire. + fi.isPointer = true + fi.sizer, fi.marshaler = makeOneOfMarshaler(fi, f) + fi.oneofElems = make(map[reflect.Type]*marshalElemInfo) + + ityp := f.Type // interface type + for _, o := range oneofImplementers { + t := reflect.TypeOf(o) + if !t.Implements(ityp) { + continue + } + sf := t.Elem().Field(0) // oneof implementer is a struct with a single field + tags := strings.Split(sf.Tag.Get("protobuf"), ",") + tag, err := strconv.Atoi(tags[1]) + if err != nil { + panic("tag is not an integer") + } + wt := wiretype(tags[0]) + sizer, marshaler := typeMarshaler(sf.Type, tags, false, true) // oneof should not omit any zero value + fi.oneofElems[t.Elem()] = &marshalElemInfo{ + wiretag: uint64(tag)<<3 | wt, + tagsize: SizeVarint(uint64(tag) << 3), + sizer: sizer, + marshaler: marshaler, + } + } +} + +// wiretype returns the wire encoding of the type. 
+func wiretype(encoding string) uint64 { + switch encoding { + case "fixed32": + return WireFixed32 + case "fixed64": + return WireFixed64 + case "varint", "zigzag32", "zigzag64": + return WireVarint + case "bytes": + return WireBytes + case "group": + return WireStartGroup + } + panic("unknown wire type " + encoding) +} + +// setTag fills up the tag (in wire format) and its size in the info of a field. +func (fi *marshalFieldInfo) setTag(f *reflect.StructField, tag int, wt uint64) { + fi.field = toField(f) + fi.wiretag = uint64(tag)<<3 | wt + fi.tagsize = SizeVarint(uint64(tag) << 3) +} + +// setMarshaler fills up the sizer and marshaler in the info of a field. +func (fi *marshalFieldInfo) setMarshaler(f *reflect.StructField, tags []string) { + switch f.Type.Kind() { + case reflect.Map: + // map field + fi.isPointer = true + fi.sizer, fi.marshaler = makeMapMarshaler(f) + return + case reflect.Ptr, reflect.Slice: + fi.isPointer = true + } + fi.sizer, fi.marshaler = typeMarshaler(f.Type, tags, true, false) +} + +// typeMarshaler returns the sizer and marshaler of a given field. +// t is the type of the field. +// tags is the generated "protobuf" tag of the field. +// If nozero is true, zero value is not marshaled to the wire. +// If oneof is true, it is a oneof field. 
+func typeMarshaler(t reflect.Type, tags []string, nozero, oneof bool) (sizer, marshaler) { + encoding := tags[0] + + pointer := false + slice := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + packed := false + proto3 := false + validateUTF8 := true + for i := 2; i < len(tags); i++ { + if tags[i] == "packed" { + packed = true + } + if tags[i] == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + switch t.Kind() { + case reflect.Bool: + if pointer { + return sizeBoolPtr, appendBoolPtr + } + if slice { + if packed { + return sizeBoolPackedSlice, appendBoolPackedSlice + } + return sizeBoolSlice, appendBoolSlice + } + if nozero { + return sizeBoolValueNoZero, appendBoolValueNoZero + } + return sizeBoolValue, appendBoolValue + case reflect.Uint32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixed32Ptr, appendFixed32Ptr + } + if slice { + if packed { + return sizeFixed32PackedSlice, appendFixed32PackedSlice + } + return sizeFixed32Slice, appendFixed32Slice + } + if nozero { + return sizeFixed32ValueNoZero, appendFixed32ValueNoZero + } + return sizeFixed32Value, appendFixed32Value + case "varint": + if pointer { + return sizeVarint32Ptr, appendVarint32Ptr + } + if slice { + if packed { + return sizeVarint32PackedSlice, appendVarint32PackedSlice + } + return sizeVarint32Slice, appendVarint32Slice + } + if nozero { + return sizeVarint32ValueNoZero, appendVarint32ValueNoZero + } + return sizeVarint32Value, appendVarint32Value + } + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return sizeFixedS32Ptr, appendFixedS32Ptr + } + if slice { + if packed { + return sizeFixedS32PackedSlice, appendFixedS32PackedSlice + } + return sizeFixedS32Slice, appendFixedS32Slice + } + if nozero { + return sizeFixedS32ValueNoZero, appendFixedS32ValueNoZero + } + return sizeFixedS32Value, 
appendFixedS32Value + case "varint": + if pointer { + return sizeVarintS32Ptr, appendVarintS32Ptr + } + if slice { + if packed { + return sizeVarintS32PackedSlice, appendVarintS32PackedSlice + } + return sizeVarintS32Slice, appendVarintS32Slice + } + if nozero { + return sizeVarintS32ValueNoZero, appendVarintS32ValueNoZero + } + return sizeVarintS32Value, appendVarintS32Value + case "zigzag32": + if pointer { + return sizeZigzag32Ptr, appendZigzag32Ptr + } + if slice { + if packed { + return sizeZigzag32PackedSlice, appendZigzag32PackedSlice + } + return sizeZigzag32Slice, appendZigzag32Slice + } + if nozero { + return sizeZigzag32ValueNoZero, appendZigzag32ValueNoZero + } + return sizeZigzag32Value, appendZigzag32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixed64Ptr, appendFixed64Ptr + } + if slice { + if packed { + return sizeFixed64PackedSlice, appendFixed64PackedSlice + } + return sizeFixed64Slice, appendFixed64Slice + } + if nozero { + return sizeFixed64ValueNoZero, appendFixed64ValueNoZero + } + return sizeFixed64Value, appendFixed64Value + case "varint": + if pointer { + return sizeVarint64Ptr, appendVarint64Ptr + } + if slice { + if packed { + return sizeVarint64PackedSlice, appendVarint64PackedSlice + } + return sizeVarint64Slice, appendVarint64Slice + } + if nozero { + return sizeVarint64ValueNoZero, appendVarint64ValueNoZero + } + return sizeVarint64Value, appendVarint64Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return sizeFixedS64Ptr, appendFixedS64Ptr + } + if slice { + if packed { + return sizeFixedS64PackedSlice, appendFixedS64PackedSlice + } + return sizeFixedS64Slice, appendFixedS64Slice + } + if nozero { + return sizeFixedS64ValueNoZero, appendFixedS64ValueNoZero + } + return sizeFixedS64Value, appendFixedS64Value + case "varint": + if pointer { + return sizeVarintS64Ptr, appendVarintS64Ptr + } + if slice { + if packed { + return 
sizeVarintS64PackedSlice, appendVarintS64PackedSlice + } + return sizeVarintS64Slice, appendVarintS64Slice + } + if nozero { + return sizeVarintS64ValueNoZero, appendVarintS64ValueNoZero + } + return sizeVarintS64Value, appendVarintS64Value + case "zigzag64": + if pointer { + return sizeZigzag64Ptr, appendZigzag64Ptr + } + if slice { + if packed { + return sizeZigzag64PackedSlice, appendZigzag64PackedSlice + } + return sizeZigzag64Slice, appendZigzag64Slice + } + if nozero { + return sizeZigzag64ValueNoZero, appendZigzag64ValueNoZero + } + return sizeZigzag64Value, appendZigzag64Value + } + case reflect.Float32: + if pointer { + return sizeFloat32Ptr, appendFloat32Ptr + } + if slice { + if packed { + return sizeFloat32PackedSlice, appendFloat32PackedSlice + } + return sizeFloat32Slice, appendFloat32Slice + } + if nozero { + return sizeFloat32ValueNoZero, appendFloat32ValueNoZero + } + return sizeFloat32Value, appendFloat32Value + case reflect.Float64: + if pointer { + return sizeFloat64Ptr, appendFloat64Ptr + } + if slice { + if packed { + return sizeFloat64PackedSlice, appendFloat64PackedSlice + } + return sizeFloat64Slice, appendFloat64Slice + } + if nozero { + return sizeFloat64ValueNoZero, appendFloat64ValueNoZero + } + return sizeFloat64Value, appendFloat64Value + case reflect.String: + if validateUTF8 { + if pointer { + return sizeStringPtr, appendUTF8StringPtr + } + if slice { + return sizeStringSlice, appendUTF8StringSlice + } + if nozero { + return sizeStringValueNoZero, appendUTF8StringValueNoZero + } + return sizeStringValue, appendUTF8StringValue + } + if pointer { + return sizeStringPtr, appendStringPtr + } + if slice { + return sizeStringSlice, appendStringSlice + } + if nozero { + return sizeStringValueNoZero, appendStringValueNoZero + } + return sizeStringValue, appendStringValue + case reflect.Slice: + if slice { + return sizeBytesSlice, appendBytesSlice + } + if oneof { + // Oneof bytes field may also have "proto3" tag. 
+ // We want to marshal it as a oneof field. Do this + // check before the proto3 check. + return sizeBytesOneof, appendBytesOneof + } + if proto3 { + return sizeBytes3, appendBytes3 + } + return sizeBytes, appendBytes + case reflect.Struct: + switch encoding { + case "group": + if slice { + return makeGroupSliceMarshaler(getMarshalInfo(t)) + } + return makeGroupMarshaler(getMarshalInfo(t)) + case "bytes": + if slice { + return makeMessageSliceMarshaler(getMarshalInfo(t)) + } + return makeMessageMarshaler(getMarshalInfo(t)) + } + } + panic(fmt.Sprintf("unknown or mismatched type: type: %v, wire type: %v", t, encoding)) +} + +// Below are functions to size/marshal a specific type of a field. +// They are stored in the field's info, and called by function pointers. +// They have type sizer or marshaler. + +func sizeFixed32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixed32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixed32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixed32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixedS32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFixedS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFixedS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + return (4 + tagsize) * len(s) +} +func sizeFixedS32PackedSlice(ptr pointer, tagsize int) int { + s 
:= ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFloat32Value(_ pointer, tagsize int) int { + return 4 + tagsize +} +func sizeFloat32ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat32Ptr() + if p == nil { + return 0 + } + return 4 + tagsize +} +func sizeFloat32Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + return (4 + tagsize) * len(s) +} +func sizeFloat32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return 0 + } + return 4*len(s) + SizeVarint(uint64(4*len(s))) + tagsize +} +func sizeFixed64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixed64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixed64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixed64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFixedS64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFixedS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFixedS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + return (8 + tagsize) * len(s) +} +func sizeFixedS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() 
+ if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeFloat64Value(_ pointer, tagsize int) int { + return 8 + tagsize +} +func sizeFloat64ValueNoZero(ptr pointer, tagsize int) int { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toFloat64Ptr() + if p == nil { + return 0 + } + return 8 + tagsize +} +func sizeFloat64Slice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + return (8 + tagsize) * len(s) +} +func sizeFloat64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return 0 + } + return 8*len(s) + SizeVarint(uint64(8*len(s))) + tagsize +} +func sizeVarint32Value(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarint32Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint32Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarint32Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarint32PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + 
return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarint64Value(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + return SizeVarint(v) + tagsize +} +func sizeVarint64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toUint64() + if v == 0 { + return 0 + } + return SizeVarint(v) + tagsize +} +func sizeVarint64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toUint64Ptr() + if p == nil { + return 0 + } + return SizeVarint(*p) + tagsize +} +func sizeVarint64Slice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(v) + tagsize + } + return n +} +func sizeVarint64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeVarintS64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v)) + tagsize +} +func sizeVarintS64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + return SizeVarint(uint64(*p)) + tagsize +} +func sizeVarintS64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + tagsize + } + return n +} +func sizeVarintS64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { 
+ return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag32Value(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt32() + if v == 0 { + return 0 + } + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Ptr(ptr pointer, tagsize int) int { + p := ptr.getInt32Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize +} +func sizeZigzag32Slice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + tagsize + } + return n +} +func sizeZigzag32PackedSlice(ptr pointer, tagsize int) int { + s := ptr.getInt32Slice() + if len(s) == 0 { + return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeZigzag64Value(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64ValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toInt64() + if v == 0 { + return 0 + } + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Ptr(ptr pointer, tagsize int) int { + p := *ptr.toInt64Ptr() + if p == nil { + return 0 + } + v := *p + return SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize +} +func sizeZigzag64Slice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1)^uint64((int64(v)>>63))) + tagsize + } + return n +} +func sizeZigzag64PackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toInt64Slice() + if len(s) == 0 { + 
return 0 + } + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + return n + SizeVarint(uint64(n)) + tagsize +} +func sizeBoolValue(_ pointer, tagsize int) int { + return 1 + tagsize +} +func sizeBoolValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toBool() + if !v { + return 0 + } + return 1 + tagsize +} +func sizeBoolPtr(ptr pointer, tagsize int) int { + p := *ptr.toBoolPtr() + if p == nil { + return 0 + } + return 1 + tagsize +} +func sizeBoolSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + return (1 + tagsize) * len(s) +} +func sizeBoolPackedSlice(ptr pointer, tagsize int) int { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return 0 + } + return len(s) + SizeVarint(uint64(len(s))) + tagsize +} +func sizeStringValue(ptr pointer, tagsize int) int { + v := *ptr.toString() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringValueNoZero(ptr pointer, tagsize int) int { + v := *ptr.toString() + if v == "" { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringPtr(ptr pointer, tagsize int) int { + p := *ptr.toStringPtr() + if p == nil { + return 0 + } + v := *p + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeStringSlice(ptr pointer, tagsize int) int { + s := *ptr.toStringSlice() + n := 0 + for _, v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} +func sizeBytes(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if v == nil { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytes3(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + if len(v) == 0 { + return 0 + } + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesOneof(ptr pointer, tagsize int) int { + v := *ptr.toBytes() + return len(v) + SizeVarint(uint64(len(v))) + tagsize +} +func sizeBytesSlice(ptr pointer, tagsize int) int { + s := *ptr.toBytesSlice() + n := 0 + for _, 
v := range s { + n += len(v) + SizeVarint(uint64(len(v))) + tagsize + } + return n +} + +// appendFixed32 appends an encoded fixed32 to b. +func appendFixed32(b []byte, v uint32) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24)) + return b +} + +// appendFixed64 appends an encoded fixed64 to b. +func appendFixed64(b []byte, v uint64) []byte { + b = append(b, + byte(v), + byte(v>>8), + byte(v>>16), + byte(v>>24), + byte(v>>32), + byte(v>>40), + byte(v>>48), + byte(v>>56)) + return b +} + +// appendVarint appends an encoded varint to b. +func appendVarint(b []byte, v uint64) []byte { + // TODO: make 1-byte (maybe 2-byte) case inline-able, once we + // have non-leaf inliner. + switch { + case v < 1<<7: + b = append(b, byte(v)) + case v < 1<<14: + b = append(b, + byte(v&0x7f|0x80), + byte(v>>7)) + case v < 1<<21: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte(v>>14)) + case v < 1<<28: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte(v>>21)) + case v < 1<<35: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte(v>>28)) + case v < 1<<42: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte(v>>35)) + case v < 1<<49: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte(v>>42)) + case v < 1<<56: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte(v>>49)) + case v < 1<<63: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + 
byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte(v>>56)) + default: + b = append(b, + byte(v&0x7f|0x80), + byte((v>>7)&0x7f|0x80), + byte((v>>14)&0x7f|0x80), + byte((v>>21)&0x7f|0x80), + byte((v>>28)&0x7f|0x80), + byte((v>>35)&0x7f|0x80), + byte((v>>42)&0x7f|0x80), + byte((v>>49)&0x7f|0x80), + byte((v>>56)&0x7f|0x80), + 1) + } + return b +} + +func appendFixed32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFixed32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, *p) + return b, nil +} +func appendFixed32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixed32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, v) + } + return b, nil +} +func appendFixedS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil +} +func appendFixedS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + return b, nil 
+} +func appendFixedS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(*p)) + return b, nil +} +func appendFixedS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFixedS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v := range s { + b = appendFixed32(b, uint32(v)) + } + return b, nil +} +func appendFloat32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float32bits(*ptr.toFloat32()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, v) + return b, nil +} +func appendFloat32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(*p)) + return b, nil +} +func appendFloat32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFloat32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(4*len(s))) + for _, v 
:= range s { + b = appendFixed32(b, math.Float32bits(v)) + } + return b, nil +} +func appendFixed64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFixed64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, *p) + return b, nil +} +func appendFixed64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixed64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, v) + } + return b, nil +} +func appendFixedS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + return b, nil +} +func appendFixedS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(*p)) + return b, nil +} +func appendFixedS64Slice(b []byte, ptr pointer, wiretag 
uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFixedS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, uint64(v)) + } + return b, nil +} +func appendFloat64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := math.Float64bits(*ptr.toFloat64()) + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, v) + return b, nil +} +func appendFloat64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toFloat64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(*p)) + return b, nil +} +func appendFloat64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendFloat64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toFloat64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(8*len(s))) + for _, v := range s { + b = appendFixed64(b, math.Float64bits(v)) + } + return b, nil +} +func appendVarint32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func 
appendVarint32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarint32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarint32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} 
+func appendVarintS32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarint64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toUint64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + return b, nil +} +func appendVarint64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toUint64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, *p) + return b, nil +} +func appendVarint64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, v) + } + return b, nil +} +func appendVarint64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toUint64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(v) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, v) + } + return b, nil +} +func appendVarintS64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + 
return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + return b, nil +} +func appendVarintS64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(*p)) + return b, nil +} +func appendVarintS64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendVarintS64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v)) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v)) + } + return b, nil +} +func appendZigzag32Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt32() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := ptr.getInt32Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + return b, nil +} +func appendZigzag32Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } 
+ return b, nil +} +func appendZigzag32PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := ptr.getInt32Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64((uint32(v) << 1) ^ uint32((int32(v) >> 31)))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64((uint32(v)<<1)^uint32((int32(v)>>31)))) + } + return b, nil +} +func appendZigzag64Value(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64ValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toInt64() + if v == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Ptr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toInt64Ptr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + v := *p + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + return b, nil +} +func appendZigzag64Slice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendZigzag64PackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toInt64Slice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + // compute size + n := 0 + for _, v := range s { + n += SizeVarint(uint64(v<<1) ^ uint64((int64(v) >> 63))) + } + b = appendVarint(b, uint64(n)) + for _, v := range s { + b = appendVarint(b, uint64(v<<1)^uint64((int64(v)>>63))) + } + return b, nil +} +func appendBoolValue(b 
[]byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBool() + if !v { + return b, nil + } + b = appendVarint(b, wiretag) + b = append(b, 1) + return b, nil +} + +func appendBoolPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toBoolPtr() + if p == nil { + return b, nil + } + b = appendVarint(b, wiretag) + if *p { + b = append(b, 1) + } else { + b = append(b, 0) + } + return b, nil +} +func appendBoolSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendBoolPackedSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBoolSlice() + if len(s) == 0 { + return b, nil + } + b = appendVarint(b, wiretag&^7|WireBytes) + b = appendVarint(b, uint64(len(s))) + for _, v := range s { + if v { + b = append(b, 1) + } else { + b = append(b, 0) + } + } + return b, nil +} +func appendStringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toString() + if v == "" { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ return b, nil +} +func appendStringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendStringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toStringSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} +func appendUTF8StringValue(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringValueNoZero(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + v := *ptr.toString() + if v == "" { + return b, nil + } + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringPtr(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + p := *ptr.toStringPtr() + if p == nil { + return b, nil + } + v := *p + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendUTF8StringSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + var invalidUTF8 bool + s := *ptr.toStringSlice() + for _, v := range s { + if !utf8.ValidString(v) { + invalidUTF8 = true + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) 
+ } + if invalidUTF8 { + return b, errInvalidUTF8 + } + return b, nil +} +func appendBytes(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if v == nil { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytes3(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + if len(v) == 0 { + return b, nil + } + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesOneof(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + v := *ptr.toBytes() + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + return b, nil +} +func appendBytesSlice(b []byte, ptr pointer, wiretag uint64, _ bool) ([]byte, error) { + s := *ptr.toBytesSlice() + for _, v := range s { + b = appendVarint(b, wiretag) + b = appendVarint(b, uint64(len(v))) + b = append(b, v...) + } + return b, nil +} + +// makeGroupMarshaler returns the sizer and marshaler for a group. +// u is the marshal info of the underlying message. +func makeGroupMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + return u.size(p) + 2*tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + var err error + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, p, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + return b, err + } +} + +// makeGroupSliceMarshaler returns the sizer and marshaler for a group slice. +// u is the marshal info of the underlying message. 
+func makeGroupSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + n += u.size(v) + 2*tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) // start group + b, err = u.marshal(b, v, deterministic) + b = appendVarint(b, wiretag+(WireEndGroup-WireStartGroup)) // end group + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMessageMarshaler returns the sizer and marshaler for a message field. +// u is the marshal info of the message. +func makeMessageMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.size(p) + return siz + SizeVarint(uint64(siz)) + tagsize + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + p := ptr.getPointer() + if p.isNil() { + return b, nil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(p) + b = appendVarint(b, uint64(siz)) + return u.marshal(b, p, deterministic) + } +} + +// makeMessageSliceMarshaler returns the sizer and marshaler for a message slice. +// u is the marshal info of the message. 
+func makeMessageSliceMarshaler(u *marshalInfo) (sizer, marshaler) { + return func(ptr pointer, tagsize int) int { + s := ptr.getPointerSlice() + n := 0 + for _, v := range s { + if v.isNil() { + continue + } + siz := u.size(v) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, wiretag uint64, deterministic bool) ([]byte, error) { + s := ptr.getPointerSlice() + var err error + var nerr nonFatal + for _, v := range s { + if v.isNil() { + return b, errRepeatedHasNil + } + b = appendVarint(b, wiretag) + siz := u.cachedsize(v) + b = appendVarint(b, uint64(siz)) + b, err = u.marshal(b, v, deterministic) + + if !nerr.Merge(err) { + if err == ErrNil { + err = errRepeatedHasNil + } + return b, err + } + } + return b, nerr.E + } +} + +// makeMapMarshaler returns the sizer and marshaler for a map field. +// f is the pointer to the reflect data structure of the field. +func makeMapMarshaler(f *reflect.StructField) (sizer, marshaler) { + // figure out key and value type + t := f.Type + keyType := t.Key() + valType := t.Elem() + keyTags := strings.Split(f.Tag.Get("protobuf_key"), ",") + valTags := strings.Split(f.Tag.Get("protobuf_val"), ",") + keySizer, keyMarshaler := typeMarshaler(keyType, keyTags, false, false) // don't omit zero value in map + valSizer, valMarshaler := typeMarshaler(valType, valTags, false, false) // don't omit zero value in map + keyWireTag := 1<<3 | wiretype(keyTags[0]) + valWireTag := 2<<3 | wiretype(valTags[0]) + + // We create an interface to get the addresses of the map key and value. + // If value is pointer-typed, the interface is a direct interface, the + // idata itself is the value. Otherwise, the idata is the pointer to the + // value. + // Key cannot be pointer-typed. + valIsPtr := valType.Kind() == reflect.Ptr + + // If value is a message with nested maps, calling + // valSizer in marshal may be quadratic. We should use + // cached version in marshal (but not in size). 
+ // If value is not message type, we don't have size cache, + // but it cannot be nested either. Just use valSizer. + valCachedSizer := valSizer + if valIsPtr && valType.Elem().Kind() == reflect.Struct { + u := getMarshalInfo(valType.Elem()) + valCachedSizer = func(ptr pointer, tagsize int) int { + // Same as message sizer, but use cache. + p := ptr.getPointer() + if p.isNil() { + return 0 + } + siz := u.cachedsize(p) + return siz + SizeVarint(uint64(siz)) + tagsize + } + } + return func(ptr pointer, tagsize int) int { + m := ptr.asPointerTo(t).Elem() // the map + n := 0 + for _, k := range m.MapKeys() { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + siz := keySizer(kaddr, 1) + valSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + n += siz + SizeVarint(uint64(siz)) + tagsize + } + return n + }, + func(b []byte, ptr pointer, tag uint64, deterministic bool) ([]byte, error) { + m := ptr.asPointerTo(t).Elem() // the map + var err error + keys := m.MapKeys() + if len(keys) > 1 && deterministic { + sort.Sort(mapKeys(keys)) + } + + var nerr nonFatal + for _, k := range keys { + ki := k.Interface() + vi := m.MapIndex(k).Interface() + kaddr := toAddrPointer(&ki, false, false) // pointer to key + vaddr := toAddrPointer(&vi, valIsPtr, false) // pointer to value + b = appendVarint(b, tag) + siz := keySizer(kaddr, 1) + valCachedSizer(vaddr, 1) // tag of key = 1 (size=1), tag of val = 2 (size=1) + b = appendVarint(b, uint64(siz)) + b, err = keyMarshaler(b, kaddr, keyWireTag, deterministic) + if !nerr.Merge(err) { + return b, err + } + b, err = valMarshaler(b, vaddr, valWireTag, deterministic) + if err != ErrNil && !nerr.Merge(err) { // allow nil value in map + return b, err + } + } + return b, nerr.E + } +} + +// makeOneOfMarshaler returns the sizer and marshaler for a oneof field. +// fi is the marshal info of the field. 
+// f is the pointer to the reflect data structure of the field. +func makeOneOfMarshaler(fi *marshalFieldInfo, f *reflect.StructField) (sizer, marshaler) { + // Oneof field is an interface. We need to get the actual data type on the fly. + t := f.Type + return func(ptr pointer, _ int) int { + p := ptr.getInterfacePointer() + if p.isNil() { + return 0 + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + e := fi.oneofElems[telem] + return e.sizer(p, e.tagsize) + }, + func(b []byte, ptr pointer, _ uint64, deterministic bool) ([]byte, error) { + p := ptr.getInterfacePointer() + if p.isNil() { + return b, nil + } + v := ptr.asPointerTo(t).Elem().Elem().Elem() // *interface -> interface -> *struct -> struct + telem := v.Type() + if telem.Field(0).Type.Kind() == reflect.Ptr && p.getPointer().isNil() { + return b, errOneofHasNil + } + e := fi.oneofElems[telem] + return e.marshaler(b, p, e.wiretag, deterministic) + } +} + +// sizeExtensions computes the size of encoded data for a XXX_InternalExtensions field. +func (u *marshalInfo) sizeExtensions(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + mu.Unlock() + return n +} + +// appendExtensions marshals a XXX_InternalExtensions field to the end of byte slice b. 
+func (u *marshalInfo) appendExtensions(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + // Not sure this is required, but the old code does it. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// message set format is: +// message MessageSet { +// repeated group Item = 1 { +// required int32 type_id = 2; +// required string message = 3; +// }; +// } + +// sizeMessageSet computes the size of encoded data for a XXX_InternalExtensions field +// in message set format (above). 
+func (u *marshalInfo) sizeMessageSet(ext *XXX_InternalExtensions) int { + m, mu := ext.extensionsRead() + if m == nil { + return 0 + } + mu.Lock() + + n := 0 + for id, e := range m { + n += 2 // start group, end group. tag = 1 (size=1) + n += SizeVarint(uint64(id)) + 1 // type_id, tag = 2 (size=1) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + siz := len(msgWithLen) + n += siz + 1 // message, tag = 3 (size=1) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, 1) // message, tag = 3 (size=1) + } + mu.Unlock() + return n +} + +// appendMessageSet marshals a XXX_InternalExtensions field in message set format (above) +// to the end of byte slice b. +func (u *marshalInfo) appendMessageSet(b []byte, ext *XXX_InternalExtensions, deterministic bool) ([]byte, error) { + m, mu := ext.extensionsRead() + if m == nil { + return b, nil + } + mu.Lock() + defer mu.Unlock() + + var err error + var nerr nonFatal + + // Fast-path for common cases: zero or one extensions. + // Don't bother sorting the keys. + if len(m) <= 1 { + for id, e := range m { + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + if !nerr.Merge(err) { + return b, err + } + b = append(b, 1<<3|WireEndGroup) + } + return b, nerr.E + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + for _, id := range keys { + e := m[int32(id)] + b = append(b, 1<<3|WireStartGroup) + b = append(b, 2<<3|WireVarint) + b = appendVarint(b, uint64(id)) + + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + msgWithLen := skipVarint(e.enc) // skip old tag, but leave the length varint + b = append(b, 3<<3|WireBytes) + b = append(b, msgWithLen...) + b = append(b, 1<<3|WireEndGroup) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, 3<<3|WireBytes, deterministic) + b = append(b, 1<<3|WireEndGroup) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// sizeV1Extensions computes the size of encoded data for a V1-API extension field. +func (u *marshalInfo) sizeV1Extensions(m map[int32]Extension) int { + if m == nil { + return 0 + } + + n := 0 + for _, e := range m { + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + n += len(e.enc) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. 
+ + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + n += ei.sizer(p, ei.tagsize) + } + return n +} + +// appendV1Extensions marshals a V1-API extension field to the end of byte slice b. +func (u *marshalInfo) appendV1Extensions(b []byte, m map[int32]Extension, deterministic bool) ([]byte, error) { + if m == nil { + return b, nil + } + + // Sort the keys to provide a deterministic encoding. + keys := make([]int, 0, len(m)) + for k := range m { + keys = append(keys, int(k)) + } + sort.Ints(keys) + + var err error + var nerr nonFatal + for _, k := range keys { + e := m[int32(k)] + if e.value == nil || e.desc == nil { + // Extension is only in its encoded form. + b = append(b, e.enc...) + continue + } + + // We don't skip extensions that have an encoded form set, + // because the extension value may have been mutated after + // the last time this function was called. + + ei := u.getExtElemInfo(e.desc) + v := e.value + p := toAddrPointer(&v, ei.isptr, ei.deref) + b, err = ei.marshaler(b, p, ei.wiretag, deterministic) + if !nerr.Merge(err) { + return b, err + } + } + return b, nerr.E +} + +// newMarshaler is the interface representing objects that can marshal themselves. +// +// This exists to support protoc-gen-go generated messages. +// The proto package will stop type-asserting to this interface in the future. +// +// DO NOT DEPEND ON THIS. +type newMarshaler interface { + XXX_Size() int + XXX_Marshal(b []byte, deterministic bool) ([]byte, error) +} + +// Size returns the encoded size of a protocol buffer message. +// This is the main entry point. +func Size(pb Message) int { + if m, ok := pb.(newMarshaler); ok { + return m.XXX_Size() + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. 
+ b, _ := m.Marshal() + return len(b) + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return 0 + } + var info InternalMessageInfo + return info.Size(pb) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, returning the data. +// This is the main entry point. +func Marshal(pb Message) ([]byte, error) { + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + b := make([]byte, 0, siz) + return m.XXX_Marshal(b, false) + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + return m.Marshal() + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return nil, ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + b := make([]byte, 0, siz) + return info.Marshal(b, pb, false) +} + +// Marshal takes a protocol buffer message +// and encodes it into the wire format, writing the result to the +// Buffer. +// This is an alternative entry point. It is not necessary to use +// a Buffer for most applications. +func (p *Buffer) Marshal(pb Message) error { + var err error + if m, ok := pb.(newMarshaler); ok { + siz := m.XXX_Size() + p.grow(siz) // make sure buf has enough capacity + p.buf, err = m.XXX_Marshal(p.buf, p.deterministic) + return err + } + if m, ok := pb.(Marshaler); ok { + // If the message can marshal itself, let it do it, for compatibility. + // NOTE: This is not efficient. + b, err := m.Marshal() + p.buf = append(p.buf, b...) + return err + } + // in case somehow we didn't generate the wrapper + if pb == nil { + return ErrNil + } + var info InternalMessageInfo + siz := info.Size(pb) + p.grow(siz) // make sure buf has enough capacity + p.buf, err = info.Marshal(p.buf, pb, p.deterministic) + return err +} + +// grow grows the buffer's capacity, if necessary, to guarantee space for +// another n bytes. 
After grow(n), at least n bytes can be written to the +// buffer without another allocation. +func (p *Buffer) grow(n int) { + need := len(p.buf) + n + if need <= cap(p.buf) { + return + } + newCap := len(p.buf) * 2 + if newCap < need { + newCap = need + } + p.buf = append(make([]byte, 0, newCap), p.buf...) +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/table_merge.go b/terraform-server/vendor/github.com/golang/protobuf/proto/table_merge.go new file mode 100644 index 00000000..5525def6 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/table_merge.go @@ -0,0 +1,654 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "fmt" + "reflect" + "strings" + "sync" + "sync/atomic" +) + +// Merge merges the src message into dst. +// This assumes that dst and src of the same type and are non-nil. +func (a *InternalMessageInfo) Merge(dst, src Message) { + mi := atomicLoadMergeInfo(&a.merge) + if mi == nil { + mi = getMergeInfo(reflect.TypeOf(dst).Elem()) + atomicStoreMergeInfo(&a.merge, mi) + } + mi.merge(toPointer(&dst), toPointer(&src)) +} + +type mergeInfo struct { + typ reflect.Type + + initialized int32 // 0: only typ is valid, 1: everything is valid + lock sync.Mutex + + fields []mergeFieldInfo + unrecognized field // Offset of XXX_unrecognized +} + +type mergeFieldInfo struct { + field field // Offset of field, guaranteed to be valid + + // isPointer reports whether the value in the field is a pointer. + // This is true for the following situations: + // * Pointer to struct + // * Pointer to basic type (proto2 only) + // * Slice (first value in slice header is a pointer) + // * String (first value in string header is a pointer) + isPointer bool + + // basicWidth reports the width of the field assuming that it is directly + // embedded in the struct (as is the case for basic types in proto3). + // The possible values are: + // 0: invalid + // 1: bool + // 4: int32, uint32, float32 + // 8: int64, uint64, float64 + basicWidth int + + // Where dst and src are pointers to the types being merged. 
+ merge func(dst, src pointer) +} + +var ( + mergeInfoMap = map[reflect.Type]*mergeInfo{} + mergeInfoLock sync.Mutex +) + +func getMergeInfo(t reflect.Type) *mergeInfo { + mergeInfoLock.Lock() + defer mergeInfoLock.Unlock() + mi := mergeInfoMap[t] + if mi == nil { + mi = &mergeInfo{typ: t} + mergeInfoMap[t] = mi + } + return mi +} + +// merge merges src into dst assuming they are both of type *mi.typ. +func (mi *mergeInfo) merge(dst, src pointer) { + if dst.isNil() { + panic("proto: nil destination") + } + if src.isNil() { + return // Nothing to do. + } + + if atomic.LoadInt32(&mi.initialized) == 0 { + mi.computeMergeInfo() + } + + for _, fi := range mi.fields { + sfp := src.offset(fi.field) + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + if fi.isPointer && sfp.getPointer().isNil() { // Could be slice or string + continue + } + if fi.basicWidth > 0 { + switch { + case fi.basicWidth == 1 && !*sfp.toBool(): + continue + case fi.basicWidth == 4 && *sfp.toUint32() == 0: + continue + case fi.basicWidth == 8 && *sfp.toUint64() == 0: + continue + } + } + } + + dfp := dst.offset(fi.field) + fi.merge(dfp, sfp) + } + + // TODO: Make this faster? + out := dst.asPointerTo(mi.typ).Elem() + in := src.asPointerTo(mi.typ).Elem() + if emIn, err := extendable(in.Addr().Interface()); err == nil { + emOut, _ := extendable(out.Addr().Interface()) + mIn, muIn := emIn.extensionsRead() + if mIn != nil { + mOut := emOut.extensionsWrite() + muIn.Lock() + mergeExtension(mOut, mIn) + muIn.Unlock() + } + } + + if mi.unrecognized.IsValid() { + if b := *src.offset(mi.unrecognized).toBytes(); len(b) > 0 { + *dst.offset(mi.unrecognized).toBytes() = append([]byte(nil), b...) 
+ } + } +} + +func (mi *mergeInfo) computeMergeInfo() { + mi.lock.Lock() + defer mi.lock.Unlock() + if mi.initialized != 0 { + return + } + t := mi.typ + n := t.NumField() + + props := GetProperties(t) + for i := 0; i < n; i++ { + f := t.Field(i) + if strings.HasPrefix(f.Name, "XXX_") { + continue + } + + mfi := mergeFieldInfo{field: toField(&f)} + tf := f.Type + + // As an optimization, we can avoid the merge function call cost + // if we know for sure that the source will have no effect + // by checking if it is the zero value. + if unsafeAllowed { + switch tf.Kind() { + case reflect.Ptr, reflect.Slice, reflect.String: + // As a special case, we assume slices and strings are pointers + // since we know that the first field in the SliceSlice or + // StringHeader is a data pointer. + mfi.isPointer = true + case reflect.Bool: + mfi.basicWidth = 1 + case reflect.Int32, reflect.Uint32, reflect.Float32: + mfi.basicWidth = 4 + case reflect.Int64, reflect.Uint64, reflect.Float64: + mfi.basicWidth = 8 + } + } + + // Unwrap tf to get at its most basic type. + var isPointer, isSlice bool + if tf.Kind() == reflect.Slice && tf.Elem().Kind() != reflect.Uint8 { + isSlice = true + tf = tf.Elem() + } + if tf.Kind() == reflect.Ptr { + isPointer = true + tf = tf.Elem() + } + if isPointer && isSlice && tf.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + tf.Name()) + } + + switch tf.Kind() { + case reflect.Int32: + switch { + case isSlice: // E.g., []int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Slice is not defined (see pointer_reflect.go). + /* + sfsp := src.toInt32Slice() + if *sfsp != nil { + dfsp := dst.toInt32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + */ + sfs := src.getInt32Slice() + if sfs != nil { + dfs := dst.getInt32Slice() + dfs = append(dfs, sfs...) 
+ if dfs == nil { + dfs = []int32{} + } + dst.setInt32Slice(dfs) + } + } + case isPointer: // E.g., *int32 + mfi.merge = func(dst, src pointer) { + // NOTE: toInt32Ptr is not defined (see pointer_reflect.go). + /* + sfpp := src.toInt32Ptr() + if *sfpp != nil { + dfpp := dst.toInt32Ptr() + if *dfpp == nil { + *dfpp = Int32(**sfpp) + } else { + **dfpp = **sfpp + } + } + */ + sfp := src.getInt32Ptr() + if sfp != nil { + dfp := dst.getInt32Ptr() + if dfp == nil { + dst.setInt32Ptr(*sfp) + } else { + *dfp = *sfp + } + } + } + default: // E.g., int32 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt32(); v != 0 { + *dst.toInt32() = v + } + } + } + case reflect.Int64: + switch { + case isSlice: // E.g., []int64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toInt64Slice() + if *sfsp != nil { + dfsp := dst.toInt64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []int64{} + } + } + } + case isPointer: // E.g., *int64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toInt64Ptr() + if *sfpp != nil { + dfpp := dst.toInt64Ptr() + if *dfpp == nil { + *dfpp = Int64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., int64 + mfi.merge = func(dst, src pointer) { + if v := *src.toInt64(); v != 0 { + *dst.toInt64() = v + } + } + } + case reflect.Uint32: + switch { + case isSlice: // E.g., []uint32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint32Slice() + if *sfsp != nil { + dfsp := dst.toUint32Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []uint32{} + } + } + } + case isPointer: // E.g., *uint32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint32Ptr() + if *sfpp != nil { + dfpp := dst.toUint32Ptr() + if *dfpp == nil { + *dfpp = Uint32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint32 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint32(); v != 0 { + *dst.toUint32() = v + } + } + } + case reflect.Uint64: + switch { + case isSlice: // E.g., []uint64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toUint64Slice() + if *sfsp != nil { + dfsp := dst.toUint64Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []uint64{} + } + } + } + case isPointer: // E.g., *uint64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toUint64Ptr() + if *sfpp != nil { + dfpp := dst.toUint64Ptr() + if *dfpp == nil { + *dfpp = Uint64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., uint64 + mfi.merge = func(dst, src pointer) { + if v := *src.toUint64(); v != 0 { + *dst.toUint64() = v + } + } + } + case reflect.Float32: + switch { + case isSlice: // E.g., []float32 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat32Slice() + if *sfsp != nil { + dfsp := dst.toFloat32Slice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []float32{} + } + } + } + case isPointer: // E.g., *float32 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat32Ptr() + if *sfpp != nil { + dfpp := dst.toFloat32Ptr() + if *dfpp == nil { + *dfpp = Float32(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float32 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat32(); v != 0 { + *dst.toFloat32() = v + } + } + } + case reflect.Float64: + switch { + case isSlice: // E.g., []float64 + mfi.merge = func(dst, src pointer) { + sfsp := src.toFloat64Slice() + if *sfsp != nil { + dfsp := dst.toFloat64Slice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []float64{} + } + } + } + case isPointer: // E.g., *float64 + mfi.merge = func(dst, src pointer) { + sfpp := src.toFloat64Ptr() + if *sfpp != nil { + dfpp := dst.toFloat64Ptr() + if *dfpp == nil { + *dfpp = Float64(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., float64 + mfi.merge = func(dst, src pointer) { + if v := *src.toFloat64(); v != 0 { + *dst.toFloat64() = v + } + } + } + case reflect.Bool: + switch { + case isSlice: // E.g., []bool + mfi.merge = func(dst, src pointer) { + sfsp := src.toBoolSlice() + if *sfsp != nil { + dfsp := dst.toBoolSlice() + *dfsp = append(*dfsp, *sfsp...) + if *dfsp == nil { + *dfsp = []bool{} + } + } + } + case isPointer: // E.g., *bool + mfi.merge = func(dst, src pointer) { + sfpp := src.toBoolPtr() + if *sfpp != nil { + dfpp := dst.toBoolPtr() + if *dfpp == nil { + *dfpp = Bool(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., bool + mfi.merge = func(dst, src pointer) { + if v := *src.toBool(); v { + *dst.toBool() = v + } + } + } + case reflect.String: + switch { + case isSlice: // E.g., []string + mfi.merge = func(dst, src pointer) { + sfsp := src.toStringSlice() + if *sfsp != nil { + dfsp := dst.toStringSlice() + *dfsp = append(*dfsp, *sfsp...) 
+ if *dfsp == nil { + *dfsp = []string{} + } + } + } + case isPointer: // E.g., *string + mfi.merge = func(dst, src pointer) { + sfpp := src.toStringPtr() + if *sfpp != nil { + dfpp := dst.toStringPtr() + if *dfpp == nil { + *dfpp = String(**sfpp) + } else { + **dfpp = **sfpp + } + } + } + default: // E.g., string + mfi.merge = func(dst, src pointer) { + if v := *src.toString(); v != "" { + *dst.toString() = v + } + } + } + case reflect.Slice: + isProto3 := props.Prop[i].proto3 + switch { + case isPointer: + panic("bad pointer in byte slice case in " + tf.Name()) + case tf.Elem().Kind() != reflect.Uint8: + panic("bad element kind in byte slice case in " + tf.Name()) + case isSlice: // E.g., [][]byte + mfi.merge = func(dst, src pointer) { + sbsp := src.toBytesSlice() + if *sbsp != nil { + dbsp := dst.toBytesSlice() + for _, sb := range *sbsp { + if sb == nil { + *dbsp = append(*dbsp, nil) + } else { + *dbsp = append(*dbsp, append([]byte{}, sb...)) + } + } + if *dbsp == nil { + *dbsp = [][]byte{} + } + } + } + default: // E.g., []byte + mfi.merge = func(dst, src pointer) { + sbp := src.toBytes() + if *sbp != nil { + dbp := dst.toBytes() + if !isProto3 || len(*sbp) > 0 { + *dbp = append([]byte{}, *sbp...) 
+ } + } + } + } + case reflect.Struct: + switch { + case !isPointer: + panic(fmt.Sprintf("message field %s without pointer", tf)) + case isSlice: // E.g., []*pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sps := src.getPointerSlice() + if sps != nil { + dps := dst.getPointerSlice() + for _, sp := range sps { + var dp pointer + if !sp.isNil() { + dp = valToPointer(reflect.New(tf)) + mi.merge(dp, sp) + } + dps = append(dps, dp) + } + if dps == nil { + dps = []pointer{} + } + dst.setPointerSlice(dps) + } + } + default: // E.g., *pb.T + mi := getMergeInfo(tf) + mfi.merge = func(dst, src pointer) { + sp := src.getPointer() + if !sp.isNil() { + dp := dst.getPointer() + if dp.isNil() { + dp = valToPointer(reflect.New(tf)) + dst.setPointer(dp) + } + mi.merge(dp, sp) + } + } + } + case reflect.Map: + switch { + case isPointer || isSlice: + panic("bad pointer or slice in map case in " + tf.Name()) + default: // E.g., map[K]V + mfi.merge = func(dst, src pointer) { + sm := src.asPointerTo(tf).Elem() + if sm.Len() == 0 { + return + } + dm := dst.asPointerTo(tf).Elem() + if dm.IsNil() { + dm.Set(reflect.MakeMap(tf)) + } + + switch tf.Elem().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(Clone(val.Interface().(Message))) + dm.SetMapIndex(key, val) + } + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + val = reflect.ValueOf(append([]byte{}, val.Bytes()...)) + dm.SetMapIndex(key, val) + } + default: // Basic type (e.g., string) + for _, key := range sm.MapKeys() { + val := sm.MapIndex(key) + dm.SetMapIndex(key, val) + } + } + } + } + case reflect.Interface: + // Must be oneof field. + switch { + case isPointer || isSlice: + panic("bad pointer or slice in interface case in " + tf.Name()) + default: // E.g., interface{} + // TODO: Make this faster? 
+ mfi.merge = func(dst, src pointer) { + su := src.asPointerTo(tf).Elem() + if !su.IsNil() { + du := dst.asPointerTo(tf).Elem() + typ := su.Elem().Type() + if du.IsNil() || du.Elem().Type() != typ { + du.Set(reflect.New(typ.Elem())) // Initialize interface if empty + } + sv := su.Elem().Elem().Field(0) + if sv.Kind() == reflect.Ptr && sv.IsNil() { + return + } + dv := du.Elem().Elem().Field(0) + if dv.Kind() == reflect.Ptr && dv.IsNil() { + dv.Set(reflect.New(sv.Type().Elem())) // Initialize proto message if empty + } + switch sv.Type().Kind() { + case reflect.Ptr: // Proto struct (e.g., *T) + Merge(dv.Interface().(Message), sv.Interface().(Message)) + case reflect.Slice: // E.g. Bytes type (e.g., []byte) + dv.Set(reflect.ValueOf(append([]byte{}, sv.Bytes()...))) + default: // Basic type (e.g., string) + dv.Set(sv) + } + } + } + } + default: + panic(fmt.Sprintf("merger not found for type:%s", tf)) + } + mi.fields = append(mi.fields, mfi) + } + + mi.unrecognized = invalidField + if f, ok := t.FieldByName("XXX_unrecognized"); ok { + if f.Type != reflect.TypeOf([]byte{}) { + panic("expected XXX_unrecognized to be of type []byte") + } + mi.unrecognized = toField(&f) + } + + atomic.StoreInt32(&mi.initialized, 1) +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/table_unmarshal.go b/terraform-server/vendor/github.com/golang/protobuf/proto/table_unmarshal.go new file mode 100644 index 00000000..acee2fc5 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/table_unmarshal.go @@ -0,0 +1,2053 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "errors" + "fmt" + "io" + "math" + "reflect" + "strconv" + "strings" + "sync" + "sync/atomic" + "unicode/utf8" +) + +// Unmarshal is the entry point from the generated .pb.go files. +// This function is not intended to be used by non-generated code. +// This function is not subject to any compatibility guarantee. +// msg contains a pointer to a protocol buffer struct. 
+// b is the data to be unmarshaled into the protocol buffer. +// a is a pointer to a place to store cached unmarshal information. +func (a *InternalMessageInfo) Unmarshal(msg Message, b []byte) error { + // Load the unmarshal information for this message type. + // The atomic load ensures memory consistency. + u := atomicLoadUnmarshalInfo(&a.unmarshal) + if u == nil { + // Slow path: find unmarshal info for msg, update a with it. + u = getUnmarshalInfo(reflect.TypeOf(msg).Elem()) + atomicStoreUnmarshalInfo(&a.unmarshal, u) + } + // Then do the unmarshaling. + err := u.unmarshal(toPointer(&msg), b) + return err +} + +type unmarshalInfo struct { + typ reflect.Type // type of the protobuf struct + + // 0 = only typ field is initialized + // 1 = completely initialized + initialized int32 + lock sync.Mutex // prevents double initialization + dense []unmarshalFieldInfo // fields indexed by tag # + sparse map[uint64]unmarshalFieldInfo // fields indexed by tag # + reqFields []string // names of required fields + reqMask uint64 // 1< 0 { + // Read tag and wire type. + // Special case 1 and 2 byte varints. + var x uint64 + if b[0] < 128 { + x = uint64(b[0]) + b = b[1:] + } else if len(b) >= 2 && b[1] < 128 { + x = uint64(b[0]&0x7f) + uint64(b[1])<<7 + b = b[2:] + } else { + var n int + x, n = decodeVarint(b) + if n == 0 { + return io.ErrUnexpectedEOF + } + b = b[n:] + } + tag := x >> 3 + wire := int(x) & 7 + + // Dispatch on the tag to one of the unmarshal* functions below. + var f unmarshalFieldInfo + if tag < uint64(len(u.dense)) { + f = u.dense[tag] + } else { + f = u.sparse[tag] + } + if fn := f.unmarshal; fn != nil { + var err error + b, err = fn(b, m.offset(f.field), wire) + if err == nil { + reqMask |= f.reqMask + continue + } + if r, ok := err.(*RequiredNotSetError); ok { + // Remember this error, but keep parsing. We need to produce + // a full parse even if a required field is missing. 
+ if errLater == nil { + errLater = r + } + reqMask |= f.reqMask + continue + } + if err != errInternalBadWireType { + if err == errInvalidUTF8 { + if errLater == nil { + fullName := revProtoTypes[reflect.PtrTo(u.typ)] + "." + f.name + errLater = &invalidUTF8Error{fullName} + } + continue + } + return err + } + // Fragments with bad wire type are treated as unknown fields. + } + + // Unknown tag. + if !u.unrecognized.IsValid() { + // Don't keep unrecognized data; just skip it. + var err error + b, err = skipField(b, wire) + if err != nil { + return err + } + continue + } + // Keep unrecognized data around. + // maybe in extensions, maybe in the unrecognized field. + z := m.offset(u.unrecognized).toBytes() + var emap map[int32]Extension + var e Extension + for _, r := range u.extensionRanges { + if uint64(r.Start) <= tag && tag <= uint64(r.End) { + if u.extensions.IsValid() { + mp := m.offset(u.extensions).toExtensions() + emap = mp.extensionsWrite() + e = emap[int32(tag)] + z = &e.enc + break + } + if u.oldExtensions.IsValid() { + p := m.offset(u.oldExtensions).toOldExtensions() + emap = *p + if emap == nil { + emap = map[int32]Extension{} + *p = emap + } + e = emap[int32(tag)] + z = &e.enc + break + } + panic("no extensions field available") + } + } + + // Use wire type to skip data. + var err error + b0 := b + b, err = skipField(b, wire) + if err != nil { + return err + } + *z = encodeVarint(*z, tag<<3|uint64(wire)) + *z = append(*z, b0[:len(b0)-len(b)]...) + + if emap != nil { + emap[int32(tag)] = e + } + } + if reqMask != u.reqMask && errLater == nil { + // A required field of this message is missing. + for _, n := range u.reqFields { + if reqMask&1 == 0 { + errLater = &RequiredNotSetError{n} + } + reqMask >>= 1 + } + } + return errLater +} + +// computeUnmarshalInfo fills in u with information for use +// in unmarshaling protocol buffers of type u.typ. 
+func (u *unmarshalInfo) computeUnmarshalInfo() { + u.lock.Lock() + defer u.lock.Unlock() + if u.initialized != 0 { + return + } + t := u.typ + n := t.NumField() + + // Set up the "not found" value for the unrecognized byte buffer. + // This is the default for proto3. + u.unrecognized = invalidField + u.extensions = invalidField + u.oldExtensions = invalidField + + // List of the generated type and offset for each oneof field. + type oneofField struct { + ityp reflect.Type // interface type of oneof field + field field // offset in containing message + } + var oneofFields []oneofField + + for i := 0; i < n; i++ { + f := t.Field(i) + if f.Name == "XXX_unrecognized" { + // The byte slice used to hold unrecognized input is special. + if f.Type != reflect.TypeOf(([]byte)(nil)) { + panic("bad type for XXX_unrecognized field: " + f.Type.Name()) + } + u.unrecognized = toField(&f) + continue + } + if f.Name == "XXX_InternalExtensions" { + // Ditto here. + if f.Type != reflect.TypeOf(XXX_InternalExtensions{}) { + panic("bad type for XXX_InternalExtensions field: " + f.Type.Name()) + } + u.extensions = toField(&f) + if f.Tag.Get("protobuf_messageset") == "1" { + u.isMessageSet = true + } + continue + } + if f.Name == "XXX_extensions" { + // An older form of the extensions field. + if f.Type != reflect.TypeOf((map[int32]Extension)(nil)) { + panic("bad type for XXX_extensions field: " + f.Type.Name()) + } + u.oldExtensions = toField(&f) + continue + } + if f.Name == "XXX_NoUnkeyedLiteral" || f.Name == "XXX_sizecache" { + continue + } + + oneof := f.Tag.Get("protobuf_oneof") + if oneof != "" { + oneofFields = append(oneofFields, oneofField{f.Type, toField(&f)}) + // The rest of oneof processing happens below. + continue + } + + tags := f.Tag.Get("protobuf") + tagArray := strings.Split(tags, ",") + if len(tagArray) < 2 { + panic("protobuf tag not enough fields in " + t.Name() + "." 
+ f.Name + ": " + tags) + } + tag, err := strconv.Atoi(tagArray[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tagArray[1]) + } + + name := "" + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + } + + // Extract unmarshaling function from the field (its type and tags). + unmarshal := fieldUnmarshaler(&f) + + // Required field? + var reqMask uint64 + if tagArray[2] == "req" { + bit := len(u.reqFields) + u.reqFields = append(u.reqFields, name) + reqMask = uint64(1) << uint(bit) + // TODO: if we have more than 64 required fields, we end up + // not verifying that all required fields are present. + // Fix this, perhaps using a count of required fields? + } + + // Store the info in the correct slot in the message. + u.setTag(tag, toField(&f), unmarshal, reqMask, name) + } + + // Find any types associated with oneof fields. + var oneofImplementers []interface{} + switch m := reflect.Zero(reflect.PtrTo(t)).Interface().(type) { + case oneofFuncsIface: + _, _, _, oneofImplementers = m.XXX_OneofFuncs() + case oneofWrappersIface: + oneofImplementers = m.XXX_OneofWrappers() + } + for _, v := range oneofImplementers { + tptr := reflect.TypeOf(v) // *Msg_X + typ := tptr.Elem() // Msg_X + + f := typ.Field(0) // oneof implementers have one field + baseUnmarshal := fieldUnmarshaler(&f) + tags := strings.Split(f.Tag.Get("protobuf"), ",") + fieldNum, err := strconv.Atoi(tags[1]) + if err != nil { + panic("protobuf tag field not an integer: " + tags[1]) + } + var name string + for _, tag := range tags { + if strings.HasPrefix(tag, "name=") { + name = strings.TrimPrefix(tag, "name=") + break + } + } + + // Find the oneof field that this struct implements. + // Might take O(n^2) to process all of the oneofs, but who cares. + for _, of := range oneofFields { + if tptr.Implements(of.ityp) { + // We have found the corresponding interface for this struct. 
+ // That lets us know where this struct should be stored + // when we encounter it during unmarshaling. + unmarshal := makeUnmarshalOneof(typ, of.ityp, baseUnmarshal) + u.setTag(fieldNum, of.field, unmarshal, 0, name) + } + } + + } + + // Get extension ranges, if any. + fn := reflect.Zero(reflect.PtrTo(t)).MethodByName("ExtensionRangeArray") + if fn.IsValid() { + if !u.extensions.IsValid() && !u.oldExtensions.IsValid() { + panic("a message with extensions, but no extensions field in " + t.Name()) + } + u.extensionRanges = fn.Call(nil)[0].Interface().([]ExtensionRange) + } + + // Explicitly disallow tag 0. This will ensure we flag an error + // when decoding a buffer of all zeros. Without this code, we + // would decode and skip an all-zero buffer of even length. + // [0 0] is [tag=0/wiretype=varint varint-encoded-0]. + u.setTag(0, zeroField, func(b []byte, f pointer, w int) ([]byte, error) { + return nil, fmt.Errorf("proto: %s: illegal tag 0 (wire type %d)", t, w) + }, 0, "") + + // Set mask for required field check. + u.reqMask = uint64(1)<= 0 && (tag < 16 || tag < 2*n) { // TODO: what are the right numbers here? + for len(u.dense) <= tag { + u.dense = append(u.dense, unmarshalFieldInfo{}) + } + u.dense[tag] = i + return + } + if u.sparse == nil { + u.sparse = map[uint64]unmarshalFieldInfo{} + } + u.sparse[uint64(tag)] = i +} + +// fieldUnmarshaler returns an unmarshaler for the given field. +func fieldUnmarshaler(f *reflect.StructField) unmarshaler { + if f.Type.Kind() == reflect.Map { + return makeUnmarshalMap(f) + } + return typeUnmarshaler(f.Type, f.Tag.Get("protobuf")) +} + +// typeUnmarshaler returns an unmarshaler for the given field type / field tag pair. 
+func typeUnmarshaler(t reflect.Type, tags string) unmarshaler { + tagArray := strings.Split(tags, ",") + encoding := tagArray[0] + name := "unknown" + proto3 := false + validateUTF8 := true + for _, tag := range tagArray[3:] { + if strings.HasPrefix(tag, "name=") { + name = tag[5:] + } + if tag == "proto3" { + proto3 = true + } + } + validateUTF8 = validateUTF8 && proto3 + + // Figure out packaging (pointer, slice, or both) + slice := false + pointer := false + if t.Kind() == reflect.Slice && t.Elem().Kind() != reflect.Uint8 { + slice = true + t = t.Elem() + } + if t.Kind() == reflect.Ptr { + pointer = true + t = t.Elem() + } + + // We'll never have both pointer and slice for basic types. + if pointer && slice && t.Kind() != reflect.Struct { + panic("both pointer and slice for basic type in " + t.Name()) + } + + switch t.Kind() { + case reflect.Bool: + if pointer { + return unmarshalBoolPtr + } + if slice { + return unmarshalBoolSlice + } + return unmarshalBoolValue + case reflect.Int32: + switch encoding { + case "fixed32": + if pointer { + return unmarshalFixedS32Ptr + } + if slice { + return unmarshalFixedS32Slice + } + return unmarshalFixedS32Value + case "varint": + // this could be int32 or enum + if pointer { + return unmarshalInt32Ptr + } + if slice { + return unmarshalInt32Slice + } + return unmarshalInt32Value + case "zigzag32": + if pointer { + return unmarshalSint32Ptr + } + if slice { + return unmarshalSint32Slice + } + return unmarshalSint32Value + } + case reflect.Int64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixedS64Ptr + } + if slice { + return unmarshalFixedS64Slice + } + return unmarshalFixedS64Value + case "varint": + if pointer { + return unmarshalInt64Ptr + } + if slice { + return unmarshalInt64Slice + } + return unmarshalInt64Value + case "zigzag64": + if pointer { + return unmarshalSint64Ptr + } + if slice { + return unmarshalSint64Slice + } + return unmarshalSint64Value + } + case reflect.Uint32: + switch 
encoding { + case "fixed32": + if pointer { + return unmarshalFixed32Ptr + } + if slice { + return unmarshalFixed32Slice + } + return unmarshalFixed32Value + case "varint": + if pointer { + return unmarshalUint32Ptr + } + if slice { + return unmarshalUint32Slice + } + return unmarshalUint32Value + } + case reflect.Uint64: + switch encoding { + case "fixed64": + if pointer { + return unmarshalFixed64Ptr + } + if slice { + return unmarshalFixed64Slice + } + return unmarshalFixed64Value + case "varint": + if pointer { + return unmarshalUint64Ptr + } + if slice { + return unmarshalUint64Slice + } + return unmarshalUint64Value + } + case reflect.Float32: + if pointer { + return unmarshalFloat32Ptr + } + if slice { + return unmarshalFloat32Slice + } + return unmarshalFloat32Value + case reflect.Float64: + if pointer { + return unmarshalFloat64Ptr + } + if slice { + return unmarshalFloat64Slice + } + return unmarshalFloat64Value + case reflect.Map: + panic("map type in typeUnmarshaler in " + t.Name()) + case reflect.Slice: + if pointer { + panic("bad pointer in slice case in " + t.Name()) + } + if slice { + return unmarshalBytesSlice + } + return unmarshalBytesValue + case reflect.String: + if validateUTF8 { + if pointer { + return unmarshalUTF8StringPtr + } + if slice { + return unmarshalUTF8StringSlice + } + return unmarshalUTF8StringValue + } + if pointer { + return unmarshalStringPtr + } + if slice { + return unmarshalStringSlice + } + return unmarshalStringValue + case reflect.Struct: + // message or group field + if !pointer { + panic(fmt.Sprintf("message/group field %s:%s without pointer", t, encoding)) + } + switch encoding { + case "bytes": + if slice { + return makeUnmarshalMessageSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalMessagePtr(getUnmarshalInfo(t), name) + case "group": + if slice { + return makeUnmarshalGroupSlicePtr(getUnmarshalInfo(t), name) + } + return makeUnmarshalGroupPtr(getUnmarshalInfo(t), name) + } + } + 
panic(fmt.Sprintf("unmarshaler not found type:%s encoding:%s", t, encoding)) +} + +// Below are all the unmarshalers for individual fields of various types. + +func unmarshalInt64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64() = v + return b, nil +} + +func unmarshalInt64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalInt64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x) + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalSint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + *f.toInt64() = v + return b, nil +} + +func unmarshalSint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 
+ *f.toInt64Ptr() = &v + return b, nil +} + +func unmarshalSint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int64(x>>1) ^ int64(x)<<63>>63 + s := f.toInt64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalUint64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64() = v + return b, nil +} + +func unmarshalUint64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + *f.toUint64Ptr() = &v + return b, nil +} + +func unmarshalUint64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = 
b[n:] + v := uint64(x) + s := f.toUint64Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalInt32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + *f.toInt32() = v + return b, nil +} + +func unmarshalInt32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalInt32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x) + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalSint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + *f.toInt32() = v + return b, nil +} + +func unmarshalSint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.setInt32Ptr(v) + return b, nil +} + +func unmarshalSint32Slice(b []byte, f pointer, w int) ([]byte, error) { + 
if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := int32(x>>1) ^ int32(x)<<31>>31 + f.appendInt32Slice(v) + return b, nil +} + +func unmarshalUint32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32() = v + return b, nil +} + +func unmarshalUint32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + *f.toUint32Ptr() = &v + return b, nil +} + +func unmarshalUint32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + v := uint32(x) + s := f.toUint32Slice() + *s = append(*s, v) + return b, nil +} + +func unmarshalFixed64Value(b []byte, f pointer, w int) ([]byte, 
error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64() = v + return b[8:], nil +} + +func unmarshalFixed64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + *f.toUint64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixed64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 + s := f.toUint64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixedS64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + 
*f.toInt64() = v + return b[8:], nil +} + +func unmarshalFixedS64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + *f.toInt64Ptr() = &v + return b[8:], nil +} + +func unmarshalFixedS64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := int64(b[0]) | int64(b[1])<<8 | int64(b[2])<<16 | int64(b[3])<<24 | int64(b[4])<<32 | int64(b[5])<<40 | int64(b[6])<<48 | int64(b[7])<<56 + s := f.toInt64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFixed32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32() = v + return b[4:], nil +} + +func unmarshalFixed32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + *f.toUint32Ptr() = &v + return b[4:], nil +} + +func 
unmarshalFixed32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 + s := f.toUint32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalFixedS32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + *f.toInt32() = v + return b[4:], nil +} + +func unmarshalFixedS32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.setInt32Ptr(v) + return b[4:], nil +} + +func unmarshalFixedS32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, 
io.ErrUnexpectedEOF + } + v := int32(b[0]) | int32(b[1])<<8 | int32(b[2])<<16 | int32(b[3])<<24 + f.appendInt32Slice(v) + return b[4:], nil +} + +func unmarshalBoolValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + // Note: any length varint is allowed, even though any sane + // encoder will use one byte. + // See https://github.com/golang/protobuf/issues/76 + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + // TODO: check if x>1? Tests seem to indicate no. + v := x != 0 + *f.toBool() = v + return b[n:], nil +} + +func unmarshalBoolPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + *f.toBoolPtr() = &v + return b[n:], nil +} + +func unmarshalBoolSlice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + x, n = decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + b = b[n:] + } + return res, nil + } + if w != WireVarint { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + v := x != 0 + s := f.toBoolSlice() + *s = append(*s, v) + return b[n:], nil +} + +func unmarshalFloat64Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64() = v + return b[8:], nil +} + 
+func unmarshalFloat64Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + *f.toFloat64Ptr() = &v + return b[8:], nil +} + +func unmarshalFloat64Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + b = b[8:] + } + return res, nil + } + if w != WireFixed64 { + return b, errInternalBadWireType + } + if len(b) < 8 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float64frombits(uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56) + s := f.toFloat64Slice() + *s = append(*s, v) + return b[8:], nil +} + +func unmarshalFloat32Value(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32() = v + return b[4:], nil +} + +func unmarshalFloat32Ptr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | 
uint32(b[2])<<16 | uint32(b[3])<<24) + *f.toFloat32Ptr() = &v + return b[4:], nil +} + +func unmarshalFloat32Slice(b []byte, f pointer, w int) ([]byte, error) { + if w == WireBytes { // packed + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + res := b[x:] + b = b[:x] + for len(b) > 0 { + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + b = b[4:] + } + return res, nil + } + if w != WireFixed32 { + return b, errInternalBadWireType + } + if len(b) < 4 { + return nil, io.ErrUnexpectedEOF + } + v := math.Float32frombits(uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24) + s := f.toFloat32Slice() + *s = append(*s, v) + return b[4:], nil +} + +func unmarshalStringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + return b[x:], nil +} + +func unmarshalStringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + return b[x:], nil +} + +func unmarshalStringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + return 
b[x:], nil +} + +func unmarshalUTF8StringValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toString() = v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringPtr(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + *f.toStringPtr() = &v + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +func unmarshalUTF8StringSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := string(b[:x]) + s := f.toStringSlice() + *s = append(*s, v) + if !utf8.ValidString(v) { + return b[x:], errInvalidUTF8 + } + return b[x:], nil +} + +var emptyBuf [0]byte + +func unmarshalBytesValue(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // The use of append here is a trick which avoids the zeroing + // that would be required if we used a make/copy pair. + // We append to emptyBuf instead of nil because we want + // a non-nil result even when the length is 0. + v := append(emptyBuf[:], b[:x]...) 
+ *f.toBytes() = v + return b[x:], nil +} + +func unmarshalBytesSlice(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := append(emptyBuf[:], b[:x]...) + s := f.toBytesSlice() + *s = append(*s, v) + return b[x:], nil +} + +func makeUnmarshalMessagePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + // First read the message field to see if something is there. + // The semantics of multiple submessages are weird. Instead of + // the last one winning (as it is for all other fields), multiple + // submessages are merged. + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[x:], err + } +} + +func makeUnmarshalMessageSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireBytes { + return b, errInternalBadWireType + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." 
+ r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[x:], err + } +} + +func makeUnmarshalGroupPtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := f.getPointer() + if v.isNil() { + v = valToPointer(reflect.New(sub.typ)) + f.setPointer(v) + } + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + return b[y:], err + } +} + +func makeUnmarshalGroupSlicePtr(sub *unmarshalInfo, name string) unmarshaler { + return func(b []byte, f pointer, w int) ([]byte, error) { + if w != WireStartGroup { + return b, errInternalBadWireType + } + x, y := findEndGroup(b) + if x < 0 { + return nil, io.ErrUnexpectedEOF + } + v := valToPointer(reflect.New(sub.typ)) + err := sub.unmarshal(v, b[:x]) + if err != nil { + if r, ok := err.(*RequiredNotSetError); ok { + r.field = name + "." + r.field + } else { + return nil, err + } + } + f.appendPointer(v) + return b[y:], err + } +} + +func makeUnmarshalMap(f *reflect.StructField) unmarshaler { + t := f.Type + kt := t.Key() + vt := t.Elem() + unmarshalKey := typeUnmarshaler(kt, f.Tag.Get("protobuf_key")) + unmarshalVal := typeUnmarshaler(vt, f.Tag.Get("protobuf_val")) + return func(b []byte, f pointer, w int) ([]byte, error) { + // The map entry is a submessage. Figure out how big it is. 
+ if w != WireBytes { + return nil, fmt.Errorf("proto: bad wiretype for map field: got %d want %d", w, WireBytes) + } + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + b = b[n:] + if x > uint64(len(b)) { + return nil, io.ErrUnexpectedEOF + } + r := b[x:] // unused data to return + b = b[:x] // data for map entry + + // Note: we could use #keys * #values ~= 200 functions + // to do map decoding without reflection. Probably not worth it. + // Maps will be somewhat slow. Oh well. + + // Read key and value from data. + var nerr nonFatal + k := reflect.New(kt) + v := reflect.New(vt) + for len(b) > 0 { + x, n := decodeVarint(b) + if n == 0 { + return nil, io.ErrUnexpectedEOF + } + wire := int(x) & 7 + b = b[n:] + + var err error + switch x >> 3 { + case 1: + b, err = unmarshalKey(b, valToPointer(k), wire) + case 2: + b, err = unmarshalVal(b, valToPointer(v), wire) + default: + err = errInternalBadWireType // skip unknown tag + } + + if nerr.Merge(err) { + continue + } + if err != errInternalBadWireType { + return nil, err + } + + // Skip past unknown fields. + b, err = skipField(b, wire) + if err != nil { + return nil, err + } + } + + // Get map, allocate if needed. + m := f.asPointerTo(t).Elem() // an addressable map[K]T + if m.IsNil() { + m.Set(reflect.MakeMap(t)) + } + + // Insert into map. + m.SetMapIndex(k.Elem(), v.Elem()) + + return r, nerr.E + } +} + +// makeUnmarshalOneof makes an unmarshaler for oneof fields. +// for: +// message Msg { +// oneof F { +// int64 X = 1; +// float64 Y = 2; +// } +// } +// typ is the type of the concrete entry for a oneof case (e.g. Msg_X). +// ityp is the interface type of the oneof field (e.g. isMsg_F). +// unmarshal is the unmarshaler for the base type of the oneof case (e.g. int64). +// Note that this function will be called once for each case in the oneof. 
+func makeUnmarshalOneof(typ, ityp reflect.Type, unmarshal unmarshaler) unmarshaler { + sf := typ.Field(0) + field0 := toField(&sf) + return func(b []byte, f pointer, w int) ([]byte, error) { + // Allocate holder for value. + v := reflect.New(typ) + + // Unmarshal data into holder. + // We unmarshal into the first field of the holder object. + var err error + var nerr nonFatal + b, err = unmarshal(b, valToPointer(v).offset(field0), w) + if !nerr.Merge(err) { + return nil, err + } + + // Write pointer to holder into target field. + f.asPointerTo(ityp).Elem().Set(v) + + return b, nerr.E + } +} + +// Error used by decode internally. +var errInternalBadWireType = errors.New("proto: internal error: bad wiretype") + +// skipField skips past a field of type wire and returns the remaining bytes. +func skipField(b []byte, wire int) ([]byte, error) { + switch wire { + case WireVarint: + _, k := decodeVarint(b) + if k == 0 { + return b, io.ErrUnexpectedEOF + } + b = b[k:] + case WireFixed32: + if len(b) < 4 { + return b, io.ErrUnexpectedEOF + } + b = b[4:] + case WireFixed64: + if len(b) < 8 { + return b, io.ErrUnexpectedEOF + } + b = b[8:] + case WireBytes: + m, k := decodeVarint(b) + if k == 0 || uint64(len(b)-k) < m { + return b, io.ErrUnexpectedEOF + } + b = b[uint64(k)+m:] + case WireStartGroup: + _, i := findEndGroup(b) + if i == -1 { + return b, io.ErrUnexpectedEOF + } + b = b[i:] + default: + return b, fmt.Errorf("proto: can't skip unknown wire type %d", wire) + } + return b, nil +} + +// findEndGroup finds the index of the next EndGroup tag. +// Groups may be nested, so the "next" EndGroup tag is the first +// unpaired EndGroup. +// findEndGroup returns the indexes of the start and end of the EndGroup tag. +// Returns (-1,-1) if it can't find one. 
+func findEndGroup(b []byte) (int, int) { + depth := 1 + i := 0 + for { + x, n := decodeVarint(b[i:]) + if n == 0 { + return -1, -1 + } + j := i + i += n + switch x & 7 { + case WireVarint: + _, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + case WireFixed32: + if len(b)-4 < i { + return -1, -1 + } + i += 4 + case WireFixed64: + if len(b)-8 < i { + return -1, -1 + } + i += 8 + case WireBytes: + m, k := decodeVarint(b[i:]) + if k == 0 { + return -1, -1 + } + i += k + if uint64(len(b)-i) < m { + return -1, -1 + } + i += int(m) + case WireStartGroup: + depth++ + case WireEndGroup: + depth-- + if depth == 0 { + return j, i + } + default: + return -1, -1 + } + } +} + +// encodeVarint appends a varint-encoded integer to b and returns the result. +func encodeVarint(b []byte, x uint64) []byte { + for x >= 1<<7 { + b = append(b, byte(x&0x7f|0x80)) + x >>= 7 + } + return append(b, byte(x)) +} + +// decodeVarint reads a varint-encoded integer from b. +// Returns the decoded integer and the number of bytes read. +// If there is an error, it returns 0,0. 
+func decodeVarint(b []byte) (uint64, int) { + var x, y uint64 + if len(b) == 0 { + goto bad + } + x = uint64(b[0]) + if x < 0x80 { + return x, 1 + } + x -= 0x80 + + if len(b) <= 1 { + goto bad + } + y = uint64(b[1]) + x += y << 7 + if y < 0x80 { + return x, 2 + } + x -= 0x80 << 7 + + if len(b) <= 2 { + goto bad + } + y = uint64(b[2]) + x += y << 14 + if y < 0x80 { + return x, 3 + } + x -= 0x80 << 14 + + if len(b) <= 3 { + goto bad + } + y = uint64(b[3]) + x += y << 21 + if y < 0x80 { + return x, 4 + } + x -= 0x80 << 21 + + if len(b) <= 4 { + goto bad + } + y = uint64(b[4]) + x += y << 28 + if y < 0x80 { + return x, 5 + } + x -= 0x80 << 28 + + if len(b) <= 5 { + goto bad + } + y = uint64(b[5]) + x += y << 35 + if y < 0x80 { + return x, 6 + } + x -= 0x80 << 35 + + if len(b) <= 6 { + goto bad + } + y = uint64(b[6]) + x += y << 42 + if y < 0x80 { + return x, 7 + } + x -= 0x80 << 42 + + if len(b) <= 7 { + goto bad + } + y = uint64(b[7]) + x += y << 49 + if y < 0x80 { + return x, 8 + } + x -= 0x80 << 49 + + if len(b) <= 8 { + goto bad + } + y = uint64(b[8]) + x += y << 56 + if y < 0x80 { + return x, 9 + } + x -= 0x80 << 56 + + if len(b) <= 9 { + goto bad + } + y = uint64(b[9]) + x += y << 63 + if y < 2 { + return x, 10 + } + +bad: + return 0, 0 +} diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/text.go b/terraform-server/vendor/github.com/golang/protobuf/proto/text.go new file mode 100644 index 00000000..d97f9b35 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/text.go @@ -0,0 +1,845 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for writing the text protocol buffer format. 
+ +import ( + "bufio" + "bytes" + "encoding" + "errors" + "fmt" + "io" + "log" + "math" + "reflect" + "sort" + "strings" +) + +var ( + newline = []byte("\n") + spaces = []byte(" ") + endBraceNewline = []byte("}\n") + backslashN = []byte{'\\', 'n'} + backslashR = []byte{'\\', 'r'} + backslashT = []byte{'\\', 't'} + backslashDQ = []byte{'\\', '"'} + backslashBS = []byte{'\\', '\\'} + posInf = []byte("inf") + negInf = []byte("-inf") + nan = []byte("nan") +) + +type writer interface { + io.Writer + WriteByte(byte) error +} + +// textWriter is an io.Writer that tracks its indentation level. +type textWriter struct { + ind int + complete bool // if the current position is a complete line + compact bool // whether to write out as a one-liner + w writer +} + +func (w *textWriter) WriteString(s string) (n int, err error) { + if !strings.Contains(s, "\n") { + if !w.compact && w.complete { + w.writeIndent() + } + w.complete = false + return io.WriteString(w.w, s) + } + // WriteString is typically called without newlines, so this + // codepath and its copy are rare. We copy to avoid + // duplicating all of Write's logic here. 
+ return w.Write([]byte(s)) +} + +func (w *textWriter) Write(p []byte) (n int, err error) { + newlines := bytes.Count(p, newline) + if newlines == 0 { + if !w.compact && w.complete { + w.writeIndent() + } + n, err = w.w.Write(p) + w.complete = false + return n, err + } + + frags := bytes.SplitN(p, newline, newlines+1) + if w.compact { + for i, frag := range frags { + if i > 0 { + if err := w.w.WriteByte(' '); err != nil { + return n, err + } + n++ + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + } + return n, nil + } + + for i, frag := range frags { + if w.complete { + w.writeIndent() + } + nn, err := w.w.Write(frag) + n += nn + if err != nil { + return n, err + } + if i+1 < len(frags) { + if err := w.w.WriteByte('\n'); err != nil { + return n, err + } + n++ + } + } + w.complete = len(frags[len(frags)-1]) == 0 + return n, nil +} + +func (w *textWriter) WriteByte(c byte) error { + if w.compact && c == '\n' { + c = ' ' + } + if !w.compact && w.complete { + w.writeIndent() + } + err := w.w.WriteByte(c) + w.complete = c == '\n' + return err +} + +func (w *textWriter) indent() { w.ind++ } + +func (w *textWriter) unindent() { + if w.ind == 0 { + log.Print("proto: textWriter unindented too far") + return + } + w.ind-- +} + +func writeName(w *textWriter, props *Properties) error { + if _, err := w.WriteString(props.OrigName); err != nil { + return err + } + if props.Wire != "group" { + return w.WriteByte(':') + } + return nil +} + +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' 
|| ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. +func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return 
err + } + } + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < sv.NumField(); i++ { + fv := sv.Field(i) + props := sprops.Prop[i] + name := st.Field(i).Name + + if name == "XXX_NoUnkeyedLiteral" { + continue + } + + if strings.HasPrefix(name, "XXX_") { + // There are two XXX_ fields: + // XXX_unrecognized []byte + // XXX_extensions map[int32]proto.Extension + // The first is handled here; + // the second is handled at the bottom of this function. + if name == "XXX_unrecognized" && !fv.IsNil() { + if err := writeUnknownStruct(w, fv.Interface().([]byte)); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Field not filled in. This could be an optional field or + // a required field that wasn't filled in. Either way, there + // isn't anything we can show for it. + continue + } + if fv.Kind() == reflect.Slice && fv.IsNil() { + // Repeated field that is empty, or a bytes field that is unused. + continue + } + + if props.Repeated && fv.Kind() == reflect.Slice { + // Repeated field. + for j := 0; j < fv.Len(); j++ { + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + v := fv.Index(j) + if v.Kind() == reflect.Ptr && v.IsNil() { + // A nil message in a repeated field is not valid, + // but we can handle that more gracefully than panicking. + if _, err := w.Write([]byte("\n")); err != nil { + return err + } + continue + } + if err := tm.writeAny(w, v, props); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if fv.Kind() == reflect.Map { + // Map fields are rendered as a repeated struct with key/value fields. 
+ keys := fv.MapKeys() + sort.Sort(mapKeys(keys)) + for _, key := range keys { + val := fv.MapIndex(key) + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + // open struct + if err := w.WriteByte('<'); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + // key + if _, err := w.WriteString("key:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, key, props.MapKeyProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + // nil values aren't legal, but we can avoid panicking because of them. + if val.Kind() != reflect.Ptr || !val.IsNil() { + // value + if _, err := w.WriteString("value:"); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, val, props.MapValProp); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + // close struct + w.unindent() + if err := w.WriteByte('>'); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + } + continue + } + if props.proto3 && fv.Kind() == reflect.Slice && fv.Len() == 0 { + // empty bytes field + continue + } + if fv.Kind() != reflect.Ptr && fv.Kind() != reflect.Slice { + // proto3 non-repeated scalar field; skip if zero value + if isProto3Zero(fv) { + continue + } + } + + if fv.Kind() == reflect.Interface { + // Check if it is a oneof. + if st.Field(i).Tag.Get("protobuf_oneof") != "" { + // fv is nil, or holds a pointer to generated struct. + // That generated struct has exactly one field, + // which has a protobuf struct tag. 
+ if fv.IsNil() { + continue + } + inner := fv.Elem().Elem() // interface -> *T -> T + tag := inner.Type().Field(0).Tag.Get("protobuf") + props = new(Properties) // Overwrite the outer props var, but not its pointee. + props.Parse(tag) + // Write the value in the oneof, not the oneof itself. + fv = inner.Field(0) + + // Special case to cope with malformed messages gracefully: + // If the value in the oneof is a nil pointer, don't panic + // in writeAny. + if fv.Kind() == reflect.Ptr && fv.IsNil() { + // Use errors.New so writeAny won't render quotes. + msg := errors.New("/* nil */") + fv = reflect.ValueOf(&msg).Elem() + } + } + } + + if err := writeName(w, props); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + + // Enums have a String method, so writeAny will work fine. + if err := tm.writeAny(w, fv, props); err != nil { + return err + } + + if err := w.WriteByte('\n'); err != nil { + return err + } + } + + // Extensions (the XXX_extensions field). + pv := sv.Addr() + if _, err := extendable(pv.Interface()); err == nil { + if err := tm.writeExtensions(w, pv); err != nil { + return err + } + } + + return nil +} + +var textMarshalerType = reflect.TypeOf((*encoding.TextMarshaler)(nil)).Elem() + +// writeAny writes an arbitrary field. +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { + v = reflect.Indirect(v) + + // Floats have special cases. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + x := v.Float() + var b []byte + switch { + case math.IsInf(x, 1): + b = posInf + case math.IsInf(x, -1): + b = negInf + case math.IsNaN(x): + b = nan + } + if b != nil { + _, err := w.Write(b) + return err + } + // Other values are handled below. + } + + // We don't attempt to serialise every possible value type; only those + // that can occur in protocol buffers. 
+ switch v.Kind() { + case reflect.Slice: + // Should only be a []byte; repeated fields are handled in writeStruct. + if err := writeString(w, string(v.Bytes())); err != nil { + return err + } + case reflect.String: + if err := writeString(w, v.String()); err != nil { + return err + } + case reflect.Struct: + // Required/optional group/message. + var bra, ket byte = '<', '>' + if props != nil && props.Wire == "group" { + bra, ket = '{', '}' + } + if err := w.WriteByte(bra); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte('\n'); err != nil { + return err + } + } + w.indent() + if v.CanAddr() { + // Calling v.Interface on a struct causes the reflect package to + // copy the entire struct. This is racy with the new Marshaler + // since we atomically update the XXX_sizecache. + // + // Thus, we retrieve a pointer to the struct if possible to avoid + // a race since v.Interface on the pointer doesn't copy the struct. + // + // If v is not addressable, then we are not worried about a race + // since it implies that the binary Marshaler cannot possibly be + // mutating this value. + v = v.Addr() + } + if v.Type().Implements(textMarshalerType) { + text, err := v.Interface().(encoding.TextMarshaler).MarshalText() + if err != nil { + return err + } + if _, err = w.Write(text); err != nil { + return err + } + } else { + if v.Kind() == reflect.Ptr { + v = v.Elem() + } + if err := tm.writeStruct(w, v); err != nil { + return err + } + } + w.unindent() + if err := w.WriteByte(ket); err != nil { + return err + } + default: + _, err := fmt.Fprint(w, v.Interface()) + return err + } + return nil +} + +// equivalent to C's isprint. +func isprint(c byte) bool { + return c >= 0x20 && c < 0x7f +} + +// writeString writes a string in the protocol buffer text format. +// It is similar to strconv.Quote except we don't use Go escape sequences, +// we treat the string as a byte sequence, and we use octal escapes. 
+// These differences are to maintain interoperability with the other +// languages' implementations of the text format. +func writeString(w *textWriter, s string) error { + // use WriteByte here to get any needed indent + if err := w.WriteByte('"'); err != nil { + return err + } + // Loop over the bytes, not the runes. + for i := 0; i < len(s); i++ { + var err error + // Divergence from C++: we don't escape apostrophes. + // There's no need to escape them, and the C++ parser + // copes with a naked apostrophe. + switch c := s[i]; c { + case '\n': + _, err = w.w.Write(backslashN) + case '\r': + _, err = w.w.Write(backslashR) + case '\t': + _, err = w.w.Write(backslashT) + case '"': + _, err = w.w.Write(backslashDQ) + case '\\': + _, err = w.w.Write(backslashBS) + default: + if isprint(c) { + err = w.w.WriteByte(c) + } else { + _, err = fmt.Fprintf(w.w, "\\%03o", c) + } + } + if err != nil { + return err + } + } + return w.WriteByte('"') +} + +func writeUnknownStruct(w *textWriter, data []byte) (err error) { + if !w.compact { + if _, err := fmt.Fprintf(w, "/* %d unknown bytes */\n", len(data)); err != nil { + return err + } + } + b := NewBuffer(data) + for b.index < len(b.buf) { + x, err := b.DecodeVarint() + if err != nil { + _, err := fmt.Fprintf(w, "/* %v */\n", err) + return err + } + wire, tag := x&7, x>>3 + if wire == WireEndGroup { + w.unindent() + if _, err := w.Write(endBraceNewline); err != nil { + return err + } + continue + } + if _, err := fmt.Fprint(w, tag); err != nil { + return err + } + if wire != WireStartGroup { + if err := w.WriteByte(':'); err != nil { + return err + } + } + if !w.compact || wire == WireStartGroup { + if err := w.WriteByte(' '); err != nil { + return err + } + } + switch wire { + case WireBytes: + buf, e := b.DecodeRawBytes(false) + if e == nil { + _, err = fmt.Fprintf(w, "%q", buf) + } else { + _, err = fmt.Fprintf(w, "/* %v */", e) + } + case WireFixed32: + x, err = b.DecodeFixed32() + err = writeUnknownInt(w, x, err) + case 
WireFixed64: + x, err = b.DecodeFixed64() + err = writeUnknownInt(w, x, err) + case WireStartGroup: + err = w.WriteByte('{') + w.indent() + case WireVarint: + x, err = b.DecodeVarint() + err = writeUnknownInt(w, x, err) + default: + _, err = fmt.Fprintf(w, "/* unknown wire type %d */", wire) + } + if err != nil { + return err + } + if err = w.WriteByte('\n'); err != nil { + return err + } + } + return nil +} + +func writeUnknownInt(w *textWriter, x uint64, err error) error { + if err == nil { + _, err = fmt.Fprint(w, x) + } else { + _, err = fmt.Fprintf(w, "/* %v */", err) + } + return err +} + +type int32Slice []int32 + +func (s int32Slice) Len() int { return len(s) } +func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } +func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } + +// writeExtensions writes all the extensions in pv. +// pv is assumed to be a pointer to a protocol message struct that is extendable. +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { + emap := extensionMaps[pv.Type().Elem()] + ep, _ := extendable(pv.Interface()) + + // Order the extensions by ID. + // This isn't strictly necessary, but it will give us + // canonical output, which will also make testing easier. + m, mu := ep.extensionsRead() + if m == nil { + return nil + } + mu.Lock() + ids := make([]int32, 0, len(m)) + for id := range m { + ids = append(ids, id) + } + sort.Sort(int32Slice(ids)) + mu.Unlock() + + for _, extNum := range ids { + ext := m[extNum] + var desc *ExtensionDesc + if emap != nil { + desc = emap[extNum] + } + if desc == nil { + // Unknown extension. + if err := writeUnknownStruct(w, ext.enc); err != nil { + return err + } + continue + } + + pb, err := GetExtension(ep, desc) + if err != nil { + return fmt.Errorf("failed getting extension: %v", err) + } + + // Repeated extensions will appear as a slice. 
+ if !desc.repeated() { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { + return err + } + } else { + v := reflect.ValueOf(pb) + for i := 0; i < v.Len(); i++ { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + return err + } + } + } + } + return nil +} + +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { + if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { + return err + } + if !w.compact { + if err := w.WriteByte(' '); err != nil { + return err + } + } + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { + return err + } + if err := w.WriteByte('\n'); err != nil { + return err + } + return nil +} + +func (w *textWriter) writeIndent() { + if !w.complete { + return + } + remain := w.ind * 2 + for remain > 0 { + n := remain + if n > len(spaces) { + n = len(spaces) + } + w.w.Write(spaces[:n]) + remain -= n + } + w.complete = false +} + +// TextMarshaler is a configurable text format marshaler. +type TextMarshaler struct { + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types +} + +// Marshal writes a given protocol buffer in text format. +// The only errors returned are from w. +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { + val := reflect.ValueOf(pb) + if pb == nil || val.IsNil() { + w.Write([]byte("")) + return nil + } + var bw *bufio.Writer + ww, ok := w.(writer) + if !ok { + bw = bufio.NewWriter(w) + ww = bw + } + aw := &textWriter{ + w: ww, + complete: true, + compact: tm.Compact, + } + + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() + if err != nil { + return err + } + if _, err = aw.Write(text); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil + } + // Dereference the received pointer so we don't have outer < and >. 
+ v := reflect.Indirect(val) + if err := tm.writeStruct(aw, v); err != nil { + return err + } + if bw != nil { + return bw.Flush() + } + return nil +} + +// Text is the same as Marshal, but returns the string directly. +func (tm *TextMarshaler) Text(pb Message) string { + var buf bytes.Buffer + tm.Marshal(&buf, pb) + return buf.String() +} + +var ( + defaultTextMarshaler = TextMarshaler{} + compactTextMarshaler = TextMarshaler{Compact: true} +) + +// TODO: consider removing some of the Marshal functions below. + +// MarshalText writes a given protocol buffer in text format. +// The only errors returned are from w. +func MarshalText(w io.Writer, pb Message) error { return defaultTextMarshaler.Marshal(w, pb) } + +// MarshalTextString is the same as MarshalText, but returns the string directly. +func MarshalTextString(pb Message) string { return defaultTextMarshaler.Text(pb) } + +// CompactText writes a given protocol buffer in compact text format (one line). +func CompactText(w io.Writer, pb Message) error { return compactTextMarshaler.Marshal(w, pb) } + +// CompactTextString is the same as CompactText, but returns the string directly. +func CompactTextString(pb Message) string { return compactTextMarshaler.Text(pb) } diff --git a/terraform-server/vendor/github.com/golang/protobuf/proto/text_parser.go b/terraform-server/vendor/github.com/golang/protobuf/proto/text_parser.go new file mode 100644 index 00000000..bb55a3af --- /dev/null +++ b/terraform-server/vendor/github.com/golang/protobuf/proto/text_parser.go @@ -0,0 +1,880 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// Functions for parsing the Text protocol buffer format. +// TODO: message sets. 
+ +import ( + "encoding" + "errors" + "fmt" + "reflect" + "strconv" + "strings" + "unicode/utf8" +) + +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + +type ParseError struct { + Message string + Line int // 1-based line number + Offset int // 0-based byte offset from start of input +} + +func (p *ParseError) Error() string { + if p.Line == 1 { + // show offset only for first line + return fmt.Sprintf("line 1.%d: %v", p.Offset, p.Message) + } + return fmt.Sprintf("line %d: %v", p.Line, p.Message) +} + +type token struct { + value string + err *ParseError + line int // line number + offset int // byte number from start of input, not start of line + unquoted string // the unquoted version of value, if it was a quoted string +} + +func (t *token) String() string { + if t.err == nil { + return fmt.Sprintf("%q (line=%d, offset=%d)", t.value, t.line, t.offset) + } + return fmt.Sprintf("parse error: %v", t.err) +} + +type textParser struct { + s string // remaining input + done bool // whether the parsing is finished (success or error) + backed bool // whether back() was called + offset, line int + cur token +} + +func newTextParser(s string) *textParser { + p := new(textParser) + p.s = s + p.line = 1 + p.cur.line = 1 + return p +} + +func (p *textParser) errorf(format string, a ...interface{}) *ParseError { + pe := &ParseError{fmt.Sprintf(format, a...), p.cur.line, p.cur.offset} + p.cur.err = pe + p.done = true + return pe +} + +// Numbers and identifiers are matched by [-+._A-Za-z0-9] +func isIdentOrNumberChar(c byte) bool { + switch { + case 'A' <= c && c <= 'Z', 'a' <= c && c <= 'z': + return true + case '0' <= c && c <= '9': + return true + } + switch c { + case '-', '+', '.', '_': + return true + } + return false +} + +func isWhitespace(c byte) bool { + switch c { + case ' ', '\t', '\n', '\r': + return true + } + return false +} + +func isQuote(c byte) 
bool { + switch c { + case '"', '\'': + return true + } + return false +} + +func (p *textParser) skipWhitespace() { + i := 0 + for i < len(p.s) && (isWhitespace(p.s[i]) || p.s[i] == '#') { + if p.s[i] == '#' { + // comment; skip to end of line or input + for i < len(p.s) && p.s[i] != '\n' { + i++ + } + if i == len(p.s) { + break + } + } + if p.s[i] == '\n' { + p.line++ + } + i++ + } + p.offset += i + p.s = p.s[i:len(p.s)] + if len(p.s) == 0 { + p.done = true + } +} + +func (p *textParser) advance() { + // Skip whitespace + p.skipWhitespace() + if p.done { + return + } + + // Start of non-whitespace + p.cur.err = nil + p.cur.offset, p.cur.line = p.offset, p.line + p.cur.unquoted = "" + switch p.s[0] { + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': + // Single symbol + p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] + case '"', '\'': + // Quoted string + i := 1 + for i < len(p.s) && p.s[i] != p.s[0] && p.s[i] != '\n' { + if p.s[i] == '\\' && i+1 < len(p.s) { + // skip escaped char + i++ + } + i++ + } + if i >= len(p.s) || p.s[i] != p.s[0] { + p.errorf("unmatched quote") + return + } + unq, err := unquoteC(p.s[1:i], rune(p.s[0])) + if err != nil { + p.errorf("invalid quoted string %s: %v", p.s[0:i+1], err) + return + } + p.cur.value, p.s = p.s[0:i+1], p.s[i+1:len(p.s)] + p.cur.unquoted = unq + default: + i := 0 + for i < len(p.s) && isIdentOrNumberChar(p.s[i]) { + i++ + } + if i == 0 { + p.errorf("unexpected byte %#x", p.s[0]) + return + } + p.cur.value, p.s = p.s[0:i], p.s[i:len(p.s)] + } + p.offset += len(p.cur.value) +} + +var ( + errBadUTF8 = errors.New("proto: bad UTF-8") +) + +func unquoteC(s string, quote rune) (string, error) { + // This is based on C++'s tokenizer.cc. + // Despite its name, this is *not* parsing C syntax. + // For instance, "\0" is an invalid quoted string. + + // Avoid allocation in trivial cases. 
+ simple := true + for _, r := range s { + if r == '\\' || r == quote { + simple = false + break + } + } + if simple { + return s, nil + } + + buf := make([]byte, 0, 3*len(s)/2) + for len(s) > 0 { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", errBadUTF8 + } + s = s[n:] + if r != '\\' { + if r < utf8.RuneSelf { + buf = append(buf, byte(r)) + } else { + buf = append(buf, string(r)...) + } + continue + } + + ch, tail, err := unescape(s) + if err != nil { + return "", err + } + buf = append(buf, ch...) + s = tail + } + return string(buf), nil +} + +func unescape(s string) (ch string, tail string, err error) { + r, n := utf8.DecodeRuneInString(s) + if r == utf8.RuneError && n == 1 { + return "", "", errBadUTF8 + } + s = s[n:] + switch r { + case 'a': + return "\a", s, nil + case 'b': + return "\b", s, nil + case 'f': + return "\f", s, nil + case 'n': + return "\n", s, nil + case 'r': + return "\r", s, nil + case 't': + return "\t", s, nil + case 'v': + return "\v", s, nil + case '?': + return "?", s, nil // trigraph workaround + case '\'', '"', '\\': + return string(r), s, nil + case '0', '1', '2', '3', '4', '5', '6', '7': + if len(s) < 2 { + return "", "", fmt.Errorf(`\%c requires 2 following digits`, r) + } + ss := string(r) + s[:2] + s = s[2:] + i, err := strconv.ParseUint(ss, 8, 8) + if err != nil { + return "", "", fmt.Errorf(`\%s contains non-octal digits`, ss) + } + return string([]byte{byte(i)}), s, nil + case 'x', 'X', 'u', 'U': + var n int + switch r { + case 'x', 'X': + n = 2 + case 'u': + n = 4 + case 'U': + n = 8 + } + if len(s) < n { + return "", "", fmt.Errorf(`\%c requires %d following digits`, r, n) + } + ss := s[:n] + s = s[n:] + i, err := strconv.ParseUint(ss, 16, 64) + if err != nil { + return "", "", fmt.Errorf(`\%c%s contains non-hexadecimal digits`, r, ss) + } + if r == 'x' || r == 'X' { + return string([]byte{byte(i)}), s, nil + } + if i > utf8.MaxRune { + return "", "", fmt.Errorf(`\%c%s is not a valid 
Unicode code point`, r, ss) + } + return string(i), s, nil + } + return "", "", fmt.Errorf(`unknown escape \%c`, r) +} + +// Back off the parser by one token. Can only be done between calls to next(). +// It makes the next advance() a no-op. +func (p *textParser) back() { p.backed = true } + +// Advances the parser and returns the new current token. +func (p *textParser) next() *token { + if p.backed || p.done { + p.backed = false + return &p.cur + } + p.advance() + if p.done { + p.cur.value = "" + } else if len(p.cur.value) > 0 && isQuote(p.cur.value[0]) { + // Look for multiple quoted strings separated by whitespace, + // and concatenate them. + cat := p.cur + for { + p.skipWhitespace() + if p.done || !isQuote(p.s[0]) { + break + } + p.advance() + if p.cur.err != nil { + return &p.cur + } + cat.value += " " + p.cur.value + cat.unquoted += p.cur.unquoted + } + p.done = false // parser may have seen EOF, but we want to return cat + p.cur = cat + } + return &p.cur +} + +func (p *textParser) consumeToken(s string) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != s { + p.back() + return p.errorf("expected %q, found %q", s, tok.value) + } + return nil +} + +// Return a RequiredNotSetError indicating which required field was not set. +func (p *textParser) missingRequiredFieldError(sv reflect.Value) *RequiredNotSetError { + st := sv.Type() + sprops := GetProperties(st) + for i := 0; i < st.NumField(); i++ { + if !isNil(sv.Field(i)) { + continue + } + + props := sprops.Prop[i] + if props.Required { + return &RequiredNotSetError{fmt.Sprintf("%v.%v", st, props.OrigName)} + } + } + return &RequiredNotSetError{fmt.Sprintf("%v.", st)} // should not happen +} + +// Returns the index in the struct for the named field, as well as the parsed tag properties. 
+func structFieldByName(sprops *StructProperties, name string) (int, *Properties, bool) { + i, ok := sprops.decoderOrigNames[name] + if ok { + return i, sprops.Prop[i], true + } + return -1, nil, false +} + +// Consume a ':' from the input stream (if the next token is a colon), +// returning an error if a colon is needed but not present. +func (p *textParser) checkForColon(props *Properties, typ reflect.Type) *ParseError { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ":" { + // Colon is optional when the field is a group or message. + needColon := true + switch props.Wire { + case "group": + needColon = false + case "bytes": + // A "bytes" field is either a message, a string, or a repeated field; + // those three become *T, *string and []T respectively, so we can check for + // this field being a pointer to a non-string. + if typ.Kind() == reflect.Ptr { + // *T or *string + if typ.Elem().Kind() == reflect.String { + break + } + } else if typ.Kind() == reflect.Slice { + // []T or []*T + if typ.Elem().Kind() != reflect.Ptr { + break + } + } else if typ.Kind() == reflect.String { + // The proto3 exception is for a string field, + // which requires a colon. + break + } + needColon = false + } + if needColon { + return p.errorf("expected ':', found %q", tok.value) + } + p.back() + } + return nil +} + +func (p *textParser) readStruct(sv reflect.Value, terminator string) error { + st := sv.Type() + sprops := GetProperties(st) + reqCount := sprops.reqCount + var reqFieldErr error + fieldSet := make(map[string]bool) + // A struct is a sequence of "name: value", terminated by one of + // '>' or '}', or the end of the input. A name may also be + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... 
> + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + if tok.value == "[" { + // Looks like an extension or an Any. + // + // TODO: Check whether we need to handle + // namespace rooted names (e.g. ".something.Foo"). + extName, err := p.consumeExtName() + if err != nil { + return err + } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. + messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + + var desc *ExtensionDesc + // This could be faster, but it's functional. + // TODO: Do something smarter than a linear scan. 
+ for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { + if d.Name == extName { + desc = d + break + } + } + if desc == nil { + return p.errorf("unrecognized extension %q", extName) + } + + props := &Properties{} + props.Parse(desc.Tag) + + typ := reflect.TypeOf(desc.ExtensionType) + if err := p.checkForColon(props, typ); err != nil { + return err + } + + rep := desc.repeated() + + // Read the extension structure, and set it in + // the value we're constructing. + var ext reflect.Value + if !rep { + ext = reflect.New(typ).Elem() + } else { + ext = reflect.New(typ.Elem()).Elem() + } + if err := p.readAny(ext, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + ep := sv.Addr().Interface().(Message) + if !rep { + SetExtension(ep, desc, ext.Interface()) + } else { + old, err := GetExtension(ep, desc) + var sl reflect.Value + if err == nil { + sl = reflect.ValueOf(old) // existing slice + } else { + sl = reflect.MakeSlice(typ, 0, 1) + } + sl = reflect.Append(sl, ext) + SetExtension(ep, desc, sl.Interface()) + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + continue + } + + // This is a normal, non-extension field. + name := tok.value + var dst reflect.Value + fi, props, ok := structFieldByName(sprops, name) + if ok { + dst = sv.Field(fi) + } else if oop, ok := sprops.OneofTypes[name]; ok { + // It is a oneof. + props = oop.Prop + nv := reflect.New(oop.Type.Elem()) + dst = nv.Elem().Field(0) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) + } + if !dst.IsValid() { + return p.errorf("unknown field name %q in %v", name, st) + } + + if dst.Kind() == reflect.Map { + // Consume any colon. + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Construct the map if it doesn't already exist. 
+ if dst.IsNil() { + dst.Set(reflect.MakeMap(dst.Type())) + } + key := reflect.New(dst.Type().Key()).Elem() + val := reflect.New(dst.Type().Elem()).Elem() + + // The map entry should be this sequence of tokens: + // < key : KEY value : VALUE > + // However, implementations may omit key or value, and technically + // we should support them in any order. See b/28924776 for a time + // this went wrong. + + tok := p.next() + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + for { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == terminator { + break + } + switch tok.value { + case "key": + if err := p.consumeToken(":"); err != nil { + return err + } + if err := p.readAny(key, props.MapKeyProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + case "value": + if err := p.checkForColon(props.MapValProp, dst.Type().Elem()); err != nil { + return err + } + if err := p.readAny(val, props.MapValProp); err != nil { + return err + } + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + default: + p.back() + return p.errorf(`expected "key", "value", or %q, found %q`, terminator, tok.value) + } + } + + dst.SetMapIndex(key, val) + continue + } + + // Check that it's not already set if it's not a repeated field. + if !props.Repeated && fieldSet[name] { + return p.errorf("non-repeated field %q was repeated", name) + } + + if err := p.checkForColon(props, dst.Type()); err != nil { + return err + } + + // Parse into the field. 
+ fieldSet[name] = true + if err := p.readAny(dst, props); err != nil { + if _, ok := err.(*RequiredNotSetError); !ok { + return err + } + reqFieldErr = err + } + if props.Required { + reqCount-- + } + + if err := p.consumeOptionalSeparator(); err != nil { + return err + } + + } + + if reqCount > 0 { + return p.missingRequiredFieldError(sv) + } + return reqFieldErr +} + +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + if p.done && tok.value != "]" { + return "", p.errorf("unclosed type_url or extension name") + } + } + return strings.Join(parts, ""), nil +} + +// consumeOptionalSeparator consumes an optional semicolon or comma. +// It is used in readStruct to provide backward compatibility. 
+func (p *textParser) consumeOptionalSeparator() error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value != ";" && tok.value != "," { + p.back() + } + return nil +} + +func (p *textParser) readAny(v reflect.Value, props *Properties) error { + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "" { + return p.errorf("unexpected EOF") + } + + switch fv := v; fv.Kind() { + case reflect.Slice: + at := v.Type() + if at.Elem().Kind() == reflect.Uint8 { + // Special case for []byte + if tok.value[0] != '"' && tok.value[0] != '\'' { + // Deliberately written out here, as the error after + // this switch statement would write "invalid []byte: ...", + // which is not as user-friendly. + return p.errorf("invalid string: %v", tok.value) + } + bytes := []byte(tok.unquoted) + fv.Set(reflect.ValueOf(bytes)) + return nil + } + // Repeated field. + if tok.value == "[" { + // Repeated field with list notation, like [1,2,3]. + for { + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + err := p.readAny(fv.Index(fv.Len()-1), props) + if err != nil { + return err + } + tok := p.next() + if tok.err != nil { + return tok.err + } + if tok.value == "]" { + break + } + if tok.value != "," { + return p.errorf("Expected ']' or ',' found %q", tok.value) + } + } + return nil + } + // One value of the repeated field. + p.back() + fv.Set(reflect.Append(fv, reflect.New(at.Elem()).Elem())) + return p.readAny(fv.Index(fv.Len()-1), props) + case reflect.Bool: + // true/1/t/True or false/f/0/False. + switch tok.value { + case "true", "1", "t", "True": + fv.SetBool(true) + return nil + case "false", "0", "f", "False": + fv.SetBool(false) + return nil + } + case reflect.Float32, reflect.Float64: + v := tok.value + // Ignore 'f' for compatibility with output generated by C++, but don't + // remove 'f' when the value is "-inf" or "inf". 
+ if strings.HasSuffix(v, "f") && tok.value != "-inf" && tok.value != "inf" { + v = v[:len(v)-1] + } + if f, err := strconv.ParseFloat(v, fv.Type().Bits()); err == nil { + fv.SetFloat(f) + return nil + } + case reflect.Int32: + if x, err := strconv.ParseInt(tok.value, 0, 32); err == nil { + fv.SetInt(x) + return nil + } + + if len(props.Enum) == 0 { + break + } + m, ok := enumValueMaps[props.Enum] + if !ok { + break + } + x, ok := m[tok.value] + if !ok { + break + } + fv.SetInt(int64(x)) + return nil + case reflect.Int64: + if x, err := strconv.ParseInt(tok.value, 0, 64); err == nil { + fv.SetInt(x) + return nil + } + + case reflect.Ptr: + // A basic field (indirected through pointer), or a repeated message/group + p.back() + fv.Set(reflect.New(fv.Type().Elem())) + return p.readAny(fv.Elem(), props) + case reflect.String: + if tok.value[0] == '"' || tok.value[0] == '\'' { + fv.SetString(tok.unquoted) + return nil + } + case reflect.Struct: + var terminator string + switch tok.value { + case "{": + terminator = "}" + case "<": + terminator = ">" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + // TODO: Handle nested messages which implement encoding.TextUnmarshaler. + return p.readStruct(fv, terminator) + case reflect.Uint32: + if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { + fv.SetUint(uint64(x)) + return nil + } + case reflect.Uint64: + if x, err := strconv.ParseUint(tok.value, 0, 64); err == nil { + fv.SetUint(x) + return nil + } + } + return p.errorf("invalid %v: %v", v.Type(), tok.value) +} + +// UnmarshalText reads a protocol buffer in Text format. UnmarshalText resets pb +// before starting to unmarshal, so any existing data in pb is always removed. +// If a required field is not set and no other error occurs, +// UnmarshalText returns *RequiredNotSetError. 
+func UnmarshalText(s string, pb Message) error { + if um, ok := pb.(encoding.TextUnmarshaler); ok { + return um.UnmarshalText([]byte(s)) + } + pb.Reset() + v := reflect.ValueOf(pb) + return newTextParser(s).readStruct(v.Elem(), "") +} diff --git a/terraform-server/vendor/github.com/golang/snappy/.gitignore b/terraform-server/vendor/github.com/golang/snappy/.gitignore new file mode 100644 index 00000000..042091d9 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/.gitignore @@ -0,0 +1,16 @@ +cmd/snappytool/snappytool +testdata/bench + +# These explicitly listed benchmark data files are for an obsolete version of +# snappy_test.go. +testdata/alice29.txt +testdata/asyoulik.txt +testdata/fireworks.jpeg +testdata/geo.protodata +testdata/html +testdata/html_x_4 +testdata/kppkn.gtb +testdata/lcet10.txt +testdata/paper-100k.pdf +testdata/plrabn12.txt +testdata/urls.10K diff --git a/terraform-server/vendor/github.com/golang/snappy/AUTHORS b/terraform-server/vendor/github.com/golang/snappy/AUTHORS new file mode 100644 index 00000000..bcfa1952 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of Snappy-Go authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS files. +# See the latter for an explanation. + +# Names should be added to this file as +# Name or Organization +# The email address is not required for organizations. + +# Please keep the list sorted. + +Damian Gryski +Google Inc. +Jan Mercl <0xjnml@gmail.com> +Rodolfo Carvalho +Sebastien Binet diff --git a/terraform-server/vendor/github.com/golang/snappy/CONTRIBUTORS b/terraform-server/vendor/github.com/golang/snappy/CONTRIBUTORS new file mode 100644 index 00000000..931ae316 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/CONTRIBUTORS @@ -0,0 +1,37 @@ +# This is the official list of people who can contribute +# (and typically have contributed) code to the Snappy-Go repository. 
+# The AUTHORS file lists the copyright holders; this file +# lists people. For example, Google employees are listed here +# but not in AUTHORS, because Google holds the copyright. +# +# The submission process automatically checks to make sure +# that people submitting code are listed in this file (by email address). +# +# Names should be added to this file only after verifying that +# the individual or the individual's organization has agreed to +# the appropriate Contributor License Agreement, found here: +# +# http://code.google.com/legal/individual-cla-v1.0.html +# http://code.google.com/legal/corporate-cla-v1.0.html +# +# The agreement for individuals can be filled out on the web. +# +# When adding J Random Contributor's name to this file, +# either J's name or J's organization's name should be +# added to the AUTHORS file, depending on whether the +# individual or corporate CLA was used. + +# Names should be added to this file like so: +# Name + +# Please keep the list sorted. + +Damian Gryski +Jan Mercl <0xjnml@gmail.com> +Kai Backman +Marc-Antoine Ruel +Nigel Tao +Rob Pike +Rodolfo Carvalho +Russ Cox +Sebastien Binet diff --git a/terraform-server/vendor/github.com/golang/snappy/LICENSE b/terraform-server/vendor/github.com/golang/snappy/LICENSE new file mode 100644 index 00000000..6050c10f --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2011 The Snappy-Go Authors. All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + * Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + * Redistributions in binary form must reproduce the above +copyright notice, this list of conditions and the following disclaimer +in the documentation and/or other materials provided with the +distribution. 
+ * Neither the name of Google Inc. nor the names of its +contributors may be used to endorse or promote products derived from +this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/terraform-server/vendor/github.com/golang/snappy/README b/terraform-server/vendor/github.com/golang/snappy/README new file mode 100644 index 00000000..cea12879 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/README @@ -0,0 +1,107 @@ +The Snappy compression format in the Go programming language. + +To download and install from source: +$ go get github.com/golang/snappy + +Unless otherwise noted, the Snappy-Go source files are distributed +under the BSD-style license found in the LICENSE file. + + + +Benchmarks. + +The golang/snappy benchmarks include compressing (Z) and decompressing (U) ten +or so files, the same set used by the C++ Snappy code (github.com/google/snappy +and note the "google", not "golang"). On an "Intel(R) Core(TM) i7-3770 CPU @ +3.40GHz", Go's GOARCH=amd64 numbers as of 2016-05-29: + +"go test -test.bench=." 
+ +_UFlat0-8 2.19GB/s ± 0% html +_UFlat1-8 1.41GB/s ± 0% urls +_UFlat2-8 23.5GB/s ± 2% jpg +_UFlat3-8 1.91GB/s ± 0% jpg_200 +_UFlat4-8 14.0GB/s ± 1% pdf +_UFlat5-8 1.97GB/s ± 0% html4 +_UFlat6-8 814MB/s ± 0% txt1 +_UFlat7-8 785MB/s ± 0% txt2 +_UFlat8-8 857MB/s ± 0% txt3 +_UFlat9-8 719MB/s ± 1% txt4 +_UFlat10-8 2.84GB/s ± 0% pb +_UFlat11-8 1.05GB/s ± 0% gaviota + +_ZFlat0-8 1.04GB/s ± 0% html +_ZFlat1-8 534MB/s ± 0% urls +_ZFlat2-8 15.7GB/s ± 1% jpg +_ZFlat3-8 740MB/s ± 3% jpg_200 +_ZFlat4-8 9.20GB/s ± 1% pdf +_ZFlat5-8 991MB/s ± 0% html4 +_ZFlat6-8 379MB/s ± 0% txt1 +_ZFlat7-8 352MB/s ± 0% txt2 +_ZFlat8-8 396MB/s ± 1% txt3 +_ZFlat9-8 327MB/s ± 1% txt4 +_ZFlat10-8 1.33GB/s ± 1% pb +_ZFlat11-8 605MB/s ± 1% gaviota + + + +"go test -test.bench=. -tags=noasm" + +_UFlat0-8 621MB/s ± 2% html +_UFlat1-8 494MB/s ± 1% urls +_UFlat2-8 23.2GB/s ± 1% jpg +_UFlat3-8 1.12GB/s ± 1% jpg_200 +_UFlat4-8 4.35GB/s ± 1% pdf +_UFlat5-8 609MB/s ± 0% html4 +_UFlat6-8 296MB/s ± 0% txt1 +_UFlat7-8 288MB/s ± 0% txt2 +_UFlat8-8 309MB/s ± 1% txt3 +_UFlat9-8 280MB/s ± 1% txt4 +_UFlat10-8 753MB/s ± 0% pb +_UFlat11-8 400MB/s ± 0% gaviota + +_ZFlat0-8 409MB/s ± 1% html +_ZFlat1-8 250MB/s ± 1% urls +_ZFlat2-8 12.3GB/s ± 1% jpg +_ZFlat3-8 132MB/s ± 0% jpg_200 +_ZFlat4-8 2.92GB/s ± 0% pdf +_ZFlat5-8 405MB/s ± 1% html4 +_ZFlat6-8 179MB/s ± 1% txt1 +_ZFlat7-8 170MB/s ± 1% txt2 +_ZFlat8-8 189MB/s ± 1% txt3 +_ZFlat9-8 164MB/s ± 1% txt4 +_ZFlat10-8 479MB/s ± 1% pb +_ZFlat11-8 270MB/s ± 1% gaviota + + + +For comparison (Go's encoded output is byte-for-byte identical to C++'s), here +are the numbers from C++ Snappy's + +make CXXFLAGS="-O2 -DNDEBUG -g" clean snappy_unittest.log && cat snappy_unittest.log + +BM_UFlat/0 2.4GB/s html +BM_UFlat/1 1.4GB/s urls +BM_UFlat/2 21.8GB/s jpg +BM_UFlat/3 1.5GB/s jpg_200 +BM_UFlat/4 13.3GB/s pdf +BM_UFlat/5 2.1GB/s html4 +BM_UFlat/6 1.0GB/s txt1 +BM_UFlat/7 959.4MB/s txt2 +BM_UFlat/8 1.0GB/s txt3 +BM_UFlat/9 864.5MB/s txt4 +BM_UFlat/10 2.9GB/s pb +BM_UFlat/11 1.2GB/s 
gaviota + +BM_ZFlat/0 944.3MB/s html (22.31 %) +BM_ZFlat/1 501.6MB/s urls (47.78 %) +BM_ZFlat/2 14.3GB/s jpg (99.95 %) +BM_ZFlat/3 538.3MB/s jpg_200 (73.00 %) +BM_ZFlat/4 8.3GB/s pdf (83.30 %) +BM_ZFlat/5 903.5MB/s html4 (22.52 %) +BM_ZFlat/6 336.0MB/s txt1 (57.88 %) +BM_ZFlat/7 312.3MB/s txt2 (61.91 %) +BM_ZFlat/8 353.1MB/s txt3 (54.99 %) +BM_ZFlat/9 289.9MB/s txt4 (66.26 %) +BM_ZFlat/10 1.2GB/s pb (19.68 %) +BM_ZFlat/11 527.4MB/s gaviota (37.72 %) diff --git a/terraform-server/vendor/github.com/golang/snappy/decode.go b/terraform-server/vendor/github.com/golang/snappy/decode.go new file mode 100644 index 00000000..72efb035 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/decode.go @@ -0,0 +1,237 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +var ( + // ErrCorrupt reports that the input is invalid. + ErrCorrupt = errors.New("snappy: corrupt input") + // ErrTooLarge reports that the uncompressed length is too large. + ErrTooLarge = errors.New("snappy: decoded block is too large") + // ErrUnsupported reports that the input isn't supported. + ErrUnsupported = errors.New("snappy: unsupported input") + + errUnsupportedLiteralLength = errors.New("snappy: unsupported literal length") +) + +// DecodedLen returns the length of the decoded block. +func DecodedLen(src []byte) (int, error) { + v, _, err := decodedLen(src) + return v, err +} + +// decodedLen returns the length of the decoded block and the number of bytes +// that the length header occupied. 
+func decodedLen(src []byte) (blockLen, headerLen int, err error) { + v, n := binary.Uvarint(src) + if n <= 0 || v > 0xffffffff { + return 0, 0, ErrCorrupt + } + + const wordSize = 32 << (^uint(0) >> 32 & 1) + if wordSize == 32 && v > 0x7fffffff { + return 0, 0, ErrTooLarge + } + return int(v), n, nil +} + +const ( + decodeErrCodeCorrupt = 1 + decodeErrCodeUnsupportedLiteralLength = 2 +) + +// Decode returns the decoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire decoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Decode(dst, src []byte) ([]byte, error) { + dLen, s, err := decodedLen(src) + if err != nil { + return nil, err + } + if dLen <= len(dst) { + dst = dst[:dLen] + } else { + dst = make([]byte, dLen) + } + switch decode(dst, src[s:]) { + case 0: + return dst, nil + case decodeErrCodeUnsupportedLiteralLength: + return nil, errUnsupportedLiteralLength + } + return nil, ErrCorrupt +} + +// NewReader returns a new Reader that decompresses from r, using the framing +// format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +func NewReader(r io.Reader) *Reader { + return &Reader{ + r: r, + decoded: make([]byte, maxBlockSize), + buf: make([]byte, maxEncodedLenOfMaxBlockSize+checksumSize), + } +} + +// Reader is an io.Reader that can read Snappy-compressed bytes. +type Reader struct { + r io.Reader + err error + decoded []byte + buf []byte + // decoded[i:j] contains decoded bytes that have not yet been passed on. + i, j int + readHeader bool +} + +// Reset discards any buffered data, resets all state, and switches the Snappy +// reader to read from r. This permits reusing a Reader rather than allocating +// a new one. 
+func (r *Reader) Reset(reader io.Reader) { + r.r = reader + r.err = nil + r.i = 0 + r.j = 0 + r.readHeader = false +} + +func (r *Reader) readFull(p []byte, allowEOF bool) (ok bool) { + if _, r.err = io.ReadFull(r.r, p); r.err != nil { + if r.err == io.ErrUnexpectedEOF || (r.err == io.EOF && !allowEOF) { + r.err = ErrCorrupt + } + return false + } + return true +} + +// Read satisfies the io.Reader interface. +func (r *Reader) Read(p []byte) (int, error) { + if r.err != nil { + return 0, r.err + } + for { + if r.i < r.j { + n := copy(p, r.decoded[r.i:r.j]) + r.i += n + return n, nil + } + if !r.readFull(r.buf[:4], true) { + return 0, r.err + } + chunkType := r.buf[0] + if !r.readHeader { + if chunkType != chunkTypeStreamIdentifier { + r.err = ErrCorrupt + return 0, r.err + } + r.readHeader = true + } + chunkLen := int(r.buf[1]) | int(r.buf[2])<<8 | int(r.buf[3])<<16 + if chunkLen > len(r.buf) { + r.err = ErrUnsupported + return 0, r.err + } + + // The chunk types are specified at + // https://github.com/google/snappy/blob/master/framing_format.txt + switch chunkType { + case chunkTypeCompressedData: + // Section 4.2. Compressed data (chunk type 0x00). + if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:chunkLen] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + buf = buf[checksumSize:] + + n, err := DecodedLen(buf) + if err != nil { + r.err = err + return 0, r.err + } + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if _, err := Decode(r.decoded, buf); err != nil { + r.err = err + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeUncompressedData: + // Section 4.3. Uncompressed data (chunk type 0x01). 
+ if chunkLen < checksumSize { + r.err = ErrCorrupt + return 0, r.err + } + buf := r.buf[:checksumSize] + if !r.readFull(buf, false) { + return 0, r.err + } + checksum := uint32(buf[0]) | uint32(buf[1])<<8 | uint32(buf[2])<<16 | uint32(buf[3])<<24 + // Read directly into r.decoded instead of via r.buf. + n := chunkLen - checksumSize + if n > len(r.decoded) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.decoded[:n], false) { + return 0, r.err + } + if crc(r.decoded[:n]) != checksum { + r.err = ErrCorrupt + return 0, r.err + } + r.i, r.j = 0, n + continue + + case chunkTypeStreamIdentifier: + // Section 4.1. Stream identifier (chunk type 0xff). + if chunkLen != len(magicBody) { + r.err = ErrCorrupt + return 0, r.err + } + if !r.readFull(r.buf[:len(magicBody)], false) { + return 0, r.err + } + for i := 0; i < len(magicBody); i++ { + if r.buf[i] != magicBody[i] { + r.err = ErrCorrupt + return 0, r.err + } + } + continue + } + + if chunkType <= 0x7f { + // Section 4.5. Reserved unskippable chunks (chunk types 0x02-0x7f). + r.err = ErrUnsupported + return 0, r.err + } + // Section 4.4 Padding (chunk type 0xfe). + // Section 4.6. Reserved skippable chunks (chunk types 0x80-0xfd). + if !r.readFull(r.buf[:chunkLen], false) { + return 0, r.err + } + } +} diff --git a/terraform-server/vendor/github.com/golang/snappy/decode_amd64.go b/terraform-server/vendor/github.com/golang/snappy/decode_amd64.go new file mode 100644 index 00000000..fcd192b8 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/decode_amd64.go @@ -0,0 +1,14 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// decode has the same semantics as in decode_other.go. 
+// +//go:noescape +func decode(dst, src []byte) int diff --git a/terraform-server/vendor/github.com/golang/snappy/decode_amd64.s b/terraform-server/vendor/github.com/golang/snappy/decode_amd64.s new file mode 100644 index 00000000..e6179f65 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/decode_amd64.s @@ -0,0 +1,490 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The asm code generally follows the pure Go code in decode_other.go, except +// where marked with a "!!!". + +// func decode(dst, src []byte) int +// +// All local variables fit into registers. The non-zero stack size is only to +// spill registers and push args when issuing a CALL. The register allocation: +// - AX scratch +// - BX scratch +// - CX length or x +// - DX offset +// - SI &src[s] +// - DI &dst[d] +// + R8 dst_base +// + R9 dst_len +// + R10 dst_base + dst_len +// + R11 src_base +// + R12 src_len +// + R13 src_base + src_len +// - R14 used by doCopy +// - R15 used by doCopy +// +// The registers R8-R13 (marked with a "+") are set at the start of the +// function, and after a CALL returns, and are not otherwise modified. +// +// The d variable is implicitly DI - R8, and len(dst)-d is R10 - DI. +// The s variable is implicitly SI - R11, and len(src)-s is R13 - SI. +TEXT ·decode(SB), NOSPLIT, $48-56 + // Initialize SI, DI and R8-R13. 
+ MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, DI + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, SI + MOVQ R11, R13 + ADDQ R12, R13 + +loop: + // for s < len(src) + CMPQ SI, R13 + JEQ end + + // CX = uint32(src[s]) + // + // switch src[s] & 0x03 + MOVBLZX (SI), CX + MOVL CX, BX + ANDL $3, BX + CMPL BX, $1 + JAE tagCopy + + // ---------------------------------------- + // The code below handles literal tags. + + // case tagLiteral: + // x := uint32(src[s] >> 2) + // switch + SHRL $2, CX + CMPL CX, $60 + JAE tagLit60Plus + + // case x < 60: + // s++ + INCQ SI + +doLit: + // This is the end of the inner "switch", when we have a literal tag. + // + // We assume that CX == x and x fits in a uint32, where x is the variable + // used in the pure Go decode_other.go code. + + // length = int(x) + 1 + // + // Unlike the pure Go code, we don't need to check if length <= 0 because + // CX can hold 64 bits, so the increment cannot overflow. + INCQ CX + + // Prepare to check if copying length bytes will run past the end of dst or + // src. + // + // AX = len(dst) - d + // BX = len(src) - s + MOVQ R10, AX + SUBQ DI, AX + MOVQ R13, BX + SUBQ SI, BX + + // !!! Try a faster technique for short (16 or fewer bytes) copies. + // + // if length > 16 || len(dst)-d < 16 || len(src)-s < 16 { + // goto callMemmove // Fall back on calling runtime·memmove. + // } + // + // The C++ snappy code calls this TryFastAppend. It also checks len(src)-s + // against 21 instead of 16, because it cannot assume that all of its input + // is contiguous in memory and so it needs to leave enough source bytes to + // read the next tag without refilling buffers, but Go's Decode assumes + // contiguousness (the src argument is a []byte). + CMPQ CX, $16 + JGT callMemmove + CMPQ AX, $16 + JLT callMemmove + CMPQ BX, $16 + JLT callMemmove + + // !!! Implement the copy from src to dst as a 16-byte load and store. 
+ // (Decode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only length bytes, but that's + // OK. If the input is a valid Snappy encoding then subsequent iterations + // will fix up the overrun. Otherwise, Decode returns a nil []byte (and a + // non-nil error), so the overrun will be ignored. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(SI), X0 + MOVOU X0, 0(DI) + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +callMemmove: + // if length > len(dst)-d || length > len(src)-s { etc } + CMPQ CX, AX + JGT errCorrupt + CMPQ CX, BX + JGT errCorrupt + + // copy(dst[d:], src[s:s+length]) + // + // This means calling runtime·memmove(&dst[d], &src[s], length), so we push + // DI, SI and CX as arguments. Coincidentally, we also need to spill those + // three registers to the stack, to save local variables across the CALL. + MOVQ DI, 0(SP) + MOVQ SI, 8(SP) + MOVQ CX, 16(SP) + MOVQ DI, 24(SP) + MOVQ SI, 32(SP) + MOVQ CX, 40(SP) + CALL runtime·memmove(SB) + + // Restore local variables: unspill registers from the stack and + // re-calculate R8-R13. + MOVQ 24(SP), DI + MOVQ 32(SP), SI + MOVQ 40(SP), CX + MOVQ dst_base+0(FP), R8 + MOVQ dst_len+8(FP), R9 + MOVQ R8, R10 + ADDQ R9, R10 + MOVQ src_base+24(FP), R11 + MOVQ src_len+32(FP), R12 + MOVQ R11, R13 + ADDQ R12, R13 + + // d += length + // s += length + ADDQ CX, DI + ADDQ CX, SI + JMP loop + +tagLit60Plus: + // !!! This fragment does the + // + // s += x - 58; if uint(s) > uint(len(src)) { etc } + // + // checks. In the asm version, we code it once instead of once per switch case. 
+ ADDQ CX, SI + SUBQ $58, SI + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // case x == 60: + CMPL CX, $61 + JEQ tagLit61 + JA tagLit62Plus + + // x = uint32(src[s-1]) + MOVBLZX -1(SI), CX + JMP doLit + +tagLit61: + // case x == 61: + // x = uint32(src[s-2]) | uint32(src[s-1])<<8 + MOVWLZX -2(SI), CX + JMP doLit + +tagLit62Plus: + CMPL CX, $62 + JA tagLit63 + + // case x == 62: + // x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + MOVWLZX -3(SI), CX + MOVBLZX -1(SI), BX + SHLL $16, BX + ORL BX, CX + JMP doLit + +tagLit63: + // case x == 63: + // x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + MOVL -4(SI), CX + JMP doLit + +// The code above handles literal tags. +// ---------------------------------------- +// The code below handles copy tags. + +tagCopy4: + // case tagCopy4: + // s += 5 + ADDQ $5, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-5])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + MOVLQZX -4(SI), DX + JMP doCopy + +tagCopy2: + // case tagCopy2: + // s += 3 + ADDQ $3, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // length = 1 + int(src[s-3])>>2 + SHRQ $2, CX + INCQ CX + + // offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + MOVWQZX -2(SI), DX + JMP doCopy + +tagCopy: + // We have a copy tag. 
We assume that: + // - BX == src[s] & 0x03 + // - CX == src[s] + CMPQ BX, $2 + JEQ tagCopy2 + JA tagCopy4 + + // case tagCopy1: + // s += 2 + ADDQ $2, SI + + // if uint(s) > uint(len(src)) { etc } + MOVQ SI, BX + SUBQ R11, BX + CMPQ BX, R12 + JA errCorrupt + + // offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + MOVQ CX, DX + ANDQ $0xe0, DX + SHLQ $3, DX + MOVBQZX -1(SI), BX + ORQ BX, DX + + // length = 4 + int(src[s-2])>>2&0x7 + SHRQ $2, CX + ANDQ $7, CX + ADDQ $4, CX + +doCopy: + // This is the end of the outer "switch", when we have a copy tag. + // + // We assume that: + // - CX == length && CX > 0 + // - DX == offset + + // if offset <= 0 { etc } + CMPQ DX, $0 + JLE errCorrupt + + // if d < offset { etc } + MOVQ DI, BX + SUBQ R8, BX + CMPQ BX, DX + JLT errCorrupt + + // if length > len(dst)-d { etc } + MOVQ R10, BX + SUBQ DI, BX + CMPQ CX, BX + JGT errCorrupt + + // forwardCopy(dst[d:d+length], dst[d-offset:]); d += length + // + // Set: + // - R14 = len(dst)-d + // - R15 = &dst[d-offset] + MOVQ R10, R14 + SUBQ DI, R14 + MOVQ DI, R15 + SUBQ DX, R15 + + // !!! Try a faster technique for short (16 or fewer bytes) forward copies. + // + // First, try using two 8-byte load/stores, similar to the doLit technique + // above. Even if dst[d:d+length] and dst[d-offset:] can overlap, this is + // still OK if offset >= 8. Note that this has to be two 8-byte load/stores + // and not one 16-byte load/store, and the first store has to be before the + // second load, due to the overlap if offset is in the range [8, 16). + // + // if length > 16 || offset < 8 || len(dst)-d < 16 { + // goto slowForwardCopy + // } + // copy 16 bytes + // d += length + CMPQ CX, $16 + JGT slowForwardCopy + CMPQ DX, $8 + JLT slowForwardCopy + CMPQ R14, $16 + JLT slowForwardCopy + MOVQ 0(R15), AX + MOVQ AX, 0(DI) + MOVQ 8(R15), BX + MOVQ BX, 8(DI) + ADDQ CX, DI + JMP loop + +slowForwardCopy: + // !!! 
If the forward copy is longer than 16 bytes, or if offset < 8, we + // can still try 8-byte load stores, provided we can overrun up to 10 extra + // bytes. As above, the overrun will be fixed up by subsequent iterations + // of the outermost loop. + // + // The C++ snappy code calls this technique IncrementalCopyFastPath. Its + // commentary says: + // + // ---- + // + // The main part of this loop is a simple copy of eight bytes at a time + // until we've copied (at least) the requested amount of bytes. However, + // if d and d-offset are less than eight bytes apart (indicating a + // repeating pattern of length < 8), we first need to expand the pattern in + // order to get the correct results. For instance, if the buffer looks like + // this, with the eight-byte and patterns marked as + // intervals: + // + // abxxxxxxxxxxxx + // [------] d-offset + // [------] d + // + // a single eight-byte copy from to will repeat the pattern + // once, after which we can move two bytes without moving : + // + // ababxxxxxxxxxx + // [------] d-offset + // [------] d + // + // and repeat the exercise until the two no longer overlap. + // + // This allows us to do very well in the special case of one single byte + // repeated many times, without taking a big hit for more general cases. + // + // The worst case of extra writing past the end of the match occurs when + // offset == 1 and length == 1; the last copy will read from byte positions + // [0..7] and write to [4..11], whereas it was only supposed to write to + // position 1. Thus, ten excess bytes. + // + // ---- + // + // That "10 byte overrun" worst case is confirmed by Go's + // TestSlowForwardCopyOverrun, which also tests the fixUpSlowForwardCopy + // and finishSlowForwardCopy algorithm. + // + // if length > len(dst)-d-10 { + // goto verySlowForwardCopy + // } + SUBQ $10, R14 + CMPQ CX, R14 + JGT verySlowForwardCopy + +makeOffsetAtLeast8: + // !!! 
As above, expand the pattern so that offset >= 8 and we can use + // 8-byte load/stores. + // + // for offset < 8 { + // copy 8 bytes from dst[d-offset:] to dst[d:] + // length -= offset + // d += offset + // offset += offset + // // The two previous lines together means that d-offset, and therefore + // // R15, is unchanged. + // } + CMPQ DX, $8 + JGE fixUpSlowForwardCopy + MOVQ (R15), BX + MOVQ BX, (DI) + SUBQ DX, CX + ADDQ DX, DI + ADDQ DX, DX + JMP makeOffsetAtLeast8 + +fixUpSlowForwardCopy: + // !!! Add length (which might be negative now) to d (implied by DI being + // &dst[d]) so that d ends up at the right place when we jump back to the + // top of the loop. Before we do that, though, we save DI to AX so that, if + // length is positive, copying the remaining length bytes will write to the + // right place. + MOVQ DI, AX + ADDQ CX, DI + +finishSlowForwardCopy: + // !!! Repeat 8-byte load/stores until length <= 0. Ending with a negative + // length means that we overrun, but as above, that will be fixed up by + // subsequent iterations of the outermost loop. + CMPQ CX, $0 + JLE loop + MOVQ (R15), BX + MOVQ BX, (AX) + ADDQ $8, R15 + ADDQ $8, AX + SUBQ $8, CX + JMP finishSlowForwardCopy + +verySlowForwardCopy: + // verySlowForwardCopy is a simple implementation of forward copy. In C + // parlance, this is a do/while loop instead of a while loop, since we know + // that length > 0. In Go syntax: + // + // for { + // dst[d] = dst[d - offset] + // d++ + // length-- + // if length == 0 { + // break + // } + // } + MOVB (R15), BX + MOVB BX, (DI) + INCQ R15 + INCQ DI + DECQ CX + JNZ verySlowForwardCopy + JMP loop + +// The code above handles copy tags. +// ---------------------------------------- + +end: + // This is the end of the "for s < len(src)". 
+ // + // if d != len(dst) { etc } + CMPQ DI, R10 + JNE errCorrupt + + // return 0 + MOVQ $0, ret+48(FP) + RET + +errCorrupt: + // return decodeErrCodeCorrupt + MOVQ $1, ret+48(FP) + RET diff --git a/terraform-server/vendor/github.com/golang/snappy/decode_other.go b/terraform-server/vendor/github.com/golang/snappy/decode_other.go new file mode 100644 index 00000000..8c9f2049 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/decode_other.go @@ -0,0 +1,101 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +// decode writes the decoding of src to dst. It assumes that the varint-encoded +// length of the decompressed bytes has already been read, and that len(dst) +// equals that length. +// +// It returns 0 on success or a decodeErrCodeXxx error code on failure. +func decode(dst, src []byte) int { + var d, s, offset, length int + for s < len(src) { + switch src[s] & 0x03 { + case tagLiteral: + x := uint32(src[s] >> 2) + switch { + case x < 60: + s++ + case x == 60: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-1]) + case x == 61: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-2]) | uint32(src[s-1])<<8 + case x == 62: + s += 4 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + x = uint32(src[s-3]) | uint32(src[s-2])<<8 | uint32(src[s-1])<<16 + case x == 63: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. 
+ return decodeErrCodeCorrupt + } + x = uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24 + } + length = int(x) + 1 + if length <= 0 { + return decodeErrCodeUnsupportedLiteralLength + } + if length > len(dst)-d || length > len(src)-s { + return decodeErrCodeCorrupt + } + copy(dst[d:], src[s:s+length]) + d += length + s += length + continue + + case tagCopy1: + s += 2 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 4 + int(src[s-2])>>2&0x7 + offset = int(uint32(src[s-2])&0xe0<<3 | uint32(src[s-1])) + + case tagCopy2: + s += 3 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-3])>>2 + offset = int(uint32(src[s-2]) | uint32(src[s-1])<<8) + + case tagCopy4: + s += 5 + if uint(s) > uint(len(src)) { // The uint conversions catch overflow from the previous line. + return decodeErrCodeCorrupt + } + length = 1 + int(src[s-5])>>2 + offset = int(uint32(src[s-4]) | uint32(src[s-3])<<8 | uint32(src[s-2])<<16 | uint32(src[s-1])<<24) + } + + if offset <= 0 || d < offset || length > len(dst)-d { + return decodeErrCodeCorrupt + } + // Copy from an earlier sub-slice of dst to a later sub-slice. Unlike + // the built-in copy function, this byte-by-byte copy always runs + // forwards, even if the slices overlap. Conceptually, this is: + // + // d += forwardCopy(dst[d:d+length], dst[d-offset:]) + for end := d + length; d != end; d++ { + dst[d] = dst[d-offset] + } + } + if d != len(dst) { + return decodeErrCodeCorrupt + } + return 0 +} diff --git a/terraform-server/vendor/github.com/golang/snappy/encode.go b/terraform-server/vendor/github.com/golang/snappy/encode.go new file mode 100644 index 00000000..8d393e90 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/encode.go @@ -0,0 +1,285 @@ +// Copyright 2011 The Snappy-Go Authors. 
All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package snappy + +import ( + "encoding/binary" + "errors" + "io" +) + +// Encode returns the encoded form of src. The returned slice may be a sub- +// slice of dst if dst was large enough to hold the entire encoded block. +// Otherwise, a newly allocated slice will be returned. +// +// The dst and src must not overlap. It is valid to pass a nil dst. +func Encode(dst, src []byte) []byte { + if n := MaxEncodedLen(len(src)); n < 0 { + panic(ErrTooLarge) + } else if len(dst) < n { + dst = make([]byte, n) + } + + // The block starts with the varint-encoded length of the decompressed bytes. + d := binary.PutUvarint(dst, uint64(len(src))) + + for len(src) > 0 { + p := src + src = nil + if len(p) > maxBlockSize { + p, src = p[:maxBlockSize], p[maxBlockSize:] + } + if len(p) < minNonLiteralBlockSize { + d += emitLiteral(dst[d:], p) + } else { + d += encodeBlock(dst[d:], p) + } + } + return dst[:d] +} + +// inputMargin is the minimum number of extra input bytes to keep, inside +// encodeBlock's inner loop. On some architectures, this margin lets us +// implement a fast path for emitLiteral, where the copy of short (<= 16 byte) +// literals can be implemented as a single load to and store from a 16-byte +// register. That literal's actual length can be as short as 1 byte, so this +// can copy up to 15 bytes too much, but that's OK as subsequent iterations of +// the encoding loop will fix up the copy overrun, and this inputMargin ensures +// that we don't overrun the dst and src buffers. +const inputMargin = 16 - 1 + +// minNonLiteralBlockSize is the minimum size of the input to encodeBlock that +// could be encoded with a copy tag. This is the minimum with respect to the +// algorithm used by encodeBlock, not a minimum enforced by the file format. 
+// +// The encoded output must start with at least a 1 byte literal, as there are +// no previous bytes to copy. A minimal (1 byte) copy after that, generated +// from an emitCopy call in encodeBlock's main loop, would require at least +// another inputMargin bytes, for the reason above: we want any emitLiteral +// calls inside encodeBlock's main loop to use the fast path if possible, which +// requires being able to overrun by inputMargin bytes. Thus, +// minNonLiteralBlockSize equals 1 + 1 + inputMargin. +// +// The C++ code doesn't use this exact threshold, but it could, as discussed at +// https://groups.google.com/d/topic/snappy-compression/oGbhsdIJSJ8/discussion +// The difference between Go (2+inputMargin) and C++ (inputMargin) is purely an +// optimization. It should not affect the encoded form. This is tested by +// TestSameEncodingAsCppShortCopies. +const minNonLiteralBlockSize = 1 + 1 + inputMargin + +// MaxEncodedLen returns the maximum length of a snappy block, given its +// uncompressed length. +// +// It will return a negative value if srcLen is too large to encode. +func MaxEncodedLen(srcLen int) int { + n := uint64(srcLen) + if n > 0xffffffff { + return -1 + } + // Compressed data can be defined as: + // compressed := item* literal* + // item := literal* copy + // + // The trailing literal sequence has a space blowup of at most 62/60 + // since a literal of length 60 needs one tag byte + one extra byte + // for length information. + // + // Item blowup is trickier to measure. Suppose the "copy" op copies + // 4 bytes of data. Because of a special check in the encoding code, + // we produce a 4-byte copy only if the offset is < 65536. Therefore + // the copy op takes 3 bytes to encode, and this type of item leads + // to at most the 62/60 blowup for representing literals. + // + // Suppose the "copy" op copies 5 bytes of data. If the offset is big + // enough, it will take 5 bytes to encode the copy op. 
Therefore the + // worst case here is a one-byte literal followed by a five-byte copy. + // That is, 6 bytes of input turn into 7 bytes of "compressed" data. + // + // This last factor dominates the blowup, so the final estimate is: + n = 32 + n + n/6 + if n > 0xffffffff { + return -1 + } + return int(n) +} + +var errClosed = errors.New("snappy: Writer is closed") + +// NewWriter returns a new Writer that compresses to w. +// +// The Writer returned does not buffer writes. There is no need to Flush or +// Close such a Writer. +// +// Deprecated: the Writer returned is not suitable for many small writes, only +// for few large writes. Use NewBufferedWriter instead, which is efficient +// regardless of the frequency and shape of the writes, and remember to Close +// that Writer when done. +func NewWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + obuf: make([]byte, obufLen), + } +} + +// NewBufferedWriter returns a new Writer that compresses to w, using the +// framing format described at +// https://github.com/google/snappy/blob/master/framing_format.txt +// +// The Writer returned buffers writes. Users must call Close to guarantee all +// data has been forwarded to the underlying io.Writer. They may also call +// Flush zero or more times before calling Close. +func NewBufferedWriter(w io.Writer) *Writer { + return &Writer{ + w: w, + ibuf: make([]byte, 0, maxBlockSize), + obuf: make([]byte, obufLen), + } +} + +// Writer is an io.Writer that can write Snappy-compressed bytes. +type Writer struct { + w io.Writer + err error + + // ibuf is a buffer for the incoming (uncompressed) bytes. + // + // Its use is optional. For backwards compatibility, Writers created by the + // NewWriter function have ibuf == nil, do not buffer incoming bytes, and + // therefore do not need to be Flush'ed or Close'd. + ibuf []byte + + // obuf is a buffer for the outgoing (compressed) bytes. + obuf []byte + + // wroteStreamHeader is whether we have written the stream header. 
+ wroteStreamHeader bool +} + +// Reset discards the writer's state and switches the Snappy writer to write to +// w. This permits reusing a Writer rather than allocating a new one. +func (w *Writer) Reset(writer io.Writer) { + w.w = writer + w.err = nil + if w.ibuf != nil { + w.ibuf = w.ibuf[:0] + } + w.wroteStreamHeader = false +} + +// Write satisfies the io.Writer interface. +func (w *Writer) Write(p []byte) (nRet int, errRet error) { + if w.ibuf == nil { + // Do not buffer incoming bytes. This does not perform or compress well + // if the caller of Writer.Write writes many small slices. This + // behavior is therefore deprecated, but still supported for backwards + // compatibility with code that doesn't explicitly Flush or Close. + return w.write(p) + } + + // The remainder of this method is based on bufio.Writer.Write from the + // standard library. + + for len(p) > (cap(w.ibuf)-len(w.ibuf)) && w.err == nil { + var n int + if len(w.ibuf) == 0 { + // Large write, empty buffer. + // Write directly from p to avoid copy. + n, _ = w.write(p) + } else { + n = copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + w.Flush() + } + nRet += n + p = p[n:] + } + if w.err != nil { + return nRet, w.err + } + n := copy(w.ibuf[len(w.ibuf):cap(w.ibuf)], p) + w.ibuf = w.ibuf[:len(w.ibuf)+n] + nRet += n + return nRet, nil +} + +func (w *Writer) write(p []byte) (nRet int, errRet error) { + if w.err != nil { + return 0, w.err + } + for len(p) > 0 { + obufStart := len(magicChunk) + if !w.wroteStreamHeader { + w.wroteStreamHeader = true + copy(w.obuf, magicChunk) + obufStart = 0 + } + + var uncompressed []byte + if len(p) > maxBlockSize { + uncompressed, p = p[:maxBlockSize], p[maxBlockSize:] + } else { + uncompressed, p = p, nil + } + checksum := crc(uncompressed) + + // Compress the buffer, discarding the result if the improvement + // isn't at least 12.5%. 
+ compressed := Encode(w.obuf[obufHeaderLen:], uncompressed) + chunkType := uint8(chunkTypeCompressedData) + chunkLen := 4 + len(compressed) + obufEnd := obufHeaderLen + len(compressed) + if len(compressed) >= len(uncompressed)-len(uncompressed)/8 { + chunkType = chunkTypeUncompressedData + chunkLen = 4 + len(uncompressed) + obufEnd = obufHeaderLen + } + + // Fill in the per-chunk header that comes before the body. + w.obuf[len(magicChunk)+0] = chunkType + w.obuf[len(magicChunk)+1] = uint8(chunkLen >> 0) + w.obuf[len(magicChunk)+2] = uint8(chunkLen >> 8) + w.obuf[len(magicChunk)+3] = uint8(chunkLen >> 16) + w.obuf[len(magicChunk)+4] = uint8(checksum >> 0) + w.obuf[len(magicChunk)+5] = uint8(checksum >> 8) + w.obuf[len(magicChunk)+6] = uint8(checksum >> 16) + w.obuf[len(magicChunk)+7] = uint8(checksum >> 24) + + if _, err := w.w.Write(w.obuf[obufStart:obufEnd]); err != nil { + w.err = err + return nRet, err + } + if chunkType == chunkTypeUncompressedData { + if _, err := w.w.Write(uncompressed); err != nil { + w.err = err + return nRet, err + } + } + nRet += len(uncompressed) + } + return nRet, nil +} + +// Flush flushes the Writer to its underlying io.Writer. +func (w *Writer) Flush() error { + if w.err != nil { + return w.err + } + if len(w.ibuf) == 0 { + return nil + } + w.write(w.ibuf) + w.ibuf = w.ibuf[:0] + return w.err +} + +// Close calls Flush and then closes the Writer. +func (w *Writer) Close() error { + w.Flush() + ret := w.err + if w.err == nil { + w.err = errClosed + } + return ret +} diff --git a/terraform-server/vendor/github.com/golang/snappy/encode_amd64.go b/terraform-server/vendor/github.com/golang/snappy/encode_amd64.go new file mode 100644 index 00000000..150d91bc --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/encode_amd64.go @@ -0,0 +1,29 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +// +build !appengine +// +build gc +// +build !noasm + +package snappy + +// emitLiteral has the same semantics as in encode_other.go. +// +//go:noescape +func emitLiteral(dst, lit []byte) int + +// emitCopy has the same semantics as in encode_other.go. +// +//go:noescape +func emitCopy(dst []byte, offset, length int) int + +// extendMatch has the same semantics as in encode_other.go. +// +//go:noescape +func extendMatch(src []byte, i, j int) int + +// encodeBlock has the same semantics as in encode_other.go. +// +//go:noescape +func encodeBlock(dst, src []byte) (d int) diff --git a/terraform-server/vendor/github.com/golang/snappy/encode_amd64.s b/terraform-server/vendor/github.com/golang/snappy/encode_amd64.s new file mode 100644 index 00000000..adfd979f --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/encode_amd64.s @@ -0,0 +1,730 @@ +// Copyright 2016 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !appengine +// +build gc +// +build !noasm + +#include "textflag.h" + +// The XXX lines assemble on Go 1.4, 1.5 and 1.7, but not 1.6, due to a +// Go toolchain regression. See https://github.com/golang/go/issues/15426 and +// https://github.com/golang/snappy/issues/29 +// +// As a workaround, the package was built with a known good assembler, and +// those instructions were disassembled by "objdump -d" to yield the +// 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 +// style comments, in AT&T asm syntax. Note that rsp here is a physical +// register, not Go/asm's SP pseudo-register (see https://golang.org/doc/asm). +// The instructions were then encoded as "BYTE $0x.." sequences, which assemble +// fine on Go 1.6. + +// The asm code generally follows the pure Go code in encode_other.go, except +// where marked with a "!!!". 
+ +// ---------------------------------------------------------------------------- + +// func emitLiteral(dst, lit []byte) int +// +// All local variables fit into registers. The register allocation: +// - AX len(lit) +// - BX n +// - DX return value +// - DI &dst[i] +// - R10 &lit[0] +// +// The 24 bytes of stack space is to call runtime·memmove. +// +// The unusual register allocation of local variables, such as R10 for the +// source pointer, matches the allocation used at the call site in encodeBlock, +// which makes it easier to manually inline this function. +TEXT ·emitLiteral(SB), NOSPLIT, $24-56 + MOVQ dst_base+0(FP), DI + MOVQ lit_base+24(FP), R10 + MOVQ lit_len+32(FP), AX + MOVQ AX, DX + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT oneByte + CMPL BX, $256 + JLT twoBytes + +threeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + ADDQ $3, DX + JMP memmove + +twoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + ADDQ $2, DX + JMP memmove + +oneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + ADDQ $1, DX + +memmove: + MOVQ DX, ret+48(FP) + + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + CALL runtime·memmove(SB) + RET + +// ---------------------------------------------------------------------------- + +// func emitCopy(dst []byte, offset, length int) int +// +// All local variables fit into registers. The register allocation: +// - AX length +// - SI &dst[0] +// - DI &dst[i] +// - R11 offset +// +// The unusual register allocation of local variables, such as R11 for the +// offset, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. 
+TEXT ·emitCopy(SB), NOSPLIT, $0-48 + MOVQ dst_base+0(FP), DI + MOVQ DI, SI + MOVQ offset+24(FP), R11 + MOVQ length+32(FP), AX + +loop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT step1 + + // Emit a length 64 copy, encoded as 3 bytes. + MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP loop0 + +step1: + // if length > 64 { etc } + CMPL AX, $64 + JLE step2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +step2: + // if length >= 12 || offset >= 2048 { goto step3 } + CMPL AX, $12 + JGE step3 + CMPL R11, $2048 + JGE step3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +step3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + + // Return the number of bytes written. + SUBQ SI, DI + MOVQ DI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func extendMatch(src []byte, i, j int) int +// +// All local variables fit into registers. The register allocation: +// - DX &src[0] +// - SI &src[j] +// - R13 &src[len(src) - 8] +// - R14 &src[len(src)] +// - R15 &src[i] +// +// The unusual register allocation of local variables, such as R15 for a source +// pointer, matches the allocation used at the call site in encodeBlock, which +// makes it easier to manually inline this function. +TEXT ·extendMatch(SB), NOSPLIT, $0-48 + MOVQ src_base+0(FP), DX + MOVQ src_len+8(FP), R14 + MOVQ i+24(FP), R15 + MOVQ j+32(FP), SI + ADDQ DX, R14 + ADDQ DX, R15 + ADDQ DX, SI + MOVQ R14, R13 + SUBQ $8, R13 + +cmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. 
If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA cmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE bsf + ADDQ $8, R15 + ADDQ $8, SI + JMP cmp8 + +bsf: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +cmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE extendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE extendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP cmp1 + +extendMatchEnd: + // Convert from &src[ret] to ret. + SUBQ DX, SI + MOVQ SI, ret+40(FP) + RET + +// ---------------------------------------------------------------------------- + +// func encodeBlock(dst, src []byte) (d int) +// +// All local variables fit into registers, other than "var table". The register +// allocation: +// - AX . . +// - BX . . +// - CX 56 shift (note that amd64 shifts by non-immediates must use CX). +// - DX 64 &src[0], tableSize +// - SI 72 &src[s] +// - DI 80 &dst[d] +// - R9 88 sLimit +// - R10 . &src[nextEmit] +// - R11 96 prevHash, currHash, nextHash, offset +// - R12 104 &src[base], skip +// - R13 . &src[nextS], &src[len(src) - 8] +// - R14 . len(src), bytesBetweenHashLookups, &src[len(src)], x +// - R15 112 candidate +// +// The second column (56, 64, etc) is the stack offset to spill the registers +// when calling other functions. We could pack this slightly tighter, but it's +// simpler to have a dedicated spill map independent of the function called. +// +// "var table [maxTableSize]uint16" takes up 32768 bytes of stack space. 
An +// extra 56 bytes, to call other functions, and an extra 64 bytes, to spill +// local variables (registers) during calls gives 32768 + 56 + 64 = 32888. +TEXT ·encodeBlock(SB), 0, $32888-56 + MOVQ dst_base+0(FP), DI + MOVQ src_base+24(FP), SI + MOVQ src_len+32(FP), R14 + + // shift, tableSize := uint32(32-8), 1<<8 + MOVQ $24, CX + MOVQ $256, DX + +calcShift: + // for ; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + // shift-- + // } + CMPQ DX, $16384 + JGE varTable + CMPQ DX, R14 + JGE varTable + SUBQ $1, CX + SHLQ $1, DX + JMP calcShift + +varTable: + // var table [maxTableSize]uint16 + // + // In the asm code, unlike the Go code, we can zero-initialize only the + // first tableSize elements. Each uint16 element is 2 bytes and each MOVOU + // writes 16 bytes, so we can do only tableSize/8 writes instead of the + // 2048 writes that would zero-initialize all of table's 32768 bytes. + SHRQ $3, DX + LEAQ table-32768(SP), BX + PXOR X0, X0 + +memclr: + MOVOU X0, 0(BX) + ADDQ $16, BX + SUBQ $1, DX + JNZ memclr + + // !!! DX = &src[0] + MOVQ SI, DX + + // sLimit := len(src) - inputMargin + MOVQ R14, R9 + SUBQ $15, R9 + + // !!! Pre-emptively spill CX, DX and R9 to the stack. Their values don't + // change for the rest of the function. 
+ MOVQ CX, 56(SP) + MOVQ DX, 64(SP) + MOVQ R9, 88(SP) + + // nextEmit := 0 + MOVQ DX, R10 + + // s := 1 + ADDQ $1, SI + + // nextHash := hash(load32(src, s), shift) + MOVL 0(SI), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + +outer: + // for { etc } + + // skip := 32 + MOVQ $32, R12 + + // nextS := s + MOVQ SI, R13 + + // candidate := 0 + MOVQ $0, R15 + +inner0: + // for { etc } + + // s := nextS + MOVQ R13, SI + + // bytesBetweenHashLookups := skip >> 5 + MOVQ R12, R14 + SHRQ $5, R14 + + // nextS = s + bytesBetweenHashLookups + ADDQ R14, R13 + + // skip += bytesBetweenHashLookups + ADDQ R14, R12 + + // if nextS > sLimit { goto emitRemainder } + MOVQ R13, AX + SUBQ DX, AX + CMPQ AX, R9 + JA emitRemainder + + // candidate = int(table[nextHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[nextHash] = uint16(s) + MOVQ SI, AX + SUBQ DX, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // nextHash = hash(load32(src, nextS), shift) + MOVL 0(R13), R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // if load32(src, s) != load32(src, candidate) { continue } break + MOVL 0(SI), AX + MOVL (DX)(R15*1), BX + CMPL AX, BX + JNE inner0 + +fourByteMatch: + // As per the encode_other.go code: + // + // A 4-byte match has been found. We'll later see etc. + + // !!! Jump to a fast path for short (<= 16 byte) literals. See the comment + // on inputMargin in encode.go. + MOVQ SI, AX + SUBQ R10, AX + CMPQ AX, $16 + JLE emitLiteralFastPath + + // ---------------------------------------- + // Begin inline of the emitLiteral call. 
+ // + // d += emitLiteral(dst[d:], src[nextEmit:s]) + + MOVL AX, BX + SUBL $1, BX + + CMPL BX, $60 + JLT inlineEmitLiteralOneByte + CMPL BX, $256 + JLT inlineEmitLiteralTwoBytes + +inlineEmitLiteralThreeBytes: + MOVB $0xf4, 0(DI) + MOVW BX, 1(DI) + ADDQ $3, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralTwoBytes: + MOVB $0xf0, 0(DI) + MOVB BX, 1(DI) + ADDQ $2, DI + JMP inlineEmitLiteralMemmove + +inlineEmitLiteralOneByte: + SHLB $2, BX + MOVB BX, 0(DI) + ADDQ $1, DI + +inlineEmitLiteralMemmove: + // Spill local variables (registers) onto the stack; call; unspill. + // + // copy(dst[i:], lit) + // + // This means calling runtime·memmove(&dst[i], &lit[0], len(lit)), so we push + // DI, R10 and AX as arguments. + MOVQ DI, 0(SP) + MOVQ R10, 8(SP) + MOVQ AX, 16(SP) + ADDQ AX, DI // Finish the "d +=" part of "d += emitLiteral(etc)". + MOVQ SI, 72(SP) + MOVQ DI, 80(SP) + MOVQ R15, 112(SP) + CALL runtime·memmove(SB) + MOVQ 56(SP), CX + MOVQ 64(SP), DX + MOVQ 72(SP), SI + MOVQ 80(SP), DI + MOVQ 88(SP), R9 + MOVQ 112(SP), R15 + JMP inner1 + +inlineEmitLiteralEnd: + // End inline of the emitLiteral call. + // ---------------------------------------- + +emitLiteralFastPath: + // !!! Emit the 1-byte encoding "uint8(len(lit)-1)<<2". + MOVB AX, BX + SUBB $1, BX + SHLB $2, BX + MOVB BX, (DI) + ADDQ $1, DI + + // !!! Implement the copy from lit to dst as a 16-byte load and store. + // (Encode's documentation says that dst and src must not overlap.) + // + // This always copies 16 bytes, instead of only len(lit) bytes, but that's + // OK. Subsequent iterations will fix up the overrun. + // + // Note that on amd64, it is legal and cheap to issue unaligned 8-byte or + // 16-byte loads and stores. This technique probably wouldn't be as + // effective on architectures that are fussier about alignment. + MOVOU 0(R10), X0 + MOVOU X0, 0(DI) + ADDQ AX, DI + +inner1: + // for { etc } + + // base := s + MOVQ SI, R12 + + // !!! 
offset := base - candidate + MOVQ R12, R11 + SUBQ R15, R11 + SUBQ DX, R11 + + // ---------------------------------------- + // Begin inline of the extendMatch call. + // + // s = extendMatch(src, candidate+4, s+4) + + // !!! R14 = &src[len(src)] + MOVQ src_len+32(FP), R14 + ADDQ DX, R14 + + // !!! R13 = &src[len(src) - 8] + MOVQ R14, R13 + SUBQ $8, R13 + + // !!! R15 = &src[candidate + 4] + ADDQ $4, R15 + ADDQ DX, R15 + + // !!! s += 4 + ADDQ $4, SI + +inlineExtendMatchCmp8: + // As long as we are 8 or more bytes before the end of src, we can load and + // compare 8 bytes at a time. If those 8 bytes are equal, repeat. + CMPQ SI, R13 + JA inlineExtendMatchCmp1 + MOVQ (R15), AX + MOVQ (SI), BX + CMPQ AX, BX + JNE inlineExtendMatchBSF + ADDQ $8, R15 + ADDQ $8, SI + JMP inlineExtendMatchCmp8 + +inlineExtendMatchBSF: + // If those 8 bytes were not equal, XOR the two 8 byte values, and return + // the index of the first byte that differs. The BSF instruction finds the + // least significant 1 bit, the amd64 architecture is little-endian, and + // the shift by 3 converts a bit index to a byte index. + XORQ AX, BX + BSFQ BX, BX + SHRQ $3, BX + ADDQ BX, SI + JMP inlineExtendMatchEnd + +inlineExtendMatchCmp1: + // In src's tail, compare 1 byte at a time. + CMPQ SI, R14 + JAE inlineExtendMatchEnd + MOVB (R15), AX + MOVB (SI), BX + CMPB AX, BX + JNE inlineExtendMatchEnd + ADDQ $1, R15 + ADDQ $1, SI + JMP inlineExtendMatchCmp1 + +inlineExtendMatchEnd: + // End inline of the extendMatch call. + // ---------------------------------------- + + // ---------------------------------------- + // Begin inline of the emitCopy call. + // + // d += emitCopy(dst[d:], base-candidate, s-base) + + // !!! length := s - base + MOVQ SI, AX + SUBQ R12, AX + +inlineEmitCopyLoop0: + // for length >= 68 { etc } + CMPL AX, $68 + JLT inlineEmitCopyStep1 + + // Emit a length 64 copy, encoded as 3 bytes. 
+ MOVB $0xfe, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $64, AX + JMP inlineEmitCopyLoop0 + +inlineEmitCopyStep1: + // if length > 64 { etc } + CMPL AX, $64 + JLE inlineEmitCopyStep2 + + // Emit a length 60 copy, encoded as 3 bytes. + MOVB $0xee, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + SUBL $60, AX + +inlineEmitCopyStep2: + // if length >= 12 || offset >= 2048 { goto inlineEmitCopyStep3 } + CMPL AX, $12 + JGE inlineEmitCopyStep3 + CMPL R11, $2048 + JGE inlineEmitCopyStep3 + + // Emit the remaining copy, encoded as 2 bytes. + MOVB R11, 1(DI) + SHRL $8, R11 + SHLB $5, R11 + SUBB $4, AX + SHLB $2, AX + ORB AX, R11 + ORB $1, R11 + MOVB R11, 0(DI) + ADDQ $2, DI + JMP inlineEmitCopyEnd + +inlineEmitCopyStep3: + // Emit the remaining copy, encoded as 3 bytes. + SUBL $1, AX + SHLB $2, AX + ORB $2, AX + MOVB AX, 0(DI) + MOVW R11, 1(DI) + ADDQ $3, DI + +inlineEmitCopyEnd: + // End inline of the emitCopy call. + // ---------------------------------------- + + // nextEmit = s + MOVQ SI, R10 + + // if s >= sLimit { goto emitRemainder } + MOVQ SI, AX + SUBQ DX, AX + CMPQ AX, R9 + JAE emitRemainder + + // As per the encode_other.go code: + // + // We could immediately etc. 
+ + // x := load64(src, s-1) + MOVQ -1(SI), R14 + + // prevHash := hash(uint32(x>>0), shift) + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // table[prevHash] = uint16(s-1) + MOVQ SI, AX + SUBQ DX, AX + SUBQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // currHash := hash(uint32(x>>8), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // candidate = int(table[currHash]) + // XXX: MOVWQZX table-32768(SP)(R11*2), R15 + // XXX: 4e 0f b7 7c 5c 78 movzwq 0x78(%rsp,%r11,2),%r15 + BYTE $0x4e + BYTE $0x0f + BYTE $0xb7 + BYTE $0x7c + BYTE $0x5c + BYTE $0x78 + + // table[currHash] = uint16(s) + ADDQ $1, AX + + // XXX: MOVW AX, table-32768(SP)(R11*2) + // XXX: 66 42 89 44 5c 78 mov %ax,0x78(%rsp,%r11,2) + BYTE $0x66 + BYTE $0x42 + BYTE $0x89 + BYTE $0x44 + BYTE $0x5c + BYTE $0x78 + + // if uint32(x>>8) == load32(src, candidate) { continue } + MOVL (DX)(R15*1), BX + CMPL R14, BX + JEQ inner1 + + // nextHash = hash(uint32(x>>16), shift) + SHRQ $8, R14 + MOVL R14, R11 + IMULL $0x1e35a7bd, R11 + SHRL CX, R11 + + // s++ + ADDQ $1, SI + + // break out of the inner1 for loop, i.e. continue the outer loop. + JMP outer + +emitRemainder: + // if nextEmit < len(src) { etc } + MOVQ src_len+32(FP), AX + ADDQ DX, AX + CMPQ R10, AX + JEQ encodeBlockEnd + + // d += emitLiteral(dst[d:], src[nextEmit:]) + // + // Push args. + MOVQ DI, 0(SP) + MOVQ $0, 8(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ $0, 16(SP) // Unnecessary, as the callee ignores it, but conservative. + MOVQ R10, 24(SP) + SUBQ R10, AX + MOVQ AX, 32(SP) + MOVQ AX, 40(SP) // Unnecessary, as the callee ignores it, but conservative. + + // Spill local variables (registers) onto the stack; call; unspill. + MOVQ DI, 80(SP) + CALL ·emitLiteral(SB) + MOVQ 80(SP), DI + + // Finish the "d +=" part of "d += emitLiteral(etc)". 
+ ADDQ 48(SP), DI + +encodeBlockEnd: + MOVQ dst_base+0(FP), AX + SUBQ AX, DI + MOVQ DI, d+48(FP) + RET diff --git a/terraform-server/vendor/github.com/golang/snappy/encode_other.go b/terraform-server/vendor/github.com/golang/snappy/encode_other.go new file mode 100644 index 00000000..dbcae905 --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/encode_other.go @@ -0,0 +1,238 @@ +// Copyright 2016 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine !gc noasm + +package snappy + +func load32(b []byte, i int) uint32 { + b = b[i : i+4 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint32(b[0]) | uint32(b[1])<<8 | uint32(b[2])<<16 | uint32(b[3])<<24 +} + +func load64(b []byte, i int) uint64 { + b = b[i : i+8 : len(b)] // Help the compiler eliminate bounds checks on the next line. + return uint64(b[0]) | uint64(b[1])<<8 | uint64(b[2])<<16 | uint64(b[3])<<24 | + uint64(b[4])<<32 | uint64(b[5])<<40 | uint64(b[6])<<48 | uint64(b[7])<<56 +} + +// emitLiteral writes a literal chunk and returns the number of bytes written. +// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= len(lit) && len(lit) <= 65536 +func emitLiteral(dst, lit []byte) int { + i, n := 0, uint(len(lit)-1) + switch { + case n < 60: + dst[0] = uint8(n)<<2 | tagLiteral + i = 1 + case n < 1<<8: + dst[0] = 60<<2 | tagLiteral + dst[1] = uint8(n) + i = 2 + default: + dst[0] = 61<<2 | tagLiteral + dst[1] = uint8(n) + dst[2] = uint8(n >> 8) + i = 3 + } + return i + copy(dst[i:], lit) +} + +// emitCopy writes a copy chunk and returns the number of bytes written. 
+// +// It assumes that: +// dst is long enough to hold the encoded bytes +// 1 <= offset && offset <= 65535 +// 4 <= length && length <= 65535 +func emitCopy(dst []byte, offset, length int) int { + i := 0 + // The maximum length for a single tagCopy1 or tagCopy2 op is 64 bytes. The + // threshold for this loop is a little higher (at 68 = 64 + 4), and the + // length emitted down below is is a little lower (at 60 = 64 - 4), because + // it's shorter to encode a length 67 copy as a length 60 tagCopy2 followed + // by a length 7 tagCopy1 (which encodes as 3+2 bytes) than to encode it as + // a length 64 tagCopy2 followed by a length 3 tagCopy2 (which encodes as + // 3+3 bytes). The magic 4 in the 64±4 is because the minimum length for a + // tagCopy1 op is 4 bytes, which is why a length 3 copy has to be an + // encodes-as-3-bytes tagCopy2 instead of an encodes-as-2-bytes tagCopy1. + for length >= 68 { + // Emit a length 64 copy, encoded as 3 bytes. + dst[i+0] = 63<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 64 + } + if length > 64 { + // Emit a length 60 copy, encoded as 3 bytes. + dst[i+0] = 59<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + i += 3 + length -= 60 + } + if length >= 12 || offset >= 2048 { + // Emit the remaining copy, encoded as 3 bytes. + dst[i+0] = uint8(length-1)<<2 | tagCopy2 + dst[i+1] = uint8(offset) + dst[i+2] = uint8(offset >> 8) + return i + 3 + } + // Emit the remaining copy, encoded as 2 bytes. + dst[i+0] = uint8(offset>>8)<<5 | uint8(length-4)<<2 | tagCopy1 + dst[i+1] = uint8(offset) + return i + 2 +} + +// extendMatch returns the largest k such that k <= len(src) and that +// src[i:i+k-j] and src[j:k] have the same contents. 
+// +// It assumes that: +// 0 <= i && i < j && j <= len(src) +func extendMatch(src []byte, i, j int) int { + for ; j < len(src) && src[i] == src[j]; i, j = i+1, j+1 { + } + return j +} + +func hash(u, shift uint32) uint32 { + return (u * 0x1e35a7bd) >> shift +} + +// encodeBlock encodes a non-empty src to a guaranteed-large-enough dst. It +// assumes that the varint-encoded length of the decompressed bytes has already +// been written. +// +// It also assumes that: +// len(dst) >= MaxEncodedLen(len(src)) && +// minNonLiteralBlockSize <= len(src) && len(src) <= maxBlockSize +func encodeBlock(dst, src []byte) (d int) { + // Initialize the hash table. Its size ranges from 1<<8 to 1<<14 inclusive. + // The table element type is uint16, as s < sLimit and sLimit < len(src) + // and len(src) <= maxBlockSize and maxBlockSize == 65536. + const ( + maxTableSize = 1 << 14 + // tableMask is redundant, but helps the compiler eliminate bounds + // checks. + tableMask = maxTableSize - 1 + ) + shift := uint32(32 - 8) + for tableSize := 1 << 8; tableSize < maxTableSize && tableSize < len(src); tableSize *= 2 { + shift-- + } + // In Go, all array elements are zero-initialized, so there is no advantage + // to a smaller tableSize per se. However, it matches the C++ algorithm, + // and in the asm versions of this code, we can get away with zeroing only + // the first tableSize elements. + var table [maxTableSize]uint16 + + // sLimit is when to stop looking for offset/length copies. The inputMargin + // lets us use a fast path for emitLiteral in the main loop, while we are + // looking for copies. + sLimit := len(src) - inputMargin + + // nextEmit is where in src the next emitLiteral should start from. + nextEmit := 0 + + // The encoded form must start with a literal, as there are no previous + // bytes to copy, so we start looking for hash matches at s == 1. 
+ s := 1 + nextHash := hash(load32(src, s), shift) + + for { + // Copied from the C++ snappy implementation: + // + // Heuristic match skipping: If 32 bytes are scanned with no matches + // found, start looking only at every other byte. If 32 more bytes are + // scanned (or skipped), look at every third byte, etc.. When a match + // is found, immediately go back to looking at every byte. This is a + // small loss (~5% performance, ~0.1% density) for compressible data + // due to more bookkeeping, but for non-compressible data (such as + // JPEG) it's a huge win since the compressor quickly "realizes" the + // data is incompressible and doesn't bother looking for matches + // everywhere. + // + // The "skip" variable keeps track of how many bytes there are since + // the last match; dividing it by 32 (ie. right-shifting by five) gives + // the number of bytes to move ahead for each iteration. + skip := 32 + + nextS := s + candidate := 0 + for { + s = nextS + bytesBetweenHashLookups := skip >> 5 + nextS = s + bytesBetweenHashLookups + skip += bytesBetweenHashLookups + if nextS > sLimit { + goto emitRemainder + } + candidate = int(table[nextHash&tableMask]) + table[nextHash&tableMask] = uint16(s) + nextHash = hash(load32(src, nextS), shift) + if load32(src, s) == load32(src, candidate) { + break + } + } + + // A 4-byte match has been found. We'll later see if more than 4 bytes + // match. But, prior to the match, src[nextEmit:s] are unmatched. Emit + // them as literal bytes. + d += emitLiteral(dst[d:], src[nextEmit:s]) + + // Call emitCopy, and then see if another emitCopy could be our next + // move. Repeat until we find no match for the input immediately after + // what was consumed by the last emitCopy call. + // + // If we exit this loop normally then we need to call emitLiteral next, + // though we don't yet know how big the literal will be. We handle that + // by proceeding to the next iteration of the main loop. 
We also can + // exit this loop via goto if we get close to exhausting the input. + for { + // Invariant: we have a 4-byte match at s, and no need to emit any + // literal bytes prior to s. + base := s + + // Extend the 4-byte match as long as possible. + // + // This is an inlined version of: + // s = extendMatch(src, candidate+4, s+4) + s += 4 + for i := candidate + 4; s < len(src) && src[i] == src[s]; i, s = i+1, s+1 { + } + + d += emitCopy(dst[d:], base-candidate, s-base) + nextEmit = s + if s >= sLimit { + goto emitRemainder + } + + // We could immediately start working at s now, but to improve + // compression we first update the hash table at s-1 and at s. If + // another emitCopy is not our next move, also calculate nextHash + // at s+1. At least on GOARCH=amd64, these three hash calculations + // are faster as one load64 call (with some shifts) instead of + // three load32 calls. + x := load64(src, s-1) + prevHash := hash(uint32(x>>0), shift) + table[prevHash&tableMask] = uint16(s - 1) + currHash := hash(uint32(x>>8), shift) + candidate = int(table[currHash&tableMask]) + table[currHash&tableMask] = uint16(s) + if uint32(x>>8) != load32(src, candidate) { + nextHash = hash(uint32(x>>16), shift) + s++ + break + } + } + } + +emitRemainder: + if nextEmit < len(src) { + d += emitLiteral(dst[d:], src[nextEmit:]) + } + return d +} diff --git a/terraform-server/vendor/github.com/golang/snappy/snappy.go b/terraform-server/vendor/github.com/golang/snappy/snappy.go new file mode 100644 index 00000000..ece692ea --- /dev/null +++ b/terraform-server/vendor/github.com/golang/snappy/snappy.go @@ -0,0 +1,98 @@ +// Copyright 2011 The Snappy-Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package snappy implements the Snappy compression format. It aims for very +// high speeds and reasonable compression. +// +// There are actually two Snappy formats: block and stream. 
They are related, +// but different: trying to decompress block-compressed data as a Snappy stream +// will fail, and vice versa. The block format is the Decode and Encode +// functions and the stream format is the Reader and Writer types. +// +// The block format, the more common case, is used when the complete size (the +// number of bytes) of the original data is known upfront, at the time +// compression starts. The stream format, also known as the framing format, is +// for when that isn't always true. +// +// The canonical, C++ implementation is at https://github.com/google/snappy and +// it only implements the block format. +package snappy // import "github.com/golang/snappy" + +import ( + "hash/crc32" +) + +/* +Each encoded block begins with the varint-encoded length of the decoded data, +followed by a sequence of chunks. Chunks begin and end on byte boundaries. The +first byte of each chunk is broken into its 2 least and 6 most significant bits +called l and m: l ranges in [0, 4) and m ranges in [0, 64). l is the chunk tag. +Zero means a literal tag. All other values mean a copy tag. + +For literal tags: + - If m < 60, the next 1 + m bytes are literal bytes. + - Otherwise, let n be the little-endian unsigned integer denoted by the next + m - 59 bytes. The next 1 + n bytes after that are literal bytes. + +For copy tags, length bytes are copied from offset bytes ago, in the style of +Lempel-Ziv compression algorithms. In particular: + - For l == 1, the offset ranges in [0, 1<<11) and the length in [4, 12). + The length is 4 + the low 3 bits of m. The high 3 bits of m form bits 8-10 + of the offset. The next byte is bits 0-7 of the offset. + - For l == 2, the offset ranges in [0, 1<<16) and the length in [1, 65). + The length is 1 + m. The offset is the little-endian unsigned integer + denoted by the next 2 bytes. + - For l == 3, this tag is a legacy format that is no longer issued by most + encoders. 
Nonetheless, the offset ranges in [0, 1<<32) and the length in + [1, 65). The length is 1 + m. The offset is the little-endian unsigned + integer denoted by the next 4 bytes. +*/ +const ( + tagLiteral = 0x00 + tagCopy1 = 0x01 + tagCopy2 = 0x02 + tagCopy4 = 0x03 +) + +const ( + checksumSize = 4 + chunkHeaderSize = 4 + magicChunk = "\xff\x06\x00\x00" + magicBody + magicBody = "sNaPpY" + + // maxBlockSize is the maximum size of the input to encodeBlock. It is not + // part of the wire format per se, but some parts of the encoder assume + // that an offset fits into a uint16. + // + // Also, for the framing format (Writer type instead of Encode function), + // https://github.com/google/snappy/blob/master/framing_format.txt says + // that "the uncompressed data in a chunk must be no longer than 65536 + // bytes". + maxBlockSize = 65536 + + // maxEncodedLenOfMaxBlockSize equals MaxEncodedLen(maxBlockSize), but is + // hard coded to be a const instead of a variable, so that obufLen can also + // be a const. Their equivalence is confirmed by + // TestMaxEncodedLenOfMaxBlockSize. 
+ maxEncodedLenOfMaxBlockSize = 76490 + + obufHeaderLen = len(magicChunk) + checksumSize + chunkHeaderSize + obufLen = obufHeaderLen + maxEncodedLenOfMaxBlockSize +) + +const ( + chunkTypeCompressedData = 0x00 + chunkTypeUncompressedData = 0x01 + chunkTypePadding = 0xfe + chunkTypeStreamIdentifier = 0xff +) + +var crcTable = crc32.MakeTable(crc32.Castagnoli) + +// crc implements the checksum specified in section 3 of +// https://github.com/google/snappy/blob/master/framing_format.txt +func crc(b []byte) uint32 { + c := crc32.Update(0, crcTable, b) + return uint32(c>>15|c<<17) + 0xa282ead8 +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/.codecov.yml b/terraform-server/vendor/github.com/json-iterator/go/.codecov.yml new file mode 100644 index 00000000..955dc0be --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/.codecov.yml @@ -0,0 +1,3 @@ +ignore: + - "output_tests/.*" + diff --git a/terraform-server/vendor/github.com/json-iterator/go/.gitignore b/terraform-server/vendor/github.com/json-iterator/go/.gitignore new file mode 100644 index 00000000..15556530 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/.gitignore @@ -0,0 +1,4 @@ +/vendor +/bug_test.go +/coverage.txt +/.idea diff --git a/terraform-server/vendor/github.com/json-iterator/go/.travis.yml b/terraform-server/vendor/github.com/json-iterator/go/.travis.yml new file mode 100644 index 00000000..449e67cd --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.8.x + - 1.x + +before_install: + - go get -t -v ./... 
+ +script: + - ./test.sh + +after_success: + - bash <(curl -s https://codecov.io/bash) diff --git a/terraform-server/vendor/github.com/json-iterator/go/Gopkg.lock b/terraform-server/vendor/github.com/json-iterator/go/Gopkg.lock new file mode 100644 index 00000000..c8a9fbb3 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/Gopkg.lock @@ -0,0 +1,21 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/modern-go/concurrent" + packages = ["."] + revision = "e0a39a4cb4216ea8db28e22a69f4ec25610d513a" + version = "1.0.0" + +[[projects]] + name = "github.com/modern-go/reflect2" + packages = ["."] + revision = "4b7aa43c6742a2c18fdef89dd197aaae7dac7ccd" + version = "1.0.1" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "ea54a775e5a354cb015502d2e7aa4b74230fc77e894f34a838b268c25ec8eeb8" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/terraform-server/vendor/github.com/json-iterator/go/Gopkg.toml b/terraform-server/vendor/github.com/json-iterator/go/Gopkg.toml new file mode 100644 index 00000000..313a0f88 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/Gopkg.toml @@ -0,0 +1,26 @@ +# Gopkg.toml example +# +# Refer to https://github.com/golang/dep/blob/master/docs/Gopkg.toml.md +# for detailed Gopkg.toml documentation. 
+# +# required = ["github.com/user/thing/cmd/thing"] +# ignored = ["github.com/user/project/pkgX", "bitbucket.org/user/project/pkgA/pkgY"] +# +# [[constraint]] +# name = "github.com/user/project" +# version = "1.0.0" +# +# [[constraint]] +# name = "github.com/user/project2" +# branch = "dev" +# source = "github.com/myfork/project2" +# +# [[override]] +# name = "github.com/x/y" +# version = "2.4.0" + +ignored = ["github.com/davecgh/go-spew*","github.com/google/gofuzz*","github.com/stretchr/testify*"] + +[[constraint]] + name = "github.com/modern-go/reflect2" + version = "1.0.1" diff --git a/terraform-server/vendor/github.com/json-iterator/go/LICENSE b/terraform-server/vendor/github.com/json-iterator/go/LICENSE new file mode 100644 index 00000000..2cf4f5ab --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/LICENSE @@ -0,0 +1,21 @@ +MIT License + +Copyright (c) 2016 json-iterator + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal +in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. 
diff --git a/terraform-server/vendor/github.com/json-iterator/go/README.md b/terraform-server/vendor/github.com/json-iterator/go/README.md new file mode 100644 index 00000000..50d56ffb --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/README.md @@ -0,0 +1,87 @@ +[![Sourcegraph](https://sourcegraph.com/github.com/json-iterator/go/-/badge.svg)](https://sourcegraph.com/github.com/json-iterator/go?badge) +[![GoDoc](http://img.shields.io/badge/go-documentation-blue.svg?style=flat-square)](http://godoc.org/github.com/json-iterator/go) +[![Build Status](https://travis-ci.org/json-iterator/go.svg?branch=master)](https://travis-ci.org/json-iterator/go) +[![codecov](https://codecov.io/gh/json-iterator/go/branch/master/graph/badge.svg)](https://codecov.io/gh/json-iterator/go) +[![rcard](https://goreportcard.com/badge/github.com/json-iterator/go)](https://goreportcard.com/report/github.com/json-iterator/go) +[![License](http://img.shields.io/badge/license-mit-blue.svg?style=flat-square)](https://raw.githubusercontent.com/json-iterator/go/master/LICENSE) +[![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) + +A high-performance 100% compatible drop-in replacement of "encoding/json" + +You can also use thrift like JSON using [thrift-iterator](https://github.com/thrift-iterator/go) + +# Benchmark + +![benchmark](http://jsoniter.com/benchmarks/go-benchmark.png) + +Source code: https://github.com/json-iterator/go-benchmark/blob/master/src/github.com/json-iterator/go-benchmark/benchmark_medium_payload_test.go + +Raw Result (easyjson requires static code generation) + +| | ns/op | allocation bytes | allocation times | +| --- | --- | --- | --- | +| std decode | 35510 ns/op | 1960 B/op | 99 allocs/op | +| easyjson decode | 8499 ns/op | 160 B/op | 4 allocs/op | +| jsoniter decode | 5623 ns/op | 160 B/op | 3 allocs/op | +| std encode | 2213 ns/op | 712 B/op | 5 allocs/op | +| easyjson encode | 883 ns/op | 576 B/op | 
3 allocs/op | +| jsoniter encode | 837 ns/op | 384 B/op | 4 allocs/op | + +Always benchmark with your own workload. +The result depends heavily on the data input. + +# Usage + +100% compatibility with standard lib + +Replace + +```go +import "encoding/json" +json.Marshal(&data) +``` + +with + +```go +import "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Marshal(&data) +``` + +Replace + +```go +import "encoding/json" +json.Unmarshal(input, &data) +``` + +with + +```go +import "github.com/json-iterator/go" + +var json = jsoniter.ConfigCompatibleWithStandardLibrary +json.Unmarshal(input, &data) +``` + +[More documentation](http://jsoniter.com/migrate-from-go-std.html) + +# How to get + +``` +go get github.com/json-iterator/go +``` + +# Contribution Welcomed ! + +Contributors + +* [thockin](https://github.com/thockin) +* [mattn](https://github.com/mattn) +* [cch123](https://github.com/cch123) +* [Oleg Shaldybin](https://github.com/olegshaldybin) +* [Jason Toffaletti](https://github.com/toffaletti) + +Report issue or pull request, or email taowen@gmail.com, or [![Gitter chat](https://badges.gitter.im/gitterHQ/gitter.png)](https://gitter.im/json-iterator/Lobby) diff --git a/terraform-server/vendor/github.com/json-iterator/go/adapter.go b/terraform-server/vendor/github.com/json-iterator/go/adapter.go new file mode 100644 index 00000000..92d2cc4a --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/adapter.go @@ -0,0 +1,150 @@ +package jsoniter + +import ( + "bytes" + "io" +) + +// RawMessage to make replace json with jsoniter +type RawMessage []byte + +// Unmarshal adapts to json/encoding Unmarshal API +// +// Unmarshal parses the JSON-encoded data and stores the result in the value pointed to by v. 
+// Refer to https://godoc.org/encoding/json#Unmarshal for more information +func Unmarshal(data []byte, v interface{}) error { + return ConfigDefault.Unmarshal(data, v) +} + +// UnmarshalFromString is a convenient method to read from string instead of []byte +func UnmarshalFromString(str string, v interface{}) error { + return ConfigDefault.UnmarshalFromString(str, v) +} + +// Get quick method to get value from deeply nested JSON structure +func Get(data []byte, path ...interface{}) Any { + return ConfigDefault.Get(data, path...) +} + +// Marshal adapts to json/encoding Marshal API +// +// Marshal returns the JSON encoding of v, adapts to json/encoding Marshal API +// Refer to https://godoc.org/encoding/json#Marshal for more information +func Marshal(v interface{}) ([]byte, error) { + return ConfigDefault.Marshal(v) +} + +// MarshalIndent same as json.MarshalIndent. Prefix is not supported. +func MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + return ConfigDefault.MarshalIndent(v, prefix, indent) +} + +// MarshalToString convenient method to write as string instead of []byte +func MarshalToString(v interface{}) (string, error) { + return ConfigDefault.MarshalToString(v) +} + +// NewDecoder adapts to json/stream NewDecoder API. +// +// NewDecoder returns a new decoder that reads from r. +// +// Instead of a json/encoding Decoder, an Decoder is returned +// Refer to https://godoc.org/encoding/json#NewDecoder for more information +func NewDecoder(reader io.Reader) *Decoder { + return ConfigDefault.NewDecoder(reader) +} + +// Decoder reads and decodes JSON values from an input stream. 
+// Decoder provides identical APIs with json/stream Decoder (Token() and UseNumber() are in progress) +type Decoder struct { + iter *Iterator +} + +// Decode decode JSON into interface{} +func (adapter *Decoder) Decode(obj interface{}) error { + if adapter.iter.head == adapter.iter.tail && adapter.iter.reader != nil { + if !adapter.iter.loadMore() { + return io.EOF + } + } + adapter.iter.ReadVal(obj) + err := adapter.iter.Error + if err == io.EOF { + return nil + } + return adapter.iter.Error +} + +// More is there more? +func (adapter *Decoder) More() bool { + iter := adapter.iter + if iter.Error != nil { + return false + } + c := iter.nextToken() + if c == 0 { + return false + } + iter.unreadByte() + return c != ']' && c != '}' +} + +// Buffered remaining buffer +func (adapter *Decoder) Buffered() io.Reader { + remaining := adapter.iter.buf[adapter.iter.head:adapter.iter.tail] + return bytes.NewReader(remaining) +} + +// UseNumber causes the Decoder to unmarshal a number into an interface{} as a +// Number instead of as a float64. +func (adapter *Decoder) UseNumber() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.UseNumber = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// DisallowUnknownFields causes the Decoder to return an error when the destination +// is a struct and the input contains object keys which do not match any +// non-ignored, exported fields in the destination. 
+func (adapter *Decoder) DisallowUnknownFields() { + cfg := adapter.iter.cfg.configBeforeFrozen + cfg.DisallowUnknownFields = true + adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions) +} + +// NewEncoder same as json.NewEncoder +func NewEncoder(writer io.Writer) *Encoder { + return ConfigDefault.NewEncoder(writer) +} + +// Encoder same as json.Encoder +type Encoder struct { + stream *Stream +} + +// Encode encode interface{} as JSON to io.Writer +func (adapter *Encoder) Encode(val interface{}) error { + adapter.stream.WriteVal(val) + adapter.stream.WriteRaw("\n") + adapter.stream.Flush() + return adapter.stream.Error +} + +// SetIndent set the indention. Prefix is not supported +func (adapter *Encoder) SetIndent(prefix, indent string) { + config := adapter.stream.cfg.configBeforeFrozen + config.IndentionStep = len(indent) + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// SetEscapeHTML escape html by default, set to false to disable +func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) { + config := adapter.stream.cfg.configBeforeFrozen + config.EscapeHTML = escapeHTML + adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions) +} + +// Valid reports whether data is a valid JSON encoding. +func Valid(data []byte) bool { + return ConfigDefault.Valid(data) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any.go b/terraform-server/vendor/github.com/json-iterator/go/any.go new file mode 100644 index 00000000..f6b8aeab --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any.go @@ -0,0 +1,325 @@ +package jsoniter + +import ( + "errors" + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "strconv" + "unsafe" +) + +// Any generic object representation. +// The lazy json implementation holds []byte and parse lazily. 
+type Any interface { + LastError() error + ValueType() ValueType + MustBeValid() Any + ToBool() bool + ToInt() int + ToInt32() int32 + ToInt64() int64 + ToUint() uint + ToUint32() uint32 + ToUint64() uint64 + ToFloat32() float32 + ToFloat64() float64 + ToString() string + ToVal(val interface{}) + Get(path ...interface{}) Any + Size() int + Keys() []string + GetInterface() interface{} + WriteTo(stream *Stream) +} + +type baseAny struct{} + +func (any *baseAny) Get(path ...interface{}) Any { + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *baseAny) Size() int { + return 0 +} + +func (any *baseAny) Keys() []string { + return []string{} +} + +func (any *baseAny) ToVal(obj interface{}) { + panic("not implemented") +} + +// WrapInt32 turn int32 into Any interface +func WrapInt32(val int32) Any { + return &int32Any{baseAny{}, val} +} + +// WrapInt64 turn int64 into Any interface +func WrapInt64(val int64) Any { + return &int64Any{baseAny{}, val} +} + +// WrapUint32 turn uint32 into Any interface +func WrapUint32(val uint32) Any { + return &uint32Any{baseAny{}, val} +} + +// WrapUint64 turn uint64 into Any interface +func WrapUint64(val uint64) Any { + return &uint64Any{baseAny{}, val} +} + +// WrapFloat64 turn float64 into Any interface +func WrapFloat64(val float64) Any { + return &floatAny{baseAny{}, val} +} + +// WrapString turn string into Any interface +func WrapString(val string) Any { + return &stringAny{baseAny{}, val} +} + +// Wrap turn a go object into Any interface +func Wrap(val interface{}) Any { + if val == nil { + return &nilAny{} + } + asAny, isAny := val.(Any) + if isAny { + return asAny + } + typ := reflect2.TypeOf(val) + switch typ.Kind() { + case reflect.Slice: + return wrapArray(val) + case reflect.Struct: + return wrapStruct(val) + case reflect.Map: + return wrapMap(val) + case reflect.String: + return WrapString(val.(string)) + case reflect.Int: + if strconv.IntSize == 32 { + return 
WrapInt32(int32(val.(int))) + } + return WrapInt64(int64(val.(int))) + case reflect.Int8: + return WrapInt32(int32(val.(int8))) + case reflect.Int16: + return WrapInt32(int32(val.(int16))) + case reflect.Int32: + return WrapInt32(val.(int32)) + case reflect.Int64: + return WrapInt64(val.(int64)) + case reflect.Uint: + if strconv.IntSize == 32 { + return WrapUint32(uint32(val.(uint))) + } + return WrapUint64(uint64(val.(uint))) + case reflect.Uintptr: + if ptrSize == 32 { + return WrapUint32(uint32(val.(uintptr))) + } + return WrapUint64(uint64(val.(uintptr))) + case reflect.Uint8: + return WrapUint32(uint32(val.(uint8))) + case reflect.Uint16: + return WrapUint32(uint32(val.(uint16))) + case reflect.Uint32: + return WrapUint32(uint32(val.(uint32))) + case reflect.Uint64: + return WrapUint64(val.(uint64)) + case reflect.Float32: + return WrapFloat64(float64(val.(float32))) + case reflect.Float64: + return WrapFloat64(val.(float64)) + case reflect.Bool: + if val.(bool) == true { + return &trueAny{} + } + return &falseAny{} + } + return &invalidAny{baseAny{}, fmt.Errorf("unsupported type: %v", typ)} +} + +// ReadAny read next JSON element as an Any object. It is a better json.RawMessage. 
+func (iter *Iterator) ReadAny() Any { + return iter.readAny() +} + +func (iter *Iterator) readAny() Any { + c := iter.nextToken() + switch c { + case '"': + iter.unreadByte() + return &stringAny{baseAny{}, iter.ReadString()} + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + return &nilAny{} + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + return &trueAny{} + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + return &falseAny{} + case '{': + return iter.readObjectAny() + case '[': + return iter.readArrayAny() + case '-': + return iter.readNumberAny(false) + case 0: + return &invalidAny{baseAny{}, errors.New("input is empty")} + default: + return iter.readNumberAny(true) + } +} + +func (iter *Iterator) readNumberAny(positive bool) Any { + iter.startCapture(iter.head - 1) + iter.skipNumber() + lazyBuf := iter.stopCapture() + return &numberLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readObjectAny() Any { + iter.startCapture(iter.head - 1) + iter.skipObject() + lazyBuf := iter.stopCapture() + return &objectLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func (iter *Iterator) readArrayAny() Any { + iter.startCapture(iter.head - 1) + iter.skipArray() + lazyBuf := iter.stopCapture() + return &arrayLazyAny{baseAny{}, iter.cfg, lazyBuf, nil} +} + +func locateObjectField(iter *Iterator, target string) []byte { + var found []byte + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + if field == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + return true + }) + return found +} + +func locateArrayElement(iter *Iterator, target int) []byte { + var found []byte + n := 0 + iter.ReadArrayCB(func(iter *Iterator) bool { + if n == target { + found = iter.SkipAndReturnBytes() + return false + } + iter.Skip() + n++ + return true + }) + return found +} + +func locatePath(iter *Iterator, path []interface{}) Any { + for i, pathKeyObj := range path { + switch pathKey := pathKeyObj.(type) { + 
case string: + valueBytes := locateObjectField(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int: + valueBytes := locateArrayElement(iter, pathKey) + if valueBytes == nil { + return newInvalidAny(path[i:]) + } + iter.ResetBytes(valueBytes) + case int32: + if '*' == pathKey { + return iter.readAny().Get(path[i:]...) + } + return newInvalidAny(path[i:]) + default: + return newInvalidAny(path[i:]) + } + } + if iter.Error != nil && iter.Error != io.EOF { + return &invalidAny{baseAny{}, iter.Error} + } + return iter.readAny() +} + +var anyType = reflect2.TypeOfPtr((*Any)(nil)).Elem() + +func createDecoderOfAny(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +func createEncoderOfAny(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == anyType { + return &directAnyCodec{} + } + if typ.Implements(anyType) { + return &anyCodec{ + valType: typ, + } + } + return nil +} + +type anyCodec struct { + valType reflect2.Type +} + +func (codec *anyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + panic("not implemented") +} + +func (codec *anyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + any.WriteTo(stream) +} + +func (codec *anyCodec) IsEmpty(ptr unsafe.Pointer) bool { + obj := codec.valType.UnsafeIndirect(ptr) + any := obj.(Any) + return any.Size() == 0 +} + +type directAnyCodec struct { +} + +func (codec *directAnyCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *(*Any)(ptr) = iter.readAny() +} + +func (codec *directAnyCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + any := *(*Any)(ptr) + if any == nil { + stream.WriteNil() + return + } + any.WriteTo(stream) +} + +func (codec *directAnyCodec) IsEmpty(ptr unsafe.Pointer) bool { + any := *(*Any)(ptr) + return any.Size() == 0 +} diff --git 
a/terraform-server/vendor/github.com/json-iterator/go/any_array.go b/terraform-server/vendor/github.com/json-iterator/go/any_array.go new file mode 100644 index 00000000..0449e9aa --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_array.go @@ -0,0 +1,278 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type arrayLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *arrayLazyAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayLazyAny) MustBeValid() Any { + return any +} + +func (any *arrayLazyAny) LastError() error { + return any.err +} + +func (any *arrayLazyAny) ToBool() bool { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.ReadArray() +} + +func (any *arrayLazyAny) ToInt() int { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt32() int32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToInt64() int64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint() uint { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint32() uint32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToUint64() uint64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat32() float32 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToFloat64() float64 { + if any.ToBool() { + return 1 + } + return 0 +} + +func (any *arrayLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *arrayLazyAny) ToVal(val interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(val) +} + +func (any *arrayLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + iter := any.cfg.BorrowIterator(any.buf) + 
defer any.cfg.ReturnIterator(iter) + valueBytes := locateArrayElement(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + arr := make([]Any, 0) + iter.ReadArrayCB(func(iter *Iterator) bool { + found := iter.readAny().Get(path[1:]...) + if found.ValueType() != InvalidValue { + arr = append(arr, found) + } + return true + }) + return wrapArray(arr) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadArrayCB(func(iter *Iterator) bool { + size++ + iter.Skip() + return true + }) + return size +} + +func (any *arrayLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *arrayLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type arrayAny struct { + baseAny + val reflect.Value +} + +func wrapArray(val interface{}) *arrayAny { + return &arrayAny{baseAny{}, reflect.ValueOf(val)} +} + +func (any *arrayAny) ValueType() ValueType { + return ArrayValue +} + +func (any *arrayAny) MustBeValid() Any { + return any +} + +func (any *arrayAny) LastError() error { + return nil +} + +func (any *arrayAny) ToBool() bool { + return any.val.Len() != 0 +} + +func (any *arrayAny) ToInt() int { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt32() int32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToInt64() int64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint() uint { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToUint32() uint32 { + if any.val.Len() == 0 { + return 
0 + } + return 1 +} + +func (any *arrayAny) ToUint64() uint64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat32() float32 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToFloat64() float64 { + if any.val.Len() == 0 { + return 0 + } + return 1 +} + +func (any *arrayAny) ToString() string { + str, _ := MarshalToString(any.val.Interface()) + return str +} + +func (any *arrayAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int: + if firstPath < 0 || firstPath >= any.val.Len() { + return newInvalidAny(path) + } + return Wrap(any.val.Index(firstPath).Interface()) + case int32: + if '*' == firstPath { + mappedAll := make([]Any, 0) + for i := 0; i < any.val.Len(); i++ { + mapped := Wrap(any.val.Index(i).Interface()).Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll = append(mappedAll, mapped) + } + } + return wrapArray(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *arrayAny) Size() int { + return any.val.Len() +} + +func (any *arrayAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *arrayAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_bool.go b/terraform-server/vendor/github.com/json-iterator/go/any_bool.go new file mode 100644 index 00000000..9452324a --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_bool.go @@ -0,0 +1,137 @@ +package jsoniter + +type trueAny struct { + baseAny +} + +func (any *trueAny) LastError() error { + return nil +} + +func (any *trueAny) ToBool() bool { + return true +} + +func (any *trueAny) ToInt() int { + return 1 +} + +func (any *trueAny) ToInt32() int32 { + return 1 +} + +func (any *trueAny) ToInt64() int64 { + return 1 +} + +func (any *trueAny) ToUint() uint { + return 1 +} + +func (any 
*trueAny) ToUint32() uint32 { + return 1 +} + +func (any *trueAny) ToUint64() uint64 { + return 1 +} + +func (any *trueAny) ToFloat32() float32 { + return 1 +} + +func (any *trueAny) ToFloat64() float64 { + return 1 +} + +func (any *trueAny) ToString() string { + return "true" +} + +func (any *trueAny) WriteTo(stream *Stream) { + stream.WriteTrue() +} + +func (any *trueAny) Parse() *Iterator { + return nil +} + +func (any *trueAny) GetInterface() interface{} { + return true +} + +func (any *trueAny) ValueType() ValueType { + return BoolValue +} + +func (any *trueAny) MustBeValid() Any { + return any +} + +type falseAny struct { + baseAny +} + +func (any *falseAny) LastError() error { + return nil +} + +func (any *falseAny) ToBool() bool { + return false +} + +func (any *falseAny) ToInt() int { + return 0 +} + +func (any *falseAny) ToInt32() int32 { + return 0 +} + +func (any *falseAny) ToInt64() int64 { + return 0 +} + +func (any *falseAny) ToUint() uint { + return 0 +} + +func (any *falseAny) ToUint32() uint32 { + return 0 +} + +func (any *falseAny) ToUint64() uint64 { + return 0 +} + +func (any *falseAny) ToFloat32() float32 { + return 0 +} + +func (any *falseAny) ToFloat64() float64 { + return 0 +} + +func (any *falseAny) ToString() string { + return "false" +} + +func (any *falseAny) WriteTo(stream *Stream) { + stream.WriteFalse() +} + +func (any *falseAny) Parse() *Iterator { + return nil +} + +func (any *falseAny) GetInterface() interface{} { + return false +} + +func (any *falseAny) ValueType() ValueType { + return BoolValue +} + +func (any *falseAny) MustBeValid() Any { + return any +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_float.go b/terraform-server/vendor/github.com/json-iterator/go/any_float.go new file mode 100644 index 00000000..35fdb094 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_float.go @@ -0,0 +1,83 @@ +package jsoniter + +import ( + "strconv" +) + +type floatAny struct { + baseAny + 
val float64 +} + +func (any *floatAny) Parse() *Iterator { + return nil +} + +func (any *floatAny) ValueType() ValueType { + return NumberValue +} + +func (any *floatAny) MustBeValid() Any { + return any +} + +func (any *floatAny) LastError() error { + return nil +} + +func (any *floatAny) ToBool() bool { + return any.ToFloat64() != 0 +} + +func (any *floatAny) ToInt() int { + return int(any.val) +} + +func (any *floatAny) ToInt32() int32 { + return int32(any.val) +} + +func (any *floatAny) ToInt64() int64 { + return int64(any.val) +} + +func (any *floatAny) ToUint() uint { + if any.val > 0 { + return uint(any.val) + } + return 0 +} + +func (any *floatAny) ToUint32() uint32 { + if any.val > 0 { + return uint32(any.val) + } + return 0 +} + +func (any *floatAny) ToUint64() uint64 { + if any.val > 0 { + return uint64(any.val) + } + return 0 +} + +func (any *floatAny) ToFloat32() float32 { + return float32(any.val) +} + +func (any *floatAny) ToFloat64() float64 { + return any.val +} + +func (any *floatAny) ToString() string { + return strconv.FormatFloat(any.val, 'E', -1, 64) +} + +func (any *floatAny) WriteTo(stream *Stream) { + stream.WriteFloat64(any.val) +} + +func (any *floatAny) GetInterface() interface{} { + return any.val +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_int32.go b/terraform-server/vendor/github.com/json-iterator/go/any_int32.go new file mode 100644 index 00000000..1b56f399 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_int32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int32Any struct { + baseAny + val int32 +} + +func (any *int32Any) LastError() error { + return nil +} + +func (any *int32Any) ValueType() ValueType { + return NumberValue +} + +func (any *int32Any) MustBeValid() Any { + return any +} + +func (any *int32Any) ToBool() bool { + return any.val != 0 +} + +func (any *int32Any) ToInt() int { + return int(any.val) +} + +func (any *int32Any) ToInt32() int32 { 
+ return any.val +} + +func (any *int32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *int32Any) ToUint() uint { + return uint(any.val) +} + +func (any *int32Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *int32Any) WriteTo(stream *Stream) { + stream.WriteInt32(any.val) +} + +func (any *int32Any) Parse() *Iterator { + return nil +} + +func (any *int32Any) GetInterface() interface{} { + return any.val +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_int64.go b/terraform-server/vendor/github.com/json-iterator/go/any_int64.go new file mode 100644 index 00000000..c440d72b --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_int64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type int64Any struct { + baseAny + val int64 +} + +func (any *int64Any) LastError() error { + return nil +} + +func (any *int64Any) ValueType() ValueType { + return NumberValue +} + +func (any *int64Any) MustBeValid() Any { + return any +} + +func (any *int64Any) ToBool() bool { + return any.val != 0 +} + +func (any *int64Any) ToInt() int { + return int(any.val) +} + +func (any *int64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *int64Any) ToInt64() int64 { + return any.val +} + +func (any *int64Any) ToUint() uint { + return uint(any.val) +} + +func (any *int64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *int64Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *int64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *int64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *int64Any) ToString() string 
{ + return strconv.FormatInt(any.val, 10) +} + +func (any *int64Any) WriteTo(stream *Stream) { + stream.WriteInt64(any.val) +} + +func (any *int64Any) Parse() *Iterator { + return nil +} + +func (any *int64Any) GetInterface() interface{} { + return any.val +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_invalid.go b/terraform-server/vendor/github.com/json-iterator/go/any_invalid.go new file mode 100644 index 00000000..1d859eac --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_invalid.go @@ -0,0 +1,82 @@ +package jsoniter + +import "fmt" + +type invalidAny struct { + baseAny + err error +} + +func newInvalidAny(path []interface{}) *invalidAny { + return &invalidAny{baseAny{}, fmt.Errorf("%v not found", path)} +} + +func (any *invalidAny) LastError() error { + return any.err +} + +func (any *invalidAny) ValueType() ValueType { + return InvalidValue +} + +func (any *invalidAny) MustBeValid() Any { + panic(any.err) +} + +func (any *invalidAny) ToBool() bool { + return false +} + +func (any *invalidAny) ToInt() int { + return 0 +} + +func (any *invalidAny) ToInt32() int32 { + return 0 +} + +func (any *invalidAny) ToInt64() int64 { + return 0 +} + +func (any *invalidAny) ToUint() uint { + return 0 +} + +func (any *invalidAny) ToUint32() uint32 { + return 0 +} + +func (any *invalidAny) ToUint64() uint64 { + return 0 +} + +func (any *invalidAny) ToFloat32() float32 { + return 0 +} + +func (any *invalidAny) ToFloat64() float64 { + return 0 +} + +func (any *invalidAny) ToString() string { + return "" +} + +func (any *invalidAny) WriteTo(stream *Stream) { +} + +func (any *invalidAny) Get(path ...interface{}) Any { + if any.err == nil { + return &invalidAny{baseAny{}, fmt.Errorf("get %v from invalid", path)} + } + return &invalidAny{baseAny{}, fmt.Errorf("%v, get %v from invalid", any.err, path)} +} + +func (any *invalidAny) Parse() *Iterator { + return nil +} + +func (any *invalidAny) GetInterface() interface{} { + return nil 
+} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_nil.go b/terraform-server/vendor/github.com/json-iterator/go/any_nil.go new file mode 100644 index 00000000..d04cb54c --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_nil.go @@ -0,0 +1,69 @@ +package jsoniter + +type nilAny struct { + baseAny +} + +func (any *nilAny) LastError() error { + return nil +} + +func (any *nilAny) ValueType() ValueType { + return NilValue +} + +func (any *nilAny) MustBeValid() Any { + return any +} + +func (any *nilAny) ToBool() bool { + return false +} + +func (any *nilAny) ToInt() int { + return 0 +} + +func (any *nilAny) ToInt32() int32 { + return 0 +} + +func (any *nilAny) ToInt64() int64 { + return 0 +} + +func (any *nilAny) ToUint() uint { + return 0 +} + +func (any *nilAny) ToUint32() uint32 { + return 0 +} + +func (any *nilAny) ToUint64() uint64 { + return 0 +} + +func (any *nilAny) ToFloat32() float32 { + return 0 +} + +func (any *nilAny) ToFloat64() float64 { + return 0 +} + +func (any *nilAny) ToString() string { + return "" +} + +func (any *nilAny) WriteTo(stream *Stream) { + stream.WriteNil() +} + +func (any *nilAny) Parse() *Iterator { + return nil +} + +func (any *nilAny) GetInterface() interface{} { + return nil +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_number.go b/terraform-server/vendor/github.com/json-iterator/go/any_number.go new file mode 100644 index 00000000..9d1e901a --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_number.go @@ -0,0 +1,123 @@ +package jsoniter + +import ( + "io" + "unsafe" +) + +type numberLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *numberLazyAny) ValueType() ValueType { + return NumberValue +} + +func (any *numberLazyAny) MustBeValid() Any { + return any +} + +func (any *numberLazyAny) LastError() error { + return any.err +} + +func (any *numberLazyAny) ToBool() bool { + return any.ToFloat64() != 0 
+} + +func (any *numberLazyAny) ToInt() int { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt32() int32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToInt64() int64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadInt64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint() uint { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint32() uint32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToUint64() uint64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadUint64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat32() float32 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat32() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) ToFloat64() float64 { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + val := iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + any.err = iter.Error + } + return val +} + +func (any *numberLazyAny) 
ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *numberLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *numberLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_object.go b/terraform-server/vendor/github.com/json-iterator/go/any_object.go new file mode 100644 index 00000000..c44ef5c9 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_object.go @@ -0,0 +1,374 @@ +package jsoniter + +import ( + "reflect" + "unsafe" +) + +type objectLazyAny struct { + baseAny + cfg *frozenConfig + buf []byte + err error +} + +func (any *objectLazyAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectLazyAny) MustBeValid() Any { + return any +} + +func (any *objectLazyAny) LastError() error { + return any.err +} + +func (any *objectLazyAny) ToBool() bool { + return true +} + +func (any *objectLazyAny) ToInt() int { + return 0 +} + +func (any *objectLazyAny) ToInt32() int32 { + return 0 +} + +func (any *objectLazyAny) ToInt64() int64 { + return 0 +} + +func (any *objectLazyAny) ToUint() uint { + return 0 +} + +func (any *objectLazyAny) ToUint32() uint32 { + return 0 +} + +func (any *objectLazyAny) ToUint64() uint64 { + return 0 +} + +func (any *objectLazyAny) ToFloat32() float32 { + return 0 +} + +func (any *objectLazyAny) ToFloat64() float64 { + return 0 +} + +func (any *objectLazyAny) ToString() string { + return *(*string)(unsafe.Pointer(&any.buf)) +} + +func (any *objectLazyAny) ToVal(obj interface{}) { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadVal(obj) +} + +func (any *objectLazyAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + iter := any.cfg.BorrowIterator(any.buf) + defer 
any.cfg.ReturnIterator(iter) + valueBytes := locateObjectField(iter, firstPath) + if valueBytes == nil { + return newInvalidAny(path) + } + iter.ResetBytes(valueBytes) + return locatePath(iter, path[1:]) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + mapped := locatePath(iter, path[1:]) + if mapped.ValueType() != InvalidValue { + mappedAll[field] = mapped + } + return true + }) + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectLazyAny) Keys() []string { + keys := []string{} + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadMapCB(func(iter *Iterator, field string) bool { + iter.Skip() + keys = append(keys, field) + return true + }) + return keys +} + +func (any *objectLazyAny) Size() int { + size := 0 + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + size++ + return true + }) + return size +} + +func (any *objectLazyAny) WriteTo(stream *Stream) { + stream.Write(any.buf) +} + +func (any *objectLazyAny) GetInterface() interface{} { + iter := any.cfg.BorrowIterator(any.buf) + defer any.cfg.ReturnIterator(iter) + return iter.Read() +} + +type objectAny struct { + baseAny + err error + val reflect.Value +} + +func wrapStruct(val interface{}) *objectAny { + return &objectAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *objectAny) ValueType() ValueType { + return ObjectValue +} + +func (any *objectAny) MustBeValid() Any { + return any +} + +func (any *objectAny) Parse() *Iterator { + return nil +} + +func (any *objectAny) LastError() error { + return any.err +} + +func (any *objectAny) ToBool() bool { + return any.val.NumField() != 0 +} + +func (any *objectAny) ToInt() int { + 
return 0 +} + +func (any *objectAny) ToInt32() int32 { + return 0 +} + +func (any *objectAny) ToInt64() int64 { + return 0 +} + +func (any *objectAny) ToUint() uint { + return 0 +} + +func (any *objectAny) ToUint32() uint32 { + return 0 +} + +func (any *objectAny) ToUint64() uint64 { + return 0 +} + +func (any *objectAny) ToFloat32() float32 { + return 0 +} + +func (any *objectAny) ToFloat64() float64 { + return 0 +} + +func (any *objectAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *objectAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case string: + field := any.val.FieldByName(firstPath) + if !field.IsValid() { + return newInvalidAny(path) + } + return Wrap(field.Interface()) + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for i := 0; i < any.val.NumField(); i++ { + field := any.val.Field(i) + if field.CanInterface() { + mapped := Wrap(field.Interface()).Get(path[1:]...) 
+ if mapped.ValueType() != InvalidValue { + mappedAll[any.val.Type().Field(i).Name] = mapped + } + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + return newInvalidAny(path) + } +} + +func (any *objectAny) Keys() []string { + keys := make([]string, 0, any.val.NumField()) + for i := 0; i < any.val.NumField(); i++ { + keys = append(keys, any.val.Type().Field(i).Name) + } + return keys +} + +func (any *objectAny) Size() int { + return any.val.NumField() +} + +func (any *objectAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *objectAny) GetInterface() interface{} { + return any.val.Interface() +} + +type mapAny struct { + baseAny + err error + val reflect.Value +} + +func wrapMap(val interface{}) *mapAny { + return &mapAny{baseAny{}, nil, reflect.ValueOf(val)} +} + +func (any *mapAny) ValueType() ValueType { + return ObjectValue +} + +func (any *mapAny) MustBeValid() Any { + return any +} + +func (any *mapAny) Parse() *Iterator { + return nil +} + +func (any *mapAny) LastError() error { + return any.err +} + +func (any *mapAny) ToBool() bool { + return true +} + +func (any *mapAny) ToInt() int { + return 0 +} + +func (any *mapAny) ToInt32() int32 { + return 0 +} + +func (any *mapAny) ToInt64() int64 { + return 0 +} + +func (any *mapAny) ToUint() uint { + return 0 +} + +func (any *mapAny) ToUint32() uint32 { + return 0 +} + +func (any *mapAny) ToUint64() uint64 { + return 0 +} + +func (any *mapAny) ToFloat32() float32 { + return 0 +} + +func (any *mapAny) ToFloat64() float64 { + return 0 +} + +func (any *mapAny) ToString() string { + str, err := MarshalToString(any.val.Interface()) + any.err = err + return str +} + +func (any *mapAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + switch firstPath := path[0].(type) { + case int32: + if '*' == firstPath { + mappedAll := map[string]Any{} + for _, key := range any.val.MapKeys() { + keyAsStr := key.String() + element := 
Wrap(any.val.MapIndex(key).Interface()) + mapped := element.Get(path[1:]...) + if mapped.ValueType() != InvalidValue { + mappedAll[keyAsStr] = mapped + } + } + return wrapMap(mappedAll) + } + return newInvalidAny(path) + default: + value := any.val.MapIndex(reflect.ValueOf(firstPath)) + if !value.IsValid() { + return newInvalidAny(path) + } + return Wrap(value.Interface()) + } +} + +func (any *mapAny) Keys() []string { + keys := make([]string, 0, any.val.Len()) + for _, key := range any.val.MapKeys() { + keys = append(keys, key.String()) + } + return keys +} + +func (any *mapAny) Size() int { + return any.val.Len() +} + +func (any *mapAny) WriteTo(stream *Stream) { + stream.WriteVal(any.val) +} + +func (any *mapAny) GetInterface() interface{} { + return any.val.Interface() +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_str.go b/terraform-server/vendor/github.com/json-iterator/go/any_str.go new file mode 100644 index 00000000..a4b93c78 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_str.go @@ -0,0 +1,166 @@ +package jsoniter + +import ( + "fmt" + "strconv" +) + +type stringAny struct { + baseAny + val string +} + +func (any *stringAny) Get(path ...interface{}) Any { + if len(path) == 0 { + return any + } + return &invalidAny{baseAny{}, fmt.Errorf("GetIndex %v from simple value", path)} +} + +func (any *stringAny) Parse() *Iterator { + return nil +} + +func (any *stringAny) ValueType() ValueType { + return StringValue +} + +func (any *stringAny) MustBeValid() Any { + return any +} + +func (any *stringAny) LastError() error { + return nil +} + +func (any *stringAny) ToBool() bool { + str := any.ToString() + if str == "0" { + return false + } + for _, c := range str { + switch c { + case ' ', '\n', '\r', '\t': + default: + return true + } + } + return false +} + +func (any *stringAny) ToInt() int { + return int(any.ToInt64()) + +} + +func (any *stringAny) ToInt32() int32 { + return int32(any.ToInt64()) +} + +func 
(any *stringAny) ToInt64() int64 { + if any.val == "" { + return 0 + } + + flag := 1 + startPos := 0 + endPos := 0 + if any.val[0] == '+' || any.val[0] == '-' { + startPos = 1 + } + + if any.val[0] == '-' { + flag = -1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseInt(any.val[startPos:endPos], 10, 64) + return int64(flag) * parsed +} + +func (any *stringAny) ToUint() uint { + return uint(any.ToUint64()) +} + +func (any *stringAny) ToUint32() uint32 { + return uint32(any.ToUint64()) +} + +func (any *stringAny) ToUint64() uint64 { + if any.val == "" { + return 0 + } + + startPos := 0 + endPos := 0 + + if any.val[0] == '-' { + return 0 + } + if any.val[0] == '+' { + startPos = 1 + } + + for i := startPos; i < len(any.val); i++ { + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + break + } + } + parsed, _ := strconv.ParseUint(any.val[startPos:endPos], 10, 64) + return parsed +} + +func (any *stringAny) ToFloat32() float32 { + return float32(any.ToFloat64()) +} + +func (any *stringAny) ToFloat64() float64 { + if len(any.val) == 0 { + return 0 + } + + // first char invalid + if any.val[0] != '+' && any.val[0] != '-' && (any.val[0] > '9' || any.val[0] < '0') { + return 0 + } + + // extract valid num expression from string + // eg 123true => 123, -12.12xxa => -12.12 + endPos := 1 + for i := 1; i < len(any.val); i++ { + if any.val[i] == '.' 
|| any.val[i] == 'e' || any.val[i] == 'E' || any.val[i] == '+' || any.val[i] == '-' { + endPos = i + 1 + continue + } + + // end position is the first char which is not digit + if any.val[i] >= '0' && any.val[i] <= '9' { + endPos = i + 1 + } else { + endPos = i + break + } + } + parsed, _ := strconv.ParseFloat(any.val[:endPos], 64) + return parsed +} + +func (any *stringAny) ToString() string { + return any.val +} + +func (any *stringAny) WriteTo(stream *Stream) { + stream.WriteString(any.val) +} + +func (any *stringAny) GetInterface() interface{} { + return any.val +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_uint32.go b/terraform-server/vendor/github.com/json-iterator/go/any_uint32.go new file mode 100644 index 00000000..656bbd33 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_uint32.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint32Any struct { + baseAny + val uint32 +} + +func (any *uint32Any) LastError() error { + return nil +} + +func (any *uint32Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint32Any) MustBeValid() Any { + return any +} + +func (any *uint32Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint32Any) ToInt() int { + return int(any.val) +} + +func (any *uint32Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint32Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint32Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint32Any) ToUint32() uint32 { + return any.val +} + +func (any *uint32Any) ToUint64() uint64 { + return uint64(any.val) +} + +func (any *uint32Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint32Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint32Any) ToString() string { + return strconv.FormatInt(int64(any.val), 10) +} + +func (any *uint32Any) WriteTo(stream *Stream) { + stream.WriteUint32(any.val) +} + +func (any *uint32Any) 
Parse() *Iterator { + return nil +} + +func (any *uint32Any) GetInterface() interface{} { + return any.val +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/any_uint64.go b/terraform-server/vendor/github.com/json-iterator/go/any_uint64.go new file mode 100644 index 00000000..7df2fce3 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/any_uint64.go @@ -0,0 +1,74 @@ +package jsoniter + +import ( + "strconv" +) + +type uint64Any struct { + baseAny + val uint64 +} + +func (any *uint64Any) LastError() error { + return nil +} + +func (any *uint64Any) ValueType() ValueType { + return NumberValue +} + +func (any *uint64Any) MustBeValid() Any { + return any +} + +func (any *uint64Any) ToBool() bool { + return any.val != 0 +} + +func (any *uint64Any) ToInt() int { + return int(any.val) +} + +func (any *uint64Any) ToInt32() int32 { + return int32(any.val) +} + +func (any *uint64Any) ToInt64() int64 { + return int64(any.val) +} + +func (any *uint64Any) ToUint() uint { + return uint(any.val) +} + +func (any *uint64Any) ToUint32() uint32 { + return uint32(any.val) +} + +func (any *uint64Any) ToUint64() uint64 { + return any.val +} + +func (any *uint64Any) ToFloat32() float32 { + return float32(any.val) +} + +func (any *uint64Any) ToFloat64() float64 { + return float64(any.val) +} + +func (any *uint64Any) ToString() string { + return strconv.FormatUint(any.val, 10) +} + +func (any *uint64Any) WriteTo(stream *Stream) { + stream.WriteUint64(any.val) +} + +func (any *uint64Any) Parse() *Iterator { + return nil +} + +func (any *uint64Any) GetInterface() interface{} { + return any.val +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/build.sh b/terraform-server/vendor/github.com/json-iterator/go/build.sh new file mode 100644 index 00000000..b45ef688 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/build.sh @@ -0,0 +1,12 @@ +#!/bin/bash +set -e +set -x + +if [ ! 
-d /tmp/build-golang/src/github.com/json-iterator ]; then + mkdir -p /tmp/build-golang/src/github.com/json-iterator + ln -s $PWD /tmp/build-golang/src/github.com/json-iterator/go +fi +export GOPATH=/tmp/build-golang +go get -u github.com/golang/dep/cmd/dep +cd /tmp/build-golang/src/github.com/json-iterator/go +exec $GOPATH/bin/dep ensure -update diff --git a/terraform-server/vendor/github.com/json-iterator/go/config.go b/terraform-server/vendor/github.com/json-iterator/go/config.go new file mode 100644 index 00000000..8c58fcba --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/config.go @@ -0,0 +1,375 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "reflect" + "sync" + "unsafe" + + "github.com/modern-go/concurrent" + "github.com/modern-go/reflect2" +) + +// Config customize how the API should behave. +// The API is created from Config by Froze. +type Config struct { + IndentionStep int + MarshalFloatWith6Digits bool + EscapeHTML bool + SortMapKeys bool + UseNumber bool + DisallowUnknownFields bool + TagKey string + OnlyTaggedField bool + ValidateJsonRawMessage bool + ObjectFieldMustBeSimpleString bool + CaseSensitive bool +} + +// API the public interface of this package. +// Primary Marshal and Unmarshal. 
+type API interface { + IteratorPool + StreamPool + MarshalToString(v interface{}) (string, error) + Marshal(v interface{}) ([]byte, error) + MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) + UnmarshalFromString(str string, v interface{}) error + Unmarshal(data []byte, v interface{}) error + Get(data []byte, path ...interface{}) Any + NewEncoder(writer io.Writer) *Encoder + NewDecoder(reader io.Reader) *Decoder + Valid(data []byte) bool + RegisterExtension(extension Extension) + DecoderOf(typ reflect2.Type) ValDecoder + EncoderOf(typ reflect2.Type) ValEncoder +} + +// ConfigDefault the default API +var ConfigDefault = Config{ + EscapeHTML: true, +}.Froze() + +// ConfigCompatibleWithStandardLibrary tries to be 100% compatible with standard library behavior +var ConfigCompatibleWithStandardLibrary = Config{ + EscapeHTML: true, + SortMapKeys: true, + ValidateJsonRawMessage: true, +}.Froze() + +// ConfigFastest marshals float with only 6 digits precision +var ConfigFastest = Config{ + EscapeHTML: false, + MarshalFloatWith6Digits: true, // will lose precession + ObjectFieldMustBeSimpleString: true, // do not unescape object field +}.Froze() + +type frozenConfig struct { + configBeforeFrozen Config + sortMapKeys bool + indentionStep int + objectFieldMustBeSimpleString bool + onlyTaggedField bool + disallowUnknownFields bool + decoderCache *concurrent.Map + encoderCache *concurrent.Map + encoderExtension Extension + decoderExtension Extension + extraExtensions []Extension + streamPool *sync.Pool + iteratorPool *sync.Pool + caseSensitive bool +} + +func (cfg *frozenConfig) initCache() { + cfg.decoderCache = concurrent.NewMap() + cfg.encoderCache = concurrent.NewMap() +} + +func (cfg *frozenConfig) addDecoderToCache(cacheKey uintptr, decoder ValDecoder) { + cfg.decoderCache.Store(cacheKey, decoder) +} + +func (cfg *frozenConfig) addEncoderToCache(cacheKey uintptr, encoder ValEncoder) { + cfg.encoderCache.Store(cacheKey, encoder) +} + +func (cfg 
*frozenConfig) getDecoderFromCache(cacheKey uintptr) ValDecoder { + decoder, found := cfg.decoderCache.Load(cacheKey) + if found { + return decoder.(ValDecoder) + } + return nil +} + +func (cfg *frozenConfig) getEncoderFromCache(cacheKey uintptr) ValEncoder { + encoder, found := cfg.encoderCache.Load(cacheKey) + if found { + return encoder.(ValEncoder) + } + return nil +} + +var cfgCache = concurrent.NewMap() + +func getFrozenConfigFromCache(cfg Config) *frozenConfig { + obj, found := cfgCache.Load(cfg) + if found { + return obj.(*frozenConfig) + } + return nil +} + +func addFrozenConfigToCache(cfg Config, frozenConfig *frozenConfig) { + cfgCache.Store(cfg, frozenConfig) +} + +// Froze forge API from config +func (cfg Config) Froze() API { + api := &frozenConfig{ + sortMapKeys: cfg.SortMapKeys, + indentionStep: cfg.IndentionStep, + objectFieldMustBeSimpleString: cfg.ObjectFieldMustBeSimpleString, + onlyTaggedField: cfg.OnlyTaggedField, + disallowUnknownFields: cfg.DisallowUnknownFields, + caseSensitive: cfg.CaseSensitive, + } + api.streamPool = &sync.Pool{ + New: func() interface{} { + return NewStream(api, nil, 512) + }, + } + api.iteratorPool = &sync.Pool{ + New: func() interface{} { + return NewIterator(api) + }, + } + api.initCache() + encoderExtension := EncoderExtension{} + decoderExtension := DecoderExtension{} + if cfg.MarshalFloatWith6Digits { + api.marshalFloatWith6Digits(encoderExtension) + } + if cfg.EscapeHTML { + api.escapeHTML(encoderExtension) + } + if cfg.UseNumber { + api.useNumber(decoderExtension) + } + if cfg.ValidateJsonRawMessage { + api.validateJsonRawMessage(encoderExtension) + } + api.encoderExtension = encoderExtension + api.decoderExtension = decoderExtension + api.configBeforeFrozen = cfg + return api +} + +func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig { + api := getFrozenConfigFromCache(cfg) + if api != nil { + return api + } + api = cfg.Froze().(*frozenConfig) + for _, extension := range 
extraExtensions { + api.RegisterExtension(extension) + } + addFrozenConfigToCache(cfg, api) + return api +} + +func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) { + encoder := &funcEncoder{func(ptr unsafe.Pointer, stream *Stream) { + rawMessage := *(*json.RawMessage)(ptr) + iter := cfg.BorrowIterator([]byte(rawMessage)) + iter.Read() + if iter.Error != nil { + stream.WriteRaw("null") + } else { + cfg.ReturnIterator(iter) + stream.WriteRaw(string(rawMessage)) + } + }, func(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 + }} + extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder + extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder +} + +func (cfg *frozenConfig) useNumber(extension DecoderExtension) { + extension[reflect2.TypeOfPtr((*interface{})(nil)).Elem()] = &funcDecoder{func(ptr unsafe.Pointer, iter *Iterator) { + exitingValue := *((*interface{})(ptr)) + if exitingValue != nil && reflect.TypeOf(exitingValue).Kind() == reflect.Ptr { + iter.ReadVal(exitingValue) + return + } + if iter.WhatIsNext() == NumberValue { + *((*interface{})(ptr)) = json.Number(iter.readNumberAsString()) + } else { + *((*interface{})(ptr)) = iter.Read() + } + }} +} +func (cfg *frozenConfig) getTagKey() string { + tagKey := cfg.configBeforeFrozen.TagKey + if tagKey == "" { + return "json" + } + return tagKey +} + +func (cfg *frozenConfig) RegisterExtension(extension Extension) { + cfg.extraExtensions = append(cfg.extraExtensions, extension) + copied := cfg.configBeforeFrozen + cfg.configBeforeFrozen = copied +} + +type lossyFloat32Encoder struct { +} + +func (encoder *lossyFloat32Encoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32Lossy(*((*float32)(ptr))) +} + +func (encoder *lossyFloat32Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type lossyFloat64Encoder struct { +} + +func (encoder *lossyFloat64Encoder) Encode(ptr unsafe.Pointer, stream 
*Stream) { + stream.WriteFloat64Lossy(*((*float64)(ptr))) +} + +func (encoder *lossyFloat64Encoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +// EnableLossyFloatMarshalling keeps 10**(-6) precision +// for float variables for better performance. +func (cfg *frozenConfig) marshalFloatWith6Digits(extension EncoderExtension) { + // for better performance + extension[reflect2.TypeOfPtr((*float32)(nil)).Elem()] = &lossyFloat32Encoder{} + extension[reflect2.TypeOfPtr((*float64)(nil)).Elem()] = &lossyFloat64Encoder{} +} + +type htmlEscapedStringEncoder struct { +} + +func (encoder *htmlEscapedStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteStringWithHTMLEscaped(str) +} + +func (encoder *htmlEscapedStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +func (cfg *frozenConfig) escapeHTML(encoderExtension EncoderExtension) { + encoderExtension[reflect2.TypeOfPtr((*string)(nil)).Elem()] = &htmlEscapedStringEncoder{} +} + +func (cfg *frozenConfig) cleanDecoders() { + typeDecoders = map[string]ValDecoder{} + fieldDecoders = map[string]ValDecoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) cleanEncoders() { + typeEncoders = map[string]ValEncoder{} + fieldEncoders = map[string]ValEncoder{} + *cfg = *(cfg.configBeforeFrozen.Froze().(*frozenConfig)) +} + +func (cfg *frozenConfig) MarshalToString(v interface{}) (string, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return "", stream.Error + } + return string(stream.Buffer()), nil +} + +func (cfg *frozenConfig) Marshal(v interface{}) ([]byte, error) { + stream := cfg.BorrowStream(nil) + defer cfg.ReturnStream(stream) + stream.WriteVal(v) + if stream.Error != nil { + return nil, stream.Error + } + result := stream.Buffer() + copied := make([]byte, len(result)) + copy(copied, result) + return 
copied, nil +} + +func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]byte, error) { + if prefix != "" { + panic("prefix is not supported") + } + for _, r := range indent { + if r != ' ' { + panic("indent can only be space") + } + } + newCfg := cfg.configBeforeFrozen + newCfg.IndentionStep = len(indent) + return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v) +} + +func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error { + data := []byte(str) + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) Get(data []byte, path ...interface{}) Any { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + return locatePath(iter, path) +} + +func (cfg *frozenConfig) Unmarshal(data []byte, v interface{}) error { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.ReadVal(v) + c := iter.nextToken() + if c == 0 { + if iter.Error == io.EOF { + return nil + } + return iter.Error + } + iter.ReportError("Unmarshal", "there are bytes left after unmarshal") + return iter.Error +} + +func (cfg *frozenConfig) NewEncoder(writer io.Writer) *Encoder { + stream := NewStream(cfg, writer, 512) + return &Encoder{stream} +} + +func (cfg *frozenConfig) NewDecoder(reader io.Reader) *Decoder { + iter := Parse(cfg, reader, 512) + return &Decoder{iter} +} + +func (cfg *frozenConfig) Valid(data []byte) bool { + iter := cfg.BorrowIterator(data) + defer cfg.ReturnIterator(iter) + iter.Skip() + return iter.Error == nil +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md b/terraform-server/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md new file mode 100644 index 00000000..3095662b --- 
/dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/fuzzy_mode_convert_table.md @@ -0,0 +1,7 @@ +| json type \ dest type | bool | int | uint | float |string| +| --- | --- | --- | --- |--|--| +| number | positive => true
negative => true
zero => false| 23.2 => 23
-32.1 => -32| 12.1 => 12
-12.1 => 0|as normal|same as origin| +| string | empty string => false
string "0" => false
other strings => true | "123.32" => 123
"-123.4" => -123
"123.23xxxw" => 123
"abcde12" => 0
"-32.1" => -32| 13.2 => 13
-1.1 => 0 |12.1 => 12.1
-12.3 => -12.3
12.4xxa => 12.4
+1.1e2 =>110 |same as origin| +| bool | true => true
false => false| true => 1
false => 0 | true => 1
false => 0 |true => 1
false => 0|true => "true"
false => "false"| +| object | true | 0 | 0 |0|originnal json| +| array | empty array => false
nonempty array => true| [] => 0
[1,2] => 1 | [] => 0
[1,2] => 1 |[] => 0
[1,2] => 1|original json| \ No newline at end of file diff --git a/terraform-server/vendor/github.com/json-iterator/go/go.mod b/terraform-server/vendor/github.com/json-iterator/go/go.mod new file mode 100644 index 00000000..e05c42ff --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/go.mod @@ -0,0 +1,11 @@ +module github.com/json-iterator/go + +go 1.12 + +require ( + github.com/davecgh/go-spew v1.1.1 + github.com/google/gofuzz v1.0.0 + github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 + github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 + github.com/stretchr/testify v1.3.0 +) diff --git a/terraform-server/vendor/github.com/json-iterator/go/go.sum b/terraform-server/vendor/github.com/json-iterator/go/go.sum new file mode 100644 index 00000000..d778b5a1 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/go.sum @@ -0,0 +1,14 @@ +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/google/gofuzz v1.0.0 h1:A8PeW59pxE9IoFRqBp37U+mSNaQoZ46F1f0f863XSXw= +github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421 h1:ZqeYNhU3OHLH3mGKHDcjJRFFRrJa6eAM5H+CtDdOsPc= +github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742 h1:Esafd1046DLDQ0W1YjYsBW+p8U2u7vzgW2SQVmlNazg= +github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= 
+github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.3.0 h1:TivCn/peBQ7UY8ooIcPgZFpTNSz0Q2U6UrFlUfqbe0Q= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter.go b/terraform-server/vendor/github.com/json-iterator/go/iter.go new file mode 100644 index 00000000..29b31cf7 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter.go @@ -0,0 +1,349 @@ +package jsoniter + +import ( + "encoding/json" + "fmt" + "io" +) + +// ValueType the type for JSON element +type ValueType int + +const ( + // InvalidValue invalid JSON element + InvalidValue ValueType = iota + // StringValue JSON element "string" + StringValue + // NumberValue JSON element 100 or 0.10 + NumberValue + // NilValue JSON element null + NilValue + // BoolValue JSON element true or false + BoolValue + // ArrayValue JSON element [] + ArrayValue + // ObjectValue JSON element {} + ObjectValue +) + +var hexDigits []byte +var valueTypes []ValueType + +func init() { + hexDigits = make([]byte, 256) + for i := 0; i < len(hexDigits); i++ { + hexDigits[i] = 255 + } + for i := '0'; i <= '9'; i++ { + hexDigits[i] = byte(i - '0') + } + for i := 'a'; i <= 'f'; i++ { + hexDigits[i] = byte((i - 'a') + 10) + } + for i := 'A'; i <= 'F'; i++ { + hexDigits[i] = byte((i - 'A') + 10) + } + valueTypes = make([]ValueType, 256) + for i := 0; i < len(valueTypes); i++ { + valueTypes[i] = InvalidValue + } + valueTypes['"'] = StringValue + valueTypes['-'] = NumberValue + valueTypes['0'] = NumberValue + valueTypes['1'] = NumberValue + valueTypes['2'] = NumberValue + valueTypes['3'] = NumberValue + valueTypes['4'] = NumberValue + valueTypes['5'] = NumberValue + valueTypes['6'] = NumberValue + valueTypes['7'] = NumberValue + valueTypes['8'] = NumberValue + valueTypes['9'] = NumberValue + valueTypes['t'] = BoolValue + valueTypes['f'] = BoolValue + 
valueTypes['n'] = NilValue + valueTypes['['] = ArrayValue + valueTypes['{'] = ObjectValue +} + +// Iterator is a io.Reader like object, with JSON specific read functions. +// Error is not returned as return value, but stored as Error member on this iterator instance. +type Iterator struct { + cfg *frozenConfig + reader io.Reader + buf []byte + head int + tail int + depth int + captureStartedAt int + captured []byte + Error error + Attachment interface{} // open for customized decoder +} + +// NewIterator creates an empty Iterator instance +func NewIterator(cfg API) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: nil, + head: 0, + tail: 0, + depth: 0, + } +} + +// Parse creates an Iterator instance from io.Reader +func Parse(cfg API, reader io.Reader, bufSize int) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: reader, + buf: make([]byte, bufSize), + head: 0, + tail: 0, + depth: 0, + } +} + +// ParseBytes creates an Iterator instance from byte array +func ParseBytes(cfg API, input []byte) *Iterator { + return &Iterator{ + cfg: cfg.(*frozenConfig), + reader: nil, + buf: input, + head: 0, + tail: len(input), + depth: 0, + } +} + +// ParseString creates an Iterator instance from string +func ParseString(cfg API, input string) *Iterator { + return ParseBytes(cfg, []byte(input)) +} + +// Pool returns a pool can provide more iterator with same configuration +func (iter *Iterator) Pool() IteratorPool { + return iter.cfg +} + +// Reset reuse iterator instance by specifying another reader +func (iter *Iterator) Reset(reader io.Reader) *Iterator { + iter.reader = reader + iter.head = 0 + iter.tail = 0 + iter.depth = 0 + return iter +} + +// ResetBytes reuse iterator instance by specifying another byte array as input +func (iter *Iterator) ResetBytes(input []byte) *Iterator { + iter.reader = nil + iter.buf = input + iter.head = 0 + iter.tail = len(input) + iter.depth = 0 + return iter +} + +// WhatIsNext gets ValueType of 
relatively next json element +func (iter *Iterator) WhatIsNext() ValueType { + valueType := valueTypes[iter.nextToken()] + iter.unreadByte() + return valueType +} + +func (iter *Iterator) skipWhitespacesWithoutLoadMore() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + return false + } + return true +} + +func (iter *Iterator) isObjectEnd() bool { + c := iter.nextToken() + if c == ',' { + return false + } + if c == '}' { + return true + } + iter.ReportError("isObjectEnd", "object ended prematurely, unexpected char "+string([]byte{c})) + return true +} + +func (iter *Iterator) nextToken() byte { + // a variation of skip whitespaces, returning the next non-whitespace token + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\t', '\r': + continue + } + iter.head = i + 1 + return c + } + if !iter.loadMore() { + return 0 + } + } +} + +// ReportError record a error in iterator instance with current position. 
+func (iter *Iterator) ReportError(operation string, msg string) { + if iter.Error != nil { + if iter.Error != io.EOF { + return + } + } + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + peekEnd := iter.head + 10 + if peekEnd > iter.tail { + peekEnd = iter.tail + } + parsing := string(iter.buf[peekStart:peekEnd]) + contextStart := iter.head - 50 + if contextStart < 0 { + contextStart = 0 + } + contextEnd := iter.head + 50 + if contextEnd > iter.tail { + contextEnd = iter.tail + } + context := string(iter.buf[contextStart:contextEnd]) + iter.Error = fmt.Errorf("%s: %s, error found in #%v byte of ...|%s|..., bigger context ...|%s|...", + operation, msg, iter.head-peekStart, parsing, context) +} + +// CurrentBuffer gets current buffer as string for debugging purpose +func (iter *Iterator) CurrentBuffer() string { + peekStart := iter.head - 10 + if peekStart < 0 { + peekStart = 0 + } + return fmt.Sprintf("parsing #%v byte, around ...|%s|..., whole buffer ...|%s|...", iter.head, + string(iter.buf[peekStart:iter.head]), string(iter.buf[0:iter.tail])) +} + +func (iter *Iterator) readByte() (ret byte) { + if iter.head == iter.tail { + if iter.loadMore() { + ret = iter.buf[iter.head] + iter.head++ + return ret + } + return 0 + } + ret = iter.buf[iter.head] + iter.head++ + return ret +} + +func (iter *Iterator) loadMore() bool { + if iter.reader == nil { + if iter.Error == nil { + iter.head = iter.tail + iter.Error = io.EOF + } + return false + } + if iter.captured != nil { + iter.captured = append(iter.captured, + iter.buf[iter.captureStartedAt:iter.tail]...) 
+ iter.captureStartedAt = 0 + } + for { + n, err := iter.reader.Read(iter.buf) + if n == 0 { + if err != nil { + if iter.Error == nil { + iter.Error = err + } + return false + } + } else { + iter.head = 0 + iter.tail = n + return true + } + } +} + +func (iter *Iterator) unreadByte() { + if iter.Error != nil { + return + } + iter.head-- + return +} + +// Read read the next JSON element as generic interface{}. +func (iter *Iterator) Read() interface{} { + valueType := iter.WhatIsNext() + switch valueType { + case StringValue: + return iter.ReadString() + case NumberValue: + if iter.cfg.configBeforeFrozen.UseNumber { + return json.Number(iter.readNumberAsString()) + } + return iter.ReadFloat64() + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + return nil + case BoolValue: + return iter.ReadBool() + case ArrayValue: + arr := []interface{}{} + iter.ReadArrayCB(func(iter *Iterator) bool { + var elem interface{} + iter.ReadVal(&elem) + arr = append(arr, elem) + return true + }) + return arr + case ObjectValue: + obj := map[string]interface{}{} + iter.ReadMapCB(func(Iter *Iterator, field string) bool { + var elem interface{} + iter.ReadVal(&elem) + obj[field] = elem + return true + }) + return obj + default: + iter.ReportError("Read", fmt.Sprintf("unexpected value type: %v", valueType)) + return nil + } +} + +// limit maximum depth of nesting, as allowed by https://tools.ietf.org/html/rfc7159#section-9 +const maxDepth = 10000 + +func (iter *Iterator) incrementDepth() (success bool) { + iter.depth++ + if iter.depth <= maxDepth { + return true + } + iter.ReportError("incrementDepth", "exceeded max depth") + return false +} + +func (iter *Iterator) decrementDepth() (success bool) { + iter.depth-- + if iter.depth >= 0 { + return true + } + iter.ReportError("decrementDepth", "unexpected negative nesting") + return false +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_array.go b/terraform-server/vendor/github.com/json-iterator/go/iter_array.go 
new file mode 100644 index 00000000..204fe0e0 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_array.go @@ -0,0 +1,64 @@ +package jsoniter + +// ReadArray read array element, tells if the array has more element to read. +func (iter *Iterator) ReadArray() (ret bool) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return false // null + case '[': + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + return true + } + return false + case ']': + return false + case ',': + return true + default: + iter.ReportError("ReadArray", "expect [ or , or ] or n, but found "+string([]byte{c})) + return + } +} + +// ReadArrayCB read array with callback +func (iter *Iterator) ReadArrayCB(callback func(*Iterator) bool) (ret bool) { + c := iter.nextToken() + if c == '[' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c != ']' { + iter.unreadByte() + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + if !callback(iter) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != ']' { + iter.ReportError("ReadArrayCB", "expect ] in the end, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + return iter.decrementDepth() + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadArrayCB", "expect [ or n, but found "+string([]byte{c})) + return false +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_float.go b/terraform-server/vendor/github.com/json-iterator/go/iter_float.go new file mode 100644 index 00000000..b9754638 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_float.go @@ -0,0 +1,339 @@ +package jsoniter + +import ( + "encoding/json" + "io" + "math/big" + "strconv" + "strings" + "unsafe" +) + +var floatDigits []int8 + +const invalidCharForNumber 
= int8(-1) +const endOfNumber = int8(-2) +const dotInNumber = int8(-3) + +func init() { + floatDigits = make([]int8, 256) + for i := 0; i < len(floatDigits); i++ { + floatDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + floatDigits[i] = i - int8('0') + } + floatDigits[','] = endOfNumber + floatDigits[']'] = endOfNumber + floatDigits['}'] = endOfNumber + floatDigits[' '] = endOfNumber + floatDigits['\t'] = endOfNumber + floatDigits['\n'] = endOfNumber + floatDigits['.'] = dotInNumber +} + +// ReadBigFloat read big.Float +func (iter *Iterator) ReadBigFloat() (ret *big.Float) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + prec := 64 + if len(str) > prec { + prec = len(str) + } + val, _, err := big.ParseFloat(str, 10, uint(prec), big.ToZero) + if err != nil { + iter.Error = err + return nil + } + return val +} + +// ReadBigInt read big.Int +func (iter *Iterator) ReadBigInt() (ret *big.Int) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return nil + } + ret = big.NewInt(0) + var success bool + ret, success = ret.SetString(str, 10) + if !success { + iter.ReportError("ReadBigInt", "invalid big int") + return nil + } + return ret +} + +//ReadFloat32 read float32 +func (iter *Iterator) ReadFloat32() (ret float32) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat32() + } + iter.unreadByte() + return iter.readPositiveFloat32() +} + +func (iter *Iterator) readPositiveFloat32() (ret float32) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat32SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.ReportError("readFloat32", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat32", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return 
iter.readFloat32SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat32", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat32SlowPath() + case endOfNumber: + iter.head = i + return float32(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' { + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat32SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float32(float64(value) / float64(pow10[decimalPlaces])) + } + // too many decimal places + return iter.readFloat32SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat32SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat32SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat32SlowPath() +} + +func (iter *Iterator) readNumberAsString() (ret string) { + strBuf := [16]byte{} + str := strBuf[0:0] +load_loop: + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '+', '-', '.', 'e', 'E', '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + str = append(str, c) + continue + default: + iter.head = i + break load_loop + } + } + if !iter.loadMore() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + return + } + if len(str) == 0 { + iter.ReportError("readNumberAsString", "invalid number") + } + return *(*string)(unsafe.Pointer(&str)) +} + 
+func (iter *Iterator) readFloat32SlowPath() (ret float32) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat32SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 32) + if err != nil { + iter.Error = err + return + } + return float32(val) +} + +// ReadFloat64 read float64 +func (iter *Iterator) ReadFloat64() (ret float64) { + c := iter.nextToken() + if c == '-' { + return -iter.readPositiveFloat64() + } + iter.unreadByte() + return iter.readPositiveFloat64() +} + +func (iter *Iterator) readPositiveFloat64() (ret float64) { + i := iter.head + // first char + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c := iter.buf[i] + i++ + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.ReportError("readFloat64", "empty number") + return + case dotInNumber: + iter.ReportError("readFloat64", "leading dot is invalid") + return + case 0: + if i == iter.tail { + return iter.readFloat64SlowPath() + } + c = iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.ReportError("readFloat64", "leading zero is invalid") + return + } + } + value := uint64(ind) + // chars before dot +non_decimal_loop: + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case invalidCharForNumber: + return iter.readFloat64SlowPath() + case endOfNumber: + iter.head = i + return float64(value) + case dotInNumber: + break non_decimal_loop + } + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) // value = value * 10 + ind; + } + // chars after dot + if c == '.' 
{ + i++ + decimalPlaces := 0 + if i == iter.tail { + return iter.readFloat64SlowPath() + } + for ; i < iter.tail; i++ { + c = iter.buf[i] + ind := floatDigits[c] + switch ind { + case endOfNumber: + if decimalPlaces > 0 && decimalPlaces < len(pow10) { + iter.head = i + return float64(value) / float64(pow10[decimalPlaces]) + } + // too many decimal places + return iter.readFloat64SlowPath() + case invalidCharForNumber, dotInNumber: + return iter.readFloat64SlowPath() + } + decimalPlaces++ + if value > uint64SafeToMultiple10 { + return iter.readFloat64SlowPath() + } + value = (value << 3) + (value << 1) + uint64(ind) + } + } + return iter.readFloat64SlowPath() +} + +func (iter *Iterator) readFloat64SlowPath() (ret float64) { + str := iter.readNumberAsString() + if iter.Error != nil && iter.Error != io.EOF { + return + } + errMsg := validateFloat(str) + if errMsg != "" { + iter.ReportError("readFloat64SlowPath", errMsg) + return + } + val, err := strconv.ParseFloat(str, 64) + if err != nil { + iter.Error = err + return + } + return val +} + +func validateFloat(str string) string { + // strconv.ParseFloat is not validating `1.` or `1.e1` + if len(str) == 0 { + return "empty number" + } + if str[0] == '-' { + return "-- is not valid" + } + dotPos := strings.IndexByte(str, '.') + if dotPos != -1 { + if dotPos == len(str)-1 { + return "dot can not be last character" + } + switch str[dotPos+1] { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + default: + return "missing digit after dot" + } + } + return "" +} + +// ReadNumber read json.Number +func (iter *Iterator) ReadNumber() (ret json.Number) { + return json.Number(iter.readNumberAsString()) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_int.go b/terraform-server/vendor/github.com/json-iterator/go/iter_int.go new file mode 100644 index 00000000..21423203 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_int.go @@ -0,0 +1,345 @@ +package jsoniter + +import ( 
+ "math" + "strconv" +) + +var intDigits []int8 + +const uint32SafeToMultiply10 = uint32(0xffffffff)/10 - 1 +const uint64SafeToMultiple10 = uint64(0xffffffffffffffff)/10 - 1 + +func init() { + intDigits = make([]int8, 256) + for i := 0; i < len(intDigits); i++ { + intDigits[i] = invalidCharForNumber + } + for i := int8('0'); i <= int8('9'); i++ { + intDigits[i] = i - int8('0') + } +} + +// ReadUint read uint +func (iter *Iterator) ReadUint() uint { + if strconv.IntSize == 32 { + return uint(iter.ReadUint32()) + } + return uint(iter.ReadUint64()) +} + +// ReadInt read int +func (iter *Iterator) ReadInt() int { + if strconv.IntSize == 32 { + return int(iter.ReadInt32()) + } + return int(iter.ReadInt64()) +} + +// ReadInt8 read int8 +func (iter *Iterator) ReadInt8() (ret int8) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt8+1 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int8(val) + } + val := iter.readUint32(c) + if val > math.MaxInt8 { + iter.ReportError("ReadInt8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int8(val) +} + +// ReadUint8 read uint8 +func (iter *Iterator) ReadUint8() (ret uint8) { + val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint8 { + iter.ReportError("ReadUint8", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint8(val) +} + +// ReadInt16 read int16 +func (iter *Iterator) ReadInt16() (ret int16) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt16+1 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int16(val) + } + val := iter.readUint32(c) + if val > math.MaxInt16 { + iter.ReportError("ReadInt16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int16(val) +} + +// ReadUint16 read uint16 +func (iter *Iterator) ReadUint16() (ret uint16) { 
+ val := iter.readUint32(iter.nextToken()) + if val > math.MaxUint16 { + iter.ReportError("ReadUint16", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return uint16(val) +} + +// ReadInt32 read int32 +func (iter *Iterator) ReadInt32() (ret int32) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint32(iter.readByte()) + if val > math.MaxInt32+1 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return -int32(val) + } + val := iter.readUint32(c) + if val > math.MaxInt32 { + iter.ReportError("ReadInt32", "overflow: "+strconv.FormatInt(int64(val), 10)) + return + } + return int32(val) +} + +// ReadUint32 read uint32 +func (iter *Iterator) ReadUint32() (ret uint32) { + return iter.readUint32(iter.nextToken()) +} + +func (iter *Iterator) readUint32(c byte) (ret uint32) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint32", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint32(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint32(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint32(ind2)*10 + uint32(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint32(ind2)*100 + uint32(ind3)*10 + uint32(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 
+ uint32(ind2)*1000 + uint32(ind3)*100 + uint32(ind4)*10 + uint32(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint32(ind2)*10000 + uint32(ind3)*1000 + uint32(ind4)*100 + uint32(ind5)*10 + uint32(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint32(ind2)*100000 + uint32(ind3)*10000 + uint32(ind4)*1000 + uint32(ind5)*100 + uint32(ind6)*10 + uint32(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint32(ind2)*1000000 + uint32(ind3)*100000 + uint32(ind4)*10000 + uint32(ind5)*1000 + uint32(ind6)*100 + uint32(ind7)*10 + uint32(ind8) + iter.head = i + if ind9 == invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint32SafeToMultiply10 { + value2 := (value << 3) + (value << 1) + uint32(ind) + if value2 < value { + iter.ReportError("readUint32", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint32(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +// ReadInt64 read int64 +func (iter *Iterator) ReadInt64() (ret int64) { + c := iter.nextToken() + if c == '-' { + val := iter.readUint64(iter.readByte()) + if val > math.MaxInt64+1 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return -int64(val) + } + val := iter.readUint64(c) + if val > math.MaxInt64 { + iter.ReportError("ReadInt64", "overflow: "+strconv.FormatUint(uint64(val), 10)) + return + } + return int64(val) +} + +// ReadUint64 read uint64 +func (iter *Iterator) ReadUint64() uint64 { + return iter.readUint64(iter.nextToken()) +} + +func (iter *Iterator) 
readUint64(c byte) (ret uint64) { + ind := intDigits[c] + if ind == 0 { + iter.assertInteger() + return 0 // single zero + } + if ind == invalidCharForNumber { + iter.ReportError("readUint64", "unexpected character: "+string([]byte{byte(ind)})) + return + } + value := uint64(ind) + if iter.tail-iter.head > 10 { + i := iter.head + ind2 := intDigits[iter.buf[i]] + if ind2 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + i++ + ind3 := intDigits[iter.buf[i]] + if ind3 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10 + uint64(ind2) + } + //iter.head = i + 1 + //value = value * 100 + uint32(ind2) * 10 + uint32(ind3) + i++ + ind4 := intDigits[iter.buf[i]] + if ind4 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100 + uint64(ind2)*10 + uint64(ind3) + } + i++ + ind5 := intDigits[iter.buf[i]] + if ind5 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000 + uint64(ind2)*100 + uint64(ind3)*10 + uint64(ind4) + } + i++ + ind6 := intDigits[iter.buf[i]] + if ind6 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*10000 + uint64(ind2)*1000 + uint64(ind3)*100 + uint64(ind4)*10 + uint64(ind5) + } + i++ + ind7 := intDigits[iter.buf[i]] + if ind7 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*100000 + uint64(ind2)*10000 + uint64(ind3)*1000 + uint64(ind4)*100 + uint64(ind5)*10 + uint64(ind6) + } + i++ + ind8 := intDigits[iter.buf[i]] + if ind8 == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value*1000000 + uint64(ind2)*100000 + uint64(ind3)*10000 + uint64(ind4)*1000 + uint64(ind5)*100 + uint64(ind6)*10 + uint64(ind7) + } + i++ + ind9 := intDigits[iter.buf[i]] + value = value*10000000 + uint64(ind2)*1000000 + uint64(ind3)*100000 + uint64(ind4)*10000 + uint64(ind5)*1000 + uint64(ind6)*100 + uint64(ind7)*10 + uint64(ind8) + iter.head = i + if ind9 == 
invalidCharForNumber { + iter.assertInteger() + return value + } + } + for { + for i := iter.head; i < iter.tail; i++ { + ind = intDigits[iter.buf[i]] + if ind == invalidCharForNumber { + iter.head = i + iter.assertInteger() + return value + } + if value > uint64SafeToMultiple10 { + value2 := (value << 3) + (value << 1) + uint64(ind) + if value2 < value { + iter.ReportError("readUint64", "overflow") + return + } + value = value2 + continue + } + value = (value << 3) + (value << 1) + uint64(ind) + } + if !iter.loadMore() { + iter.assertInteger() + return value + } + } +} + +func (iter *Iterator) assertInteger() { + if iter.head < len(iter.buf) && iter.buf[iter.head] == '.' { + iter.ReportError("assertInteger", "can not decode float as int") + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_object.go b/terraform-server/vendor/github.com/json-iterator/go/iter_object.go new file mode 100644 index 00000000..b6513711 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_object.go @@ -0,0 +1,267 @@ +package jsoniter + +import ( + "fmt" + "strings" +) + +// ReadObject read one field from object. +// If object ended, returns empty string. +// Otherwise, returns the field name. 
+func (iter *Iterator) ReadObject() (ret string) { + c := iter.nextToken() + switch c { + case 'n': + iter.skipThreeBytes('u', 'l', 'l') + return "" // null + case '{': + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + } + if c == '}' { + return "" // end of object + } + iter.ReportError("ReadObject", `expect " after {, but found `+string([]byte{c})) + return + case ',': + field := iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + return field + case '}': + return "" // end of object + default: + iter.ReportError("ReadObject", fmt.Sprintf(`expect { or , or } or n, but found %s`, string([]byte{c}))) + return + } +} + +// CaseInsensitive +func (iter *Iterator) readFieldHash() int64 { + hash := int64(0x811c9dc5) + c := iter.nextToken() + if c != '"' { + iter.ReportError("readFieldHash", `expect ", but found `+string([]byte{c})) + return 0 + } + for { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + b := iter.buf[i] + if b == '\\' { + iter.head = i + for _, b := range iter.readStringSlowPath() { + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if b == '"' { + iter.head = i + 1 + c = iter.nextToken() + if c != ':' { + iter.ReportError("readFieldHash", `expect :, but found `+string([]byte{c})) + return 0 + } + return hash + } + if 'A' <= b && b <= 'Z' && !iter.cfg.caseSensitive { + b += 'a' - 'A' + } + hash ^= int64(b) + hash *= 0x1000193 + } + if !iter.loadMore() { + iter.ReportError("readFieldHash", `incomplete field 
name`) + return 0 + } + } +} + +func calcHash(str string, caseSensitive bool) int64 { + if !caseSensitive { + str = strings.ToLower(str) + } + hash := int64(0x811c9dc5) + for _, b := range []byte(str) { + hash ^= int64(b) + hash *= 0x1000193 + } + return int64(hash) +} + +// ReadObjectCB read object with callback, the key is ascii only and field name not copied +func (iter *Iterator) ReadObjectCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + var field string + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadObjectCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadObjectCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadObjectCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +// ReadMapCB read map with callback, the key can be any string +func (iter *Iterator) ReadMapCB(callback func(*Iterator, string) bool) bool { + c := iter.nextToken() + if c == '{' { + if !iter.incrementDepth() { + return false + } + c = iter.nextToken() + if c == '"' { + iter.unreadByte() + field := iter.ReadString() + if 
iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + for c == ',' { + field = iter.ReadString() + if iter.nextToken() != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + iter.decrementDepth() + return false + } + if !callback(iter, field) { + iter.decrementDepth() + return false + } + c = iter.nextToken() + } + if c != '}' { + iter.ReportError("ReadMapCB", `object not ended with }`) + iter.decrementDepth() + return false + } + return iter.decrementDepth() + } + if c == '}' { + return iter.decrementDepth() + } + iter.ReportError("ReadMapCB", `expect " after }, but found `+string([]byte{c})) + iter.decrementDepth() + return false + } + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return true // null + } + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectStart() bool { + c := iter.nextToken() + if c == '{' { + c = iter.nextToken() + if c == '}' { + return false + } + iter.unreadByte() + return true + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return false + } + iter.ReportError("readObjectStart", "expect { or n, but found "+string([]byte{c})) + return false +} + +func (iter *Iterator) readObjectFieldAsBytes() (ret []byte) { + str := iter.ReadStringAsSlice() + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if !iter.loadMore() { + return + } + } + if iter.buf[iter.head] != ':' { + iter.ReportError("readObjectFieldAsBytes", "expect : after object field, but found "+string([]byte{iter.buf[iter.head]})) + return + } + iter.head++ + if iter.skipWhitespacesWithoutLoadMore() { + if ret == nil { + ret = make([]byte, len(str)) + copy(ret, str) + } + if 
!iter.loadMore() { + return + } + } + if ret == nil { + return str + } + return ret +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_skip.go b/terraform-server/vendor/github.com/json-iterator/go/iter_skip.go new file mode 100644 index 00000000..e91eefb1 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_skip.go @@ -0,0 +1,130 @@ +package jsoniter + +import "fmt" + +// ReadNil reads a json object as nil and +// returns whether it's a nil or not +func (iter *Iterator) ReadNil() (ret bool) { + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') // null + return true + } + iter.unreadByte() + return false +} + +// ReadBool reads a json object as BoolValue +func (iter *Iterator) ReadBool() (ret bool) { + c := iter.nextToken() + if c == 't' { + iter.skipThreeBytes('r', 'u', 'e') + return true + } + if c == 'f' { + iter.skipFourBytes('a', 'l', 's', 'e') + return false + } + iter.ReportError("ReadBool", "expect t or f, but found "+string([]byte{c})) + return +} + +// SkipAndReturnBytes skip next JSON element, and return its content as []byte. +// The []byte can be kept, it is a copy of data. +func (iter *Iterator) SkipAndReturnBytes() []byte { + iter.startCapture(iter.head) + iter.Skip() + return iter.stopCapture() +} + +// SkipAndAppendBytes skips next JSON element and appends its content to +// buffer, returning the result. 
+func (iter *Iterator) SkipAndAppendBytes(buf []byte) []byte { + iter.startCaptureTo(buf, iter.head) + iter.Skip() + return iter.stopCapture() +} + +func (iter *Iterator) startCaptureTo(buf []byte, captureStartedAt int) { + if iter.captured != nil { + panic("already in capture mode") + } + iter.captureStartedAt = captureStartedAt + iter.captured = buf +} + +func (iter *Iterator) startCapture(captureStartedAt int) { + iter.startCaptureTo(make([]byte, 0, 32), captureStartedAt) +} + +func (iter *Iterator) stopCapture() []byte { + if iter.captured == nil { + panic("not in capture mode") + } + captured := iter.captured + remaining := iter.buf[iter.captureStartedAt:iter.head] + iter.captureStartedAt = -1 + iter.captured = nil + return append(captured, remaining...) +} + +// Skip skips a json object and positions to relatively the next json object +func (iter *Iterator) Skip() { + c := iter.nextToken() + switch c { + case '"': + iter.skipString() + case 'n': + iter.skipThreeBytes('u', 'l', 'l') // null + case 't': + iter.skipThreeBytes('r', 'u', 'e') // true + case 'f': + iter.skipFourBytes('a', 'l', 's', 'e') // false + case '0': + iter.unreadByte() + iter.ReadFloat32() + case '-', '1', '2', '3', '4', '5', '6', '7', '8', '9': + iter.skipNumber() + case '[': + iter.skipArray() + case '{': + iter.skipObject() + default: + iter.ReportError("Skip", fmt.Sprintf("do not know how to skip: %v", c)) + return + } +} + +func (iter *Iterator) skipFourBytes(b1, b2, b3, b4 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3, b4}))) + return + } + if iter.readByte() != b4 { + iter.ReportError("skipFourBytes", fmt.Sprintf("expect %s", 
string([]byte{b1, b2, b3, b4}))) + return + } +} + +func (iter *Iterator) skipThreeBytes(b1, b2, b3 byte) { + if iter.readByte() != b1 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b2 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } + if iter.readByte() != b3 { + iter.ReportError("skipThreeBytes", fmt.Sprintf("expect %s", string([]byte{b1, b2, b3}))) + return + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_skip_sloppy.go b/terraform-server/vendor/github.com/json-iterator/go/iter_skip_sloppy.go new file mode 100644 index 00000000..9303de41 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_skip_sloppy.go @@ -0,0 +1,163 @@ +//+build jsoniter_sloppy + +package jsoniter + +// sloppy but faster implementation, do not validate the input json + +func (iter *Iterator) skipNumber() { + for { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case ' ', '\n', '\r', '\t', ',', '}', ']': + iter.head = i + return + } + } + if !iter.loadMore() { + return + } + } +} + +func (iter *Iterator) skipArray() { + level := 1 + if !iter.incrementDepth() { + return + } + for { + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '[': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case ']': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete array") + return + } + } +} + +func (iter *Iterator) skipObject() { + level := 1 + if !iter.incrementDepth() { + return + } + + for 
{ + for i := iter.head; i < iter.tail; i++ { + switch iter.buf[i] { + case '"': // If inside string, skip it + iter.head = i + 1 + iter.skipString() + i = iter.head - 1 // it will be i++ soon + case '{': // If open symbol, increase level + level++ + if !iter.incrementDepth() { + return + } + case '}': // If close symbol, increase level + level-- + if !iter.decrementDepth() { + return + } + + // If we have returned to the original level, we're done + if level == 0 { + iter.head = i + 1 + return + } + } + } + if !iter.loadMore() { + iter.ReportError("skipObject", "incomplete object") + return + } + } +} + +func (iter *Iterator) skipString() { + for { + end, escaped := iter.findStringEnd() + if end == -1 { + if !iter.loadMore() { + iter.ReportError("skipString", "incomplete string") + return + } + if escaped { + iter.head = 1 // skip the first char as last char read is \ + } + } else { + iter.head = end + return + } + } +} + +// adapted from: https://github.com/buger/jsonparser/blob/master/parser.go +// Tries to find the end of string +// Support if string contains escaped quote symbols. 
+func (iter *Iterator) findStringEnd() (int, bool) { + escaped := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + if !escaped { + return i + 1, false + } + j := i - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return i + 1, true + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + } + } else if c == '\\' { + escaped = true + } + } + j := iter.tail - 1 + for { + if j < iter.head || iter.buf[j] != '\\' { + // even number of backslashes + // either end of buffer, or " found + return -1, false // do not end with \ + } + j-- + if j < iter.head || iter.buf[j] != '\\' { + // odd number of backslashes + // it is \" or \\\" + break + } + j-- + + } + return -1, true // end with \ +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_skip_strict.go b/terraform-server/vendor/github.com/json-iterator/go/iter_skip_strict.go new file mode 100644 index 00000000..6cf66d04 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_skip_strict.go @@ -0,0 +1,99 @@ +//+build !jsoniter_sloppy + +package jsoniter + +import ( + "fmt" + "io" +) + +func (iter *Iterator) skipNumber() { + if !iter.trySkipNumber() { + iter.unreadByte() + if iter.Error != nil && iter.Error != io.EOF { + return + } + iter.ReadFloat64() + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = nil + iter.ReadBigFloat() + } + } +} + +func (iter *Iterator) trySkipNumber() bool { + dotFound := false + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9': + case '.': + if dotFound { + iter.ReportError("validateNumber", `more than one dot found in number`) + return true // already failed + } + if i+1 == iter.tail { + return false + } + c = iter.buf[i+1] + switch c { + case '0', '1', '2', '3', '4', '5', '6', '7', 
'8', '9': + default: + iter.ReportError("validateNumber", `missing digit after dot`) + return true // already failed + } + dotFound = true + default: + switch c { + case ',', ']', '}', ' ', '\t', '\n', '\r': + if iter.head == i { + return false // if - without following digits + } + iter.head = i + return true // must be valid + } + return false // may be invalid + } + } + return false +} + +func (iter *Iterator) skipString() { + if !iter.trySkipString() { + iter.unreadByte() + iter.ReadString() + } +} + +func (iter *Iterator) trySkipString() bool { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + iter.head = i + 1 + return true // valid + } else if c == '\\' { + return false + } else if c < ' ' { + iter.ReportError("trySkipString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return true // already failed + } + } + return false +} + +func (iter *Iterator) skipObject() { + iter.unreadByte() + iter.ReadObjectCB(func(iter *Iterator, field string) bool { + iter.Skip() + return true + }) +} + +func (iter *Iterator) skipArray() { + iter.unreadByte() + iter.ReadArrayCB(func(iter *Iterator) bool { + iter.Skip() + return true + }) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/iter_str.go b/terraform-server/vendor/github.com/json-iterator/go/iter_str.go new file mode 100644 index 00000000..adc487ea --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/iter_str.go @@ -0,0 +1,215 @@ +package jsoniter + +import ( + "fmt" + "unicode/utf16" +) + +// ReadString read string from iterator +func (iter *Iterator) ReadString() (ret string) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + c := iter.buf[i] + if c == '"' { + ret = string(iter.buf[iter.head:i]) + iter.head = i + 1 + return ret + } else if c == '\\' { + break + } else if c < ' ' { + iter.ReportError("ReadString", + fmt.Sprintf(`invalid control character found: %d`, c)) + return + } + } + return 
iter.readStringSlowPath() + } else if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return "" + } + iter.ReportError("ReadString", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readStringSlowPath() (ret string) { + var str []byte + var c byte + for iter.Error == nil { + c = iter.readByte() + if c == '"' { + return string(str) + } + if c == '\\' { + c = iter.readByte() + str = iter.readEscapedChar(c, str) + } else { + str = append(str, c) + } + } + iter.ReportError("readStringSlowPath", "unexpected end of input") + return +} + +func (iter *Iterator) readEscapedChar(c byte, str []byte) []byte { + switch c { + case 'u': + r := iter.readU4() + if utf16.IsSurrogate(r) { + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != '\\' { + iter.unreadByte() + str = appendRune(str, r) + return str + } + c = iter.readByte() + if iter.Error != nil { + return nil + } + if c != 'u' { + str = appendRune(str, r) + return iter.readEscapedChar(c, str) + } + r2 := iter.readU4() + if iter.Error != nil { + return nil + } + combined := utf16.DecodeRune(r, r2) + if combined == '\uFFFD' { + str = appendRune(str, r) + str = appendRune(str, r2) + } else { + str = appendRune(str, combined) + } + } else { + str = appendRune(str, r) + } + case '"': + str = append(str, '"') + case '\\': + str = append(str, '\\') + case '/': + str = append(str, '/') + case 'b': + str = append(str, '\b') + case 'f': + str = append(str, '\f') + case 'n': + str = append(str, '\n') + case 'r': + str = append(str, '\r') + case 't': + str = append(str, '\t') + default: + iter.ReportError("readEscapedChar", + `invalid escape char after \`) + return nil + } + return str +} + +// ReadStringAsSlice read string from iterator without copying into string form. +// The []byte can not be kept, as it will change after next iterator call. 
+func (iter *Iterator) ReadStringAsSlice() (ret []byte) { + c := iter.nextToken() + if c == '"' { + for i := iter.head; i < iter.tail; i++ { + // require ascii string and no escape + // for: field name, base64, number + if iter.buf[i] == '"' { + // fast path: reuse the underlying buffer + ret = iter.buf[iter.head:i] + iter.head = i + 1 + return ret + } + } + readLen := iter.tail - iter.head + copied := make([]byte, readLen, readLen*2) + copy(copied, iter.buf[iter.head:iter.tail]) + iter.head = iter.tail + for iter.Error == nil { + c := iter.readByte() + if c == '"' { + return copied + } + copied = append(copied, c) + } + return copied + } + iter.ReportError("ReadStringAsSlice", `expects " or n, but found `+string([]byte{c})) + return +} + +func (iter *Iterator) readU4() (ret rune) { + for i := 0; i < 4; i++ { + c := iter.readByte() + if iter.Error != nil { + return + } + if c >= '0' && c <= '9' { + ret = ret*16 + rune(c-'0') + } else if c >= 'a' && c <= 'f' { + ret = ret*16 + rune(c-'a'+10) + } else if c >= 'A' && c <= 'F' { + ret = ret*16 + rune(c-'A'+10) + } else { + iter.ReportError("readU4", "expects 0~9 or a~f, but found "+string([]byte{c})) + return + } + } + return ret +} + +const ( + t1 = 0x00 // 0000 0000 + tx = 0x80 // 1000 0000 + t2 = 0xC0 // 1100 0000 + t3 = 0xE0 // 1110 0000 + t4 = 0xF0 // 1111 0000 + t5 = 0xF8 // 1111 1000 + + maskx = 0x3F // 0011 1111 + mask2 = 0x1F // 0001 1111 + mask3 = 0x0F // 0000 1111 + mask4 = 0x07 // 0000 0111 + + rune1Max = 1<<7 - 1 + rune2Max = 1<<11 - 1 + rune3Max = 1<<16 - 1 + + surrogateMin = 0xD800 + surrogateMax = 0xDFFF + + maxRune = '\U0010FFFF' // Maximum valid Unicode code point. + runeError = '\uFFFD' // the "error" Rune or "Unicode replacement character" +) + +func appendRune(p []byte, r rune) []byte { + // Negative values are erroneous. Making it unsigned addresses the problem. 
+ switch i := uint32(r); { + case i <= rune1Max: + p = append(p, byte(r)) + return p + case i <= rune2Max: + p = append(p, t2|byte(r>>6)) + p = append(p, tx|byte(r)&maskx) + return p + case i > maxRune, surrogateMin <= i && i <= surrogateMax: + r = runeError + fallthrough + case i <= rune3Max: + p = append(p, t3|byte(r>>12)) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + default: + p = append(p, t4|byte(r>>18)) + p = append(p, tx|byte(r>>12)&maskx) + p = append(p, tx|byte(r>>6)&maskx) + p = append(p, tx|byte(r)&maskx) + return p + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/jsoniter.go b/terraform-server/vendor/github.com/json-iterator/go/jsoniter.go new file mode 100644 index 00000000..c2934f91 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/jsoniter.go @@ -0,0 +1,18 @@ +// Package jsoniter implements encoding and decoding of JSON as defined in +// RFC 4627 and provides interfaces with identical syntax of standard lib encoding/json. +// Converting from encoding/json to jsoniter is no more than replacing the package with jsoniter +// and variable type declarations (if any). +// jsoniter interfaces gives 100% compatibility with code using standard lib. +// +// "JSON and Go" +// (https://golang.org/doc/articles/json_and_go.html) +// gives a description of how Marshal/Unmarshal operate +// between arbitrary or predefined json objects and bytes, +// and it applies to jsoniter.Marshal/Unmarshal as well. +// +// Besides, jsoniter.Iterator provides a different set of interfaces +// iterating given bytes/string/reader +// and yielding parsed elements one by one. +// This set of interfaces reads input as required and gives +// better performance. 
+package jsoniter diff --git a/terraform-server/vendor/github.com/json-iterator/go/pool.go b/terraform-server/vendor/github.com/json-iterator/go/pool.go new file mode 100644 index 00000000..e2389b56 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/pool.go @@ -0,0 +1,42 @@ +package jsoniter + +import ( + "io" +) + +// IteratorPool a thread safe pool of iterators with same configuration +type IteratorPool interface { + BorrowIterator(data []byte) *Iterator + ReturnIterator(iter *Iterator) +} + +// StreamPool a thread safe pool of streams with same configuration +type StreamPool interface { + BorrowStream(writer io.Writer) *Stream + ReturnStream(stream *Stream) +} + +func (cfg *frozenConfig) BorrowStream(writer io.Writer) *Stream { + stream := cfg.streamPool.Get().(*Stream) + stream.Reset(writer) + return stream +} + +func (cfg *frozenConfig) ReturnStream(stream *Stream) { + stream.out = nil + stream.Error = nil + stream.Attachment = nil + cfg.streamPool.Put(stream) +} + +func (cfg *frozenConfig) BorrowIterator(data []byte) *Iterator { + iter := cfg.iteratorPool.Get().(*Iterator) + iter.ResetBytes(data) + return iter +} + +func (cfg *frozenConfig) ReturnIterator(iter *Iterator) { + iter.Error = nil + iter.Attachment = nil + cfg.iteratorPool.Put(iter) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect.go b/terraform-server/vendor/github.com/json-iterator/go/reflect.go new file mode 100644 index 00000000..74974ba7 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect.go @@ -0,0 +1,337 @@ +package jsoniter + +import ( + "fmt" + "reflect" + "unsafe" + + "github.com/modern-go/reflect2" +) + +// ValDecoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValDecoder with json.Decoder. +// For json.Decoder's adapter, refer to jsoniter.AdapterDecoder(todo link). 
+// +// Reflection on type to create decoders, which is then cached +// Reflection on value is avoided as we can, as the reflect.Value itself will allocate, with following exceptions +// 1. create instance of new value, for example *int will need a int to be allocated +// 2. append to slice, if the existing cap is not enough, allocate will be done using Reflect.New +// 3. assignment to map, both key and value will be reflect.Value +// For a simple struct binding, it will be reflect.Value free and allocation free +type ValDecoder interface { + Decode(ptr unsafe.Pointer, iter *Iterator) +} + +// ValEncoder is an internal type registered to cache as needed. +// Don't confuse jsoniter.ValEncoder with json.Encoder. +// For json.Encoder's adapter, refer to jsoniter.AdapterEncoder(todo godoc link). +type ValEncoder interface { + IsEmpty(ptr unsafe.Pointer) bool + Encode(ptr unsafe.Pointer, stream *Stream) +} + +type checkIsEmpty interface { + IsEmpty(ptr unsafe.Pointer) bool +} + +type ctx struct { + *frozenConfig + prefix string + encoders map[reflect2.Type]ValEncoder + decoders map[reflect2.Type]ValDecoder +} + +func (b *ctx) caseSensitive() bool { + if b.frozenConfig == nil { + // default is case-insensitive + return false + } + return b.frozenConfig.caseSensitive +} + +func (b *ctx) append(prefix string) *ctx { + return &ctx{ + frozenConfig: b.frozenConfig, + prefix: b.prefix + " " + prefix, + encoders: b.encoders, + decoders: b.decoders, + } +} + +// ReadVal copy the underlying JSON into go interface, same as json.Unmarshal +func (iter *Iterator) ReadVal(obj interface{}) { + depth := iter.depth + cacheKey := reflect2.RTypeOf(obj) + decoder := iter.cfg.getDecoderFromCache(cacheKey) + if decoder == nil { + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + iter.ReportError("ReadVal", "can only unmarshal into pointer") + return + } + decoder = iter.cfg.DecoderOf(typ) + } + ptr := reflect2.PtrOf(obj) + if ptr == nil { + iter.ReportError("ReadVal", "can not 
read into nil pointer") + return + } + decoder.Decode(ptr, iter) + if iter.depth != depth { + iter.ReportError("ReadVal", "unexpected mismatched nesting") + return + } +} + +// WriteVal copy the go interface into underlying JSON, same as json.Marshal +func (stream *Stream) WriteVal(val interface{}) { + if nil == val { + stream.WriteNil() + return + } + cacheKey := reflect2.RTypeOf(val) + encoder := stream.cfg.getEncoderFromCache(cacheKey) + if encoder == nil { + typ := reflect2.TypeOf(val) + encoder = stream.cfg.EncoderOf(typ) + } + encoder.Encode(reflect2.PtrOf(val), stream) +} + +func (cfg *frozenConfig) DecoderOf(typ reflect2.Type) ValDecoder { + cacheKey := typ.RType() + decoder := cfg.getDecoderFromCache(cacheKey) + if decoder != nil { + return decoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + ptrType := typ.(*reflect2.UnsafePtrType) + decoder = decoderOfType(ctx, ptrType.Elem()) + cfg.addDecoderToCache(cacheKey, decoder) + return decoder +} + +func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfType(ctx, typ) + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + return decoder +} + +func createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoders[typ] + if decoder != nil { + return decoder + } + placeholder := &placeholderDecoder{} + ctx.decoders[typ] = placeholder + decoder = _createDecoderOfType(ctx, typ) + placeholder.decoder = decoder + return decoder +} + +func _createDecoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := createDecoderOfJsonRawMessage(ctx, typ) + if 
decoder != nil { + return decoder + } + decoder = createDecoderOfJsonNumber(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfMarshaler(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfAny(ctx, typ) + if decoder != nil { + return decoder + } + decoder = createDecoderOfNative(ctx, typ) + if decoder != nil { + return decoder + } + switch typ.Kind() { + case reflect.Interface: + ifaceType, isIFace := typ.(*reflect2.UnsafeIFaceType) + if isIFace { + return &ifaceDecoder{valType: ifaceType} + } + return &efaceDecoder{} + case reflect.Struct: + return decoderOfStruct(ctx, typ) + case reflect.Array: + return decoderOfArray(ctx, typ) + case reflect.Slice: + return decoderOfSlice(ctx, typ) + case reflect.Map: + return decoderOfMap(ctx, typ) + case reflect.Ptr: + return decoderOfOptional(ctx, typ) + default: + return &lazyErrorDecoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +func (cfg *frozenConfig) EncoderOf(typ reflect2.Type) ValEncoder { + cacheKey := typ.RType() + encoder := cfg.getEncoderFromCache(cacheKey) + if encoder != nil { + return encoder + } + ctx := &ctx{ + frozenConfig: cfg, + prefix: "", + decoders: map[reflect2.Type]ValDecoder{}, + encoders: map[reflect2.Type]ValEncoder{}, + } + encoder = encoderOfType(ctx, typ) + if typ.LikePtr() { + encoder = &onePtrEncoder{encoder} + } + cfg.addEncoderToCache(cacheKey, encoder) + return encoder +} + +type onePtrEncoder struct { + encoder ValEncoder +} + +func (encoder *onePtrEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +func (encoder *onePtrEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfType(ctx, typ) + for _, extension 
:= range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + return encoder +} + +func createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoders[typ] + if encoder != nil { + return encoder + } + placeholder := &placeholderEncoder{} + ctx.encoders[typ] = placeholder + encoder = _createEncoderOfType(ctx, typ) + placeholder.encoder = encoder + return encoder +} +func _createEncoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := createEncoderOfJsonRawMessage(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfJsonNumber(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfMarshaler(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfAny(ctx, typ) + if encoder != nil { + return encoder + } + encoder = createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return encoderOfStruct(ctx, typ) + case reflect.Array: + return encoderOfArray(ctx, typ) + case reflect.Slice: + return encoderOfSlice(ctx, typ) + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return encoderOfOptional(ctx, typ) + default: + return &lazyErrorEncoder{err: fmt.Errorf("%s%s is unsupported type", ctx.prefix, typ.String())} + } +} + +type lazyErrorDecoder struct { + err error +} + +func (decoder *lazyErrorDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.WhatIsNext() != NilValue { + if iter.Error == nil { + iter.Error = decoder.err + } + } else { + iter.Skip() + } +} + +type lazyErrorEncoder struct { + err error +} + +func (encoder *lazyErrorEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if ptr == nil { + 
stream.WriteNil() + } else if stream.Error == nil { + stream.Error = encoder.err + } +} + +func (encoder *lazyErrorEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type placeholderDecoder struct { + decoder ValDecoder +} + +func (decoder *placeholderDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(ptr, iter) +} + +type placeholderEncoder struct { + encoder ValEncoder +} + +func (encoder *placeholderEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(ptr, stream) +} + +func (encoder *placeholderEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(ptr) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_array.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_array.go new file mode 100644 index 00000000..13a0b7b0 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_array.go @@ -0,0 +1,104 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfArray(ctx *ctx, typ reflect2.Type) ValDecoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + decoder := decoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayDecoder{arrayType, decoder} +} + +func encoderOfArray(ctx *ctx, typ reflect2.Type) ValEncoder { + arrayType := typ.(*reflect2.UnsafeArrayType) + if arrayType.Len() == 0 { + return emptyArrayEncoder{} + } + encoder := encoderOfType(ctx.append("[arrayElem]"), arrayType.Elem()) + return &arrayEncoder{arrayType, encoder} +} + +type emptyArrayEncoder struct{} + +func (encoder emptyArrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyArray() +} + +func (encoder emptyArrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return true +} + +type arrayEncoder struct { + arrayType *reflect2.UnsafeArrayType + elemEncoder ValEncoder +} + +func (encoder *arrayEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + 
stream.WriteArrayStart() + elemPtr := unsafe.Pointer(ptr) + encoder.elemEncoder.Encode(elemPtr, stream) + for i := 1; i < encoder.arrayType.Len(); i++ { + stream.WriteMore() + elemPtr = encoder.arrayType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.arrayType, stream.Error.Error()) + } +} + +func (encoder *arrayEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type arrayDecoder struct { + arrayType *reflect2.UnsafeArrayType + elemDecoder ValDecoder +} + +func (decoder *arrayDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.arrayType, iter.Error.Error()) + } +} + +func (decoder *arrayDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + arrayType := decoder.arrayType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + return + } + if c != '[' { + iter.ReportError("decode array", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + return + } + iter.unreadByte() + elemPtr := arrayType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + if length >= arrayType.Len() { + iter.Skip() + continue + } + idx := length + length += 1 + elemPtr = arrayType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + iter.ReportError("decode array", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_dynamic.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_dynamic.go new file mode 100644 index 00000000..8b6bc8b4 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_dynamic.go 
@@ -0,0 +1,70 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +type dynamicEncoder struct { + valType reflect2.Type +} + +func (encoder *dynamicEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + stream.WriteVal(obj) +} + +func (encoder *dynamicEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.valType.UnsafeIndirect(ptr) == nil +} + +type efaceDecoder struct { +} + +func (decoder *efaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + pObj := (*interface{})(ptr) + obj := *pObj + if obj == nil { + *pObj = iter.Read() + return + } + typ := reflect2.TypeOf(obj) + if typ.Kind() != reflect.Ptr { + *pObj = iter.Read() + return + } + ptrType := typ.(*reflect2.UnsafePtrType) + ptrElemType := ptrType.Elem() + if iter.WhatIsNext() == NilValue { + if ptrElemType.Kind() != reflect.Ptr { + iter.skipFourBytes('n', 'u', 'l', 'l') + *pObj = nil + return + } + } + if reflect2.IsNil(obj) { + obj := ptrElemType.New() + iter.ReadVal(obj) + *pObj = obj + return + } + iter.ReadVal(obj) +} + +type ifaceDecoder struct { + valType *reflect2.UnsafeIFaceType +} + +func (decoder *ifaceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + decoder.valType.UnsafeSet(ptr, decoder.valType.UnsafeNew()) + return + } + obj := decoder.valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + iter.ReportError("decode non empty interface", "can not unmarshal into nil") + return + } + iter.ReadVal(obj) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_extension.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_extension.go new file mode 100644 index 00000000..80320cd6 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_extension.go @@ -0,0 +1,483 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "reflect" + "sort" + "strings" + "unicode" + "unsafe" +) + +var typeDecoders = 
map[string]ValDecoder{} +var fieldDecoders = map[string]ValDecoder{} +var typeEncoders = map[string]ValEncoder{} +var fieldEncoders = map[string]ValEncoder{} +var extensions = []Extension{} + +// StructDescriptor describe how should we encode/decode the struct +type StructDescriptor struct { + Type reflect2.Type + Fields []*Binding +} + +// GetField get one field from the descriptor by its name. +// Can not use map here to keep field orders. +func (structDescriptor *StructDescriptor) GetField(fieldName string) *Binding { + for _, binding := range structDescriptor.Fields { + if binding.Field.Name() == fieldName { + return binding + } + } + return nil +} + +// Binding describe how should we encode/decode the struct field +type Binding struct { + levels []int + Field reflect2.StructField + FromNames []string + ToNames []string + Encoder ValEncoder + Decoder ValDecoder +} + +// Extension the one for all SPI. Customize encoding/decoding by specifying alternate encoder/decoder. +// Can also rename fields by UpdateStructDescriptor. 
+type Extension interface { + UpdateStructDescriptor(structDescriptor *StructDescriptor) + CreateMapKeyDecoder(typ reflect2.Type) ValDecoder + CreateMapKeyEncoder(typ reflect2.Type) ValEncoder + CreateDecoder(typ reflect2.Type) ValDecoder + CreateEncoder(typ reflect2.Type) ValEncoder + DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder + DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder +} + +// DummyExtension embed this type get dummy implementation for all methods of Extension +type DummyExtension struct { +} + +// UpdateStructDescriptor No-op +func (extension *DummyExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension *DummyExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension *DummyExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder No-op +func (extension *DummyExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder No-op +func (extension *DummyExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension *DummyExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension *DummyExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type EncoderExtension map[reflect2.Type]ValEncoder + +// UpdateStructDescriptor No-op +func (extension EncoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateDecoder No-op +func (extension EncoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateEncoder get encoder from map +func (extension EncoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return extension[typ] +} + +// CreateMapKeyDecoder No-op +func 
(extension EncoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension EncoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension EncoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension EncoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type DecoderExtension map[reflect2.Type]ValDecoder + +// UpdateStructDescriptor No-op +func (extension DecoderExtension) UpdateStructDescriptor(structDescriptor *StructDescriptor) { +} + +// CreateMapKeyDecoder No-op +func (extension DecoderExtension) CreateMapKeyDecoder(typ reflect2.Type) ValDecoder { + return nil +} + +// CreateMapKeyEncoder No-op +func (extension DecoderExtension) CreateMapKeyEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// CreateDecoder get decoder from map +func (extension DecoderExtension) CreateDecoder(typ reflect2.Type) ValDecoder { + return extension[typ] +} + +// CreateEncoder No-op +func (extension DecoderExtension) CreateEncoder(typ reflect2.Type) ValEncoder { + return nil +} + +// DecorateDecoder No-op +func (extension DecoderExtension) DecorateDecoder(typ reflect2.Type, decoder ValDecoder) ValDecoder { + return decoder +} + +// DecorateEncoder No-op +func (extension DecoderExtension) DecorateEncoder(typ reflect2.Type, encoder ValEncoder) ValEncoder { + return encoder +} + +type funcDecoder struct { + fun DecoderFunc +} + +func (decoder *funcDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.fun(ptr, iter) +} + +type funcEncoder struct { + fun EncoderFunc + isEmptyFunc func(ptr unsafe.Pointer) bool +} + +func (encoder *funcEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.fun(ptr, stream) +} + +func (encoder *funcEncoder) IsEmpty(ptr unsafe.Pointer) bool { + if 
encoder.isEmptyFunc == nil { + return false + } + return encoder.isEmptyFunc(ptr) +} + +// DecoderFunc the function form of TypeDecoder +type DecoderFunc func(ptr unsafe.Pointer, iter *Iterator) + +// EncoderFunc the function form of TypeEncoder +type EncoderFunc func(ptr unsafe.Pointer, stream *Stream) + +// RegisterTypeDecoderFunc register TypeDecoder for a type with function +func RegisterTypeDecoderFunc(typ string, fun DecoderFunc) { + typeDecoders[typ] = &funcDecoder{fun} +} + +// RegisterTypeDecoder register TypeDecoder for a typ +func RegisterTypeDecoder(typ string, decoder ValDecoder) { + typeDecoders[typ] = decoder +} + +// RegisterFieldDecoderFunc register TypeDecoder for a struct field with function +func RegisterFieldDecoderFunc(typ string, field string, fun DecoderFunc) { + RegisterFieldDecoder(typ, field, &funcDecoder{fun}) +} + +// RegisterFieldDecoder register TypeDecoder for a struct field +func RegisterFieldDecoder(typ string, field string, decoder ValDecoder) { + fieldDecoders[fmt.Sprintf("%s/%s", typ, field)] = decoder +} + +// RegisterTypeEncoderFunc register TypeEncoder for a type with encode/isEmpty function +func RegisterTypeEncoderFunc(typ string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + typeEncoders[typ] = &funcEncoder{fun, isEmptyFunc} +} + +// RegisterTypeEncoder register TypeEncoder for a type +func RegisterTypeEncoder(typ string, encoder ValEncoder) { + typeEncoders[typ] = encoder +} + +// RegisterFieldEncoderFunc register TypeEncoder for a struct field with encode/isEmpty function +func RegisterFieldEncoderFunc(typ string, field string, fun EncoderFunc, isEmptyFunc func(unsafe.Pointer) bool) { + RegisterFieldEncoder(typ, field, &funcEncoder{fun, isEmptyFunc}) +} + +// RegisterFieldEncoder register TypeEncoder for a struct field +func RegisterFieldEncoder(typ string, field string, encoder ValEncoder) { + fieldEncoders[fmt.Sprintf("%s/%s", typ, field)] = encoder +} + +// RegisterExtension register extension +func 
RegisterExtension(extension Extension) { + extensions = append(extensions, extension) +} + +func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := _getTypeDecoderFromExtension(ctx, typ) + if decoder != nil { + for _, extension := range extensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder) + for _, extension := range ctx.extraExtensions { + decoder = extension.DecorateDecoder(typ, decoder) + } + } + return decoder +} +func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder { + for _, extension := range extensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + decoder := ctx.decoderExtension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateDecoder(typ) + if decoder != nil { + return decoder + } + } + typeName := typ.String() + decoder = typeDecoders[typeName] + if decoder != nil { + return decoder + } + if typ.Kind() == reflect.Ptr { + ptrType := typ.(*reflect2.UnsafePtrType) + decoder := typeDecoders[ptrType.Elem().String()] + if decoder != nil { + return &OptionalDecoder{ptrType.Elem(), decoder} + } + } + return nil +} + +func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := _getTypeEncoderFromExtension(ctx, typ) + if encoder != nil { + for _, extension := range extensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder) + for _, extension := range ctx.extraExtensions { + encoder = extension.DecorateEncoder(typ, encoder) + } + } + return encoder +} + +func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder { + for _, extension := range extensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + encoder := 
ctx.encoderExtension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateEncoder(typ) + if encoder != nil { + return encoder + } + } + typeName := typ.String() + encoder = typeEncoders[typeName] + if encoder != nil { + return encoder + } + if typ.Kind() == reflect.Ptr { + typePtr := typ.(*reflect2.UnsafePtrType) + encoder := typeEncoders[typePtr.Elem().String()] + if encoder != nil { + return &OptionalEncoder{encoder} + } + } + return nil +} + +func describeStruct(ctx *ctx, typ reflect2.Type) *StructDescriptor { + structType := typ.(*reflect2.UnsafeStructType) + embeddedBindings := []*Binding{} + bindings := []*Binding{} + for i := 0; i < structType.NumField(); i++ { + field := structType.Field(i) + tag, hastag := field.Tag().Lookup(ctx.getTagKey()) + if ctx.onlyTaggedField && !hastag && !field.Anonymous() { + continue + } + if tag == "-" || field.Name() == "_" { + continue + } + tagParts := strings.Split(tag, ",") + if field.Anonymous() && (tag == "" || tagParts[0] == "") { + if field.Type().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, field.Type()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) + omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } else if field.Type().Kind() == reflect.Ptr { + ptrType := field.Type().(*reflect2.UnsafePtrType) + if ptrType.Elem().Kind() == reflect.Struct { + structDescriptor := describeStruct(ctx, ptrType.Elem()) + for _, binding := range structDescriptor.Fields { + binding.levels = append([]int{i}, binding.levels...) 
+ omitempty := binding.Encoder.(*structFieldEncoder).omitempty + binding.Encoder = &dereferenceEncoder{binding.Encoder} + binding.Encoder = &structFieldEncoder{field, binding.Encoder, omitempty} + binding.Decoder = &dereferenceDecoder{ptrType.Elem(), binding.Decoder} + binding.Decoder = &structFieldDecoder{field, binding.Decoder} + embeddedBindings = append(embeddedBindings, binding) + } + continue + } + } + } + fieldNames := calcFieldNames(field.Name(), tagParts[0], tag) + fieldCacheKey := fmt.Sprintf("%s/%s", typ.String(), field.Name()) + decoder := fieldDecoders[fieldCacheKey] + if decoder == nil { + decoder = decoderOfType(ctx.append(field.Name()), field.Type()) + } + encoder := fieldEncoders[fieldCacheKey] + if encoder == nil { + encoder = encoderOfType(ctx.append(field.Name()), field.Type()) + } + binding := &Binding{ + Field: field, + FromNames: fieldNames, + ToNames: fieldNames, + Decoder: decoder, + Encoder: encoder, + } + binding.levels = []int{i} + bindings = append(bindings, binding) + } + return createStructDescriptor(ctx, typ, bindings, embeddedBindings) +} +func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, embeddedBindings []*Binding) *StructDescriptor { + structDescriptor := &StructDescriptor{ + Type: typ, + Fields: bindings, + } + for _, extension := range extensions { + extension.UpdateStructDescriptor(structDescriptor) + } + ctx.encoderExtension.UpdateStructDescriptor(structDescriptor) + ctx.decoderExtension.UpdateStructDescriptor(structDescriptor) + for _, extension := range ctx.extraExtensions { + extension.UpdateStructDescriptor(structDescriptor) + } + processTags(structDescriptor, ctx.frozenConfig) + // merge normal & embedded bindings & sort with original order + allBindings := sortableBindings(append(embeddedBindings, structDescriptor.Fields...)) + sort.Sort(allBindings) + structDescriptor.Fields = allBindings + return structDescriptor +} + +type sortableBindings []*Binding + +func (bindings sortableBindings) 
Len() int { + return len(bindings) +} + +func (bindings sortableBindings) Less(i, j int) bool { + left := bindings[i].levels + right := bindings[j].levels + k := 0 + for { + if left[k] < right[k] { + return true + } else if left[k] > right[k] { + return false + } + k++ + } +} + +func (bindings sortableBindings) Swap(i, j int) { + bindings[i], bindings[j] = bindings[j], bindings[i] +} + +func processTags(structDescriptor *StructDescriptor, cfg *frozenConfig) { + for _, binding := range structDescriptor.Fields { + shouldOmitEmpty := false + tagParts := strings.Split(binding.Field.Tag().Get(cfg.getTagKey()), ",") + for _, tagPart := range tagParts[1:] { + if tagPart == "omitempty" { + shouldOmitEmpty = true + } else if tagPart == "string" { + if binding.Field.Type().Kind() == reflect.String { + binding.Decoder = &stringModeStringDecoder{binding.Decoder, cfg} + binding.Encoder = &stringModeStringEncoder{binding.Encoder, cfg} + } else { + binding.Decoder = &stringModeNumberDecoder{binding.Decoder} + binding.Encoder = &stringModeNumberEncoder{binding.Encoder} + } + } + } + binding.Decoder = &structFieldDecoder{binding.Field, binding.Decoder} + binding.Encoder = &structFieldEncoder{binding.Field, binding.Encoder, shouldOmitEmpty} + } +} + +func calcFieldNames(originalFieldName string, tagProvidedFieldName string, wholeTag string) []string { + // ignore? + if wholeTag == "-" { + return []string{} + } + // rename? + var fieldNames []string + if tagProvidedFieldName == "" { + fieldNames = []string{originalFieldName} + } else { + fieldNames = []string{tagProvidedFieldName} + } + // private? 
+ isNotExported := unicode.IsLower(rune(originalFieldName[0])) + if isNotExported { + fieldNames = []string{} + } + return fieldNames +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_json_number.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_json_number.go new file mode 100644 index 00000000..98d45c1e --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_json_number.go @@ -0,0 +1,112 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "strconv" + "unsafe" +) + +type Number string + +// String returns the literal text of the number. +func (n Number) String() string { return string(n) } + +// Float64 returns the number as a float64. +func (n Number) Float64() (float64, error) { + return strconv.ParseFloat(string(n), 64) +} + +// Int64 returns the number as an int64. +func (n Number) Int64() (int64, error) { + return strconv.ParseInt(string(n), 10, 64) +} + +func CastJsonNumber(val interface{}) (string, bool) { + switch typedVal := val.(type) { + case json.Number: + return string(typedVal), true + case Number: + return string(typedVal), true + } + return "", false +} + +var jsonNumberType = reflect2.TypeOfPtr((*json.Number)(nil)).Elem() +var jsoniterNumberType = reflect2.TypeOfPtr((*Number)(nil)).Elem() + +func createDecoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +func createEncoderOfJsonNumber(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.AssignableTo(jsonNumberType) { + return &jsonNumberCodec{} + } + if typ.AssignableTo(jsoniterNumberType) { + return &jsoniterNumberCodec{} + } + return nil +} + +type jsonNumberCodec struct { +} + +func (codec *jsonNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + 
*((*json.Number)(ptr)) = json.Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*json.Number)(ptr)) = "" + default: + *((*json.Number)(ptr)) = json.Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsonNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*json.Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsonNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.Number)(ptr))) == 0 +} + +type jsoniterNumberCodec struct { +} + +func (codec *jsoniterNumberCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + switch iter.WhatIsNext() { + case StringValue: + *((*Number)(ptr)) = Number(iter.ReadString()) + case NilValue: + iter.skipFourBytes('n', 'u', 'l', 'l') + *((*Number)(ptr)) = "" + default: + *((*Number)(ptr)) = Number([]byte(iter.readNumberAsString())) + } +} + +func (codec *jsoniterNumberCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + number := *((*Number)(ptr)) + if len(number) == 0 { + stream.writeByte('0') + } else { + stream.WriteRaw(string(number)) + } +} + +func (codec *jsoniterNumberCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*Number)(ptr))) == 0 +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_json_raw_message.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_json_raw_message.go new file mode 100644 index 00000000..f2619936 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_json_raw_message.go @@ -0,0 +1,60 @@ +package jsoniter + +import ( + "encoding/json" + "github.com/modern-go/reflect2" + "unsafe" +) + +var jsonRawMessageType = reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem() +var jsoniterRawMessageType = reflect2.TypeOfPtr((*RawMessage)(nil)).Elem() + +func createEncoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == jsonRawMessageType { + return 
&jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +func createDecoderOfJsonRawMessage(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ == jsonRawMessageType { + return &jsonRawMessageCodec{} + } + if typ == jsoniterRawMessageType { + return &jsoniterRawMessageCodec{} + } + return nil +} + +type jsonRawMessageCodec struct { +} + +func (codec *jsonRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*json.RawMessage)(ptr)) = json.RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsonRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*json.RawMessage)(ptr)))) +} + +func (codec *jsonRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*json.RawMessage)(ptr))) == 0 +} + +type jsoniterRawMessageCodec struct { +} + +func (codec *jsoniterRawMessageCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*RawMessage)(ptr)) = RawMessage(iter.SkipAndReturnBytes()) +} + +func (codec *jsoniterRawMessageCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteRaw(string(*((*RawMessage)(ptr)))) +} + +func (codec *jsoniterRawMessageCodec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*RawMessage)(ptr))) == 0 +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_map.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_map.go new file mode 100644 index 00000000..9e2b623f --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_map.go @@ -0,0 +1,346 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + "sort" + "unsafe" +) + +func decoderOfMap(ctx *ctx, typ reflect2.Type) ValDecoder { + mapType := typ.(*reflect2.UnsafeMapType) + keyDecoder := decoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()) + elemDecoder := decoderOfType(ctx.append("[mapElem]"), mapType.Elem()) + return &mapDecoder{ + mapType: mapType, + keyType: 
mapType.Key(), + elemType: mapType.Elem(), + keyDecoder: keyDecoder, + elemDecoder: elemDecoder, + } +} + +func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder { + mapType := typ.(*reflect2.UnsafeMapType) + if ctx.sortMapKeys { + return &sortKeysMapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } + } + return &mapEncoder{ + mapType: mapType, + keyEncoder: encoderOfMapKey(ctx.append("[mapKey]"), mapType.Key()), + elemEncoder: encoderOfType(ctx.append("[mapElem]"), mapType.Elem()), + } +} + +func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder { + decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + for _, extension := range ctx.extraExtensions { + decoder := extension.CreateMapKeyDecoder(typ) + if decoder != nil { + return decoder + } + } + switch typ.Kind() { + case reflect.String: + return decoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyDecoder{decoderOfType(ctx, typ)} + default: + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(unmarshalerType) { + return &unmarshalerDecoder{ + valType: typ, + } + } + if ptrType.Implements(textUnmarshalerType) { + return &referenceDecoder{ + &textUnmarshalerDecoder{ + valType: ptrType, + }, + } + } + if typ.Implements(textUnmarshalerType) { + return &textUnmarshalerDecoder{ + valType: typ, + } + } + return &lazyErrorDecoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +func 
encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder { + encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + for _, extension := range ctx.extraExtensions { + encoder := extension.CreateMapKeyEncoder(typ) + if encoder != nil { + return encoder + } + } + switch typ.Kind() { + case reflect.String: + return encoderOfType(ctx, reflect2.DefaultTypeOfKind(reflect.String)) + case reflect.Bool, + reflect.Uint8, reflect.Int8, + reflect.Uint16, reflect.Int16, + reflect.Uint32, reflect.Int32, + reflect.Uint64, reflect.Int64, + reflect.Uint, reflect.Int, + reflect.Float32, reflect.Float64, + reflect.Uintptr: + typ = reflect2.DefaultTypeOfKind(typ.Kind()) + return &numericMapKeyEncoder{encoderOfType(ctx, typ)} + default: + if typ == textMarshalerType { + return &directTextMarshalerEncoder{ + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Implements(textMarshalerType) { + return &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + } + if typ.Kind() == reflect.Interface { + return &dynamicMapKeyEncoder{ctx, typ} + } + return &lazyErrorEncoder{err: fmt.Errorf("unsupported map key type: %v", typ)} + } +} + +type mapDecoder struct { + mapType *reflect2.UnsafeMapType + keyType reflect2.Type + elemType reflect2.Type + keyDecoder ValDecoder + elemDecoder ValDecoder +} + +func (decoder *mapDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + mapType := decoder.mapType + c := iter.nextToken() + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + *(*unsafe.Pointer)(ptr) = nil + mapType.UnsafeSet(ptr, mapType.UnsafeNew()) + return + } + if mapType.UnsafeIsNil(ptr) { + mapType.UnsafeSet(ptr, mapType.UnsafeMakeMap(0)) + } + if c != '{' { + iter.ReportError("ReadMapCB", `expect { or n, but found `+string([]byte{c})) + return + } + c = iter.nextToken() + if c == '}' { + return + } + if c != '"' { + iter.ReportError("ReadMapCB", `expect " after }, but found 
`+string([]byte{c})) + return + } + iter.unreadByte() + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + key := decoder.keyType.UnsafeNew() + decoder.keyDecoder.Decode(key, iter) + c = iter.nextToken() + if c != ':' { + iter.ReportError("ReadMapCB", "expect : after object field, but found "+string([]byte{c})) + return + } + elem := decoder.elemType.UnsafeNew() + decoder.elemDecoder.Decode(elem, iter) + decoder.mapType.UnsafeSetIndex(ptr, key, elem) + } + if c != '}' { + iter.ReportError("ReadMapCB", `expect }, but found `+string([]byte{c})) + } +} + +type numericMapKeyDecoder struct { + decoder ValDecoder +} + +func (decoder *numericMapKeyDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } + decoder.decoder.Decode(ptr, iter) + c = iter.nextToken() + if c != '"' { + iter.ReportError("ReadMapCB", `expect ", but found `+string([]byte{c})) + return + } +} + +type numericMapKeyEncoder struct { + encoder ValEncoder +} + +func (encoder *numericMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.encoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *numericMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type dynamicMapKeyEncoder struct { + ctx *ctx + valType reflect2.Type +} + +func (encoder *dynamicMapKeyEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).Encode(reflect2.PtrOf(obj), stream) +} + +func (encoder 
*dynamicMapKeyEncoder) IsEmpty(ptr unsafe.Pointer) bool { + obj := encoder.valType.UnsafeIndirect(ptr) + return encoderOfMapKey(encoder.ctx, reflect2.TypeOf(obj)).IsEmpty(reflect2.PtrOf(obj)) +} + +type mapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *mapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + iter := encoder.mapType.UnsafeIterate(ptr) + for i := 0; iter.HasNext(); i++ { + if i != 0 { + stream.WriteMore() + } + key, elem := iter.UnsafeNext() + encoder.keyEncoder.Encode(key, stream) + if stream.indention > 0 { + stream.writeTwoBytes(byte(':'), byte(' ')) + } else { + stream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, stream) + } + stream.WriteObjectEnd() +} + +func (encoder *mapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type sortKeysMapEncoder struct { + mapType *reflect2.UnsafeMapType + keyEncoder ValEncoder + elemEncoder ValEncoder +} + +func (encoder *sortKeysMapEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *(*unsafe.Pointer)(ptr) == nil { + stream.WriteNil() + return + } + stream.WriteObjectStart() + mapIter := encoder.mapType.UnsafeIterate(ptr) + subStream := stream.cfg.BorrowStream(nil) + subStream.Attachment = stream.Attachment + subIter := stream.cfg.BorrowIterator(nil) + keyValues := encodedKeyValues{} + for mapIter.HasNext() { + key, elem := mapIter.UnsafeNext() + subStreamIndex := subStream.Buffered() + encoder.keyEncoder.Encode(key, subStream) + if subStream.Error != nil && subStream.Error != io.EOF && stream.Error == nil { + stream.Error = subStream.Error + } + encodedKey := subStream.Buffer()[subStreamIndex:] + subIter.ResetBytes(encodedKey) + decodedKey := subIter.ReadString() + if stream.indention > 0 { + subStream.writeTwoBytes(byte(':'), byte(' ')) + } else { + 
subStream.writeByte(':') + } + encoder.elemEncoder.Encode(elem, subStream) + keyValues = append(keyValues, encodedKV{ + key: decodedKey, + keyValue: subStream.Buffer()[subStreamIndex:], + }) + } + sort.Sort(keyValues) + for i, keyValue := range keyValues { + if i != 0 { + stream.WriteMore() + } + stream.Write(keyValue.keyValue) + } + if subStream.Error != nil && stream.Error == nil { + stream.Error = subStream.Error + } + stream.WriteObjectEnd() + stream.cfg.ReturnStream(subStream) + stream.cfg.ReturnIterator(subIter) +} + +func (encoder *sortKeysMapEncoder) IsEmpty(ptr unsafe.Pointer) bool { + iter := encoder.mapType.UnsafeIterate(ptr) + return !iter.HasNext() +} + +type encodedKeyValues []encodedKV + +type encodedKV struct { + key string + keyValue []byte +} + +func (sv encodedKeyValues) Len() int { return len(sv) } +func (sv encodedKeyValues) Swap(i, j int) { sv[i], sv[j] = sv[j], sv[i] } +func (sv encodedKeyValues) Less(i, j int) bool { return sv[i].key < sv[j].key } diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_marshaler.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_marshaler.go new file mode 100644 index 00000000..3e21f375 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_marshaler.go @@ -0,0 +1,225 @@ +package jsoniter + +import ( + "encoding" + "encoding/json" + "unsafe" + + "github.com/modern-go/reflect2" +) + +var marshalerType = reflect2.TypeOfPtr((*json.Marshaler)(nil)).Elem() +var unmarshalerType = reflect2.TypeOfPtr((*json.Unmarshaler)(nil)).Elem() +var textMarshalerType = reflect2.TypeOfPtr((*encoding.TextMarshaler)(nil)).Elem() +var textUnmarshalerType = reflect2.TypeOfPtr((*encoding.TextUnmarshaler)(nil)).Elem() + +func createDecoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := reflect2.PtrTo(typ) + if ptrType.Implements(unmarshalerType) { + return &referenceDecoder{ + &unmarshalerDecoder{ptrType}, + } + } + if ptrType.Implements(textUnmarshalerType) { 
+ return &referenceDecoder{ + &textUnmarshalerDecoder{ptrType}, + } + } + return nil +} + +func createEncoderOfMarshaler(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ == marshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + } + return encoder + } + if typ.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &marshalerEncoder{ + valType: typ, + checkIsEmpty: checkIsEmpty, + } + return encoder + } + ptrType := reflect2.PtrTo(typ) + if ctx.prefix != "" && ptrType.Implements(marshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &marshalerEncoder{ + valType: ptrType, + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + if typ == textMarshalerType { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &directTextMarshalerEncoder{ + checkIsEmpty: checkIsEmpty, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + } + return encoder + } + if typ.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, typ) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: typ, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return encoder + } + // if prefix is empty, the type is the root type + if ctx.prefix != "" && ptrType.Implements(textMarshalerType) { + checkIsEmpty := createCheckIsEmpty(ctx, ptrType) + var encoder ValEncoder = &textMarshalerEncoder{ + valType: ptrType, + stringEncoder: ctx.EncoderOf(reflect2.TypeOf("")), + checkIsEmpty: checkIsEmpty, + } + return &referenceEncoder{encoder} + } + return nil +} + +type marshalerEncoder struct { + checkIsEmpty checkIsEmpty + valType reflect2.Type +} + +func (encoder *marshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + 
stream.WriteNil() + return + } + marshaler := obj.(json.Marshaler) + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + // html escape was already done by jsoniter + // but the extra '\n' should be trimed + l := len(bytes) + if l > 0 && bytes[l-1] == '\n' { + bytes = bytes[:l-1] + } + stream.Write(bytes) + } +} + +func (encoder *marshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directMarshalerEncoder struct { + checkIsEmpty checkIsEmpty +} + +func (encoder *directMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*json.Marshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalJSON() + if err != nil { + stream.Error = err + } else { + stream.Write(bytes) + } +} + +func (encoder *directMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type textMarshalerEncoder struct { + valType reflect2.Type + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *textMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + obj := encoder.valType.UnsafeIndirect(ptr) + if encoder.valType.IsNullable() && reflect2.IsNil(obj) { + stream.WriteNil() + return + } + marshaler := (obj).(encoding.TextMarshaler) + bytes, err := marshaler.MarshalText() + if err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *textMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type directTextMarshalerEncoder struct { + stringEncoder ValEncoder + checkIsEmpty checkIsEmpty +} + +func (encoder *directTextMarshalerEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + marshaler := *(*encoding.TextMarshaler)(ptr) + if marshaler == nil { + stream.WriteNil() + return + } + bytes, err := marshaler.MarshalText() + if 
err != nil { + stream.Error = err + } else { + str := string(bytes) + encoder.stringEncoder.Encode(unsafe.Pointer(&str), stream) + } +} + +func (encoder *directTextMarshalerEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.checkIsEmpty.IsEmpty(ptr) +} + +type unmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *unmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + unmarshaler := obj.(json.Unmarshaler) + iter.nextToken() + iter.unreadByte() // skip spaces + bytes := iter.SkipAndReturnBytes() + err := unmarshaler.UnmarshalJSON(bytes) + if err != nil { + iter.ReportError("unmarshalerDecoder", err.Error()) + } +} + +type textUnmarshalerDecoder struct { + valType reflect2.Type +} + +func (decoder *textUnmarshalerDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valType := decoder.valType + obj := valType.UnsafeIndirect(ptr) + if reflect2.IsNil(obj) { + ptrType := valType.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + elem := elemType.UnsafeNew() + ptrType.UnsafeSet(ptr, unsafe.Pointer(&elem)) + obj = valType.UnsafeIndirect(ptr) + } + unmarshaler := (obj).(encoding.TextUnmarshaler) + str := iter.ReadString() + err := unmarshaler.UnmarshalText([]byte(str)) + if err != nil { + iter.ReportError("textUnmarshalerDecoder", err.Error()) + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_native.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_native.go new file mode 100644 index 00000000..f88722d1 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_native.go @@ -0,0 +1,453 @@ +package jsoniter + +import ( + "encoding/base64" + "reflect" + "strconv" + "unsafe" + + "github.com/modern-go/reflect2" +) + +const ptrSize = 32 << uintptr(^uintptr(0)>>63) + +func createEncoderOfNative(ctx *ctx, typ reflect2.Type) ValEncoder { + if typ.Kind() == reflect.Slice && 
typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + kind := typ.Kind() + switch kind { + case reflect.String: + if typeName != "string" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + 
case reflect.Uint64: + if typeName != "uint64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return encoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +func createDecoderOfNative(ctx *ctx, typ reflect2.Type) ValDecoder { + if typ.Kind() == reflect.Slice && typ.(reflect2.SliceType).Elem().Kind() == reflect.Uint8 { + sliceDecoder := decoderOfSlice(ctx, typ) + return &base64Codec{sliceDecoder: sliceDecoder} + } + typeName := typ.String() + switch typ.Kind() { + case reflect.String: + if typeName != "string" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*string)(nil)).Elem()) + } + return &stringCodec{} + case reflect.Int: + if typeName != "int" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int)(nil)).Elem()) + } + if strconv.IntSize == 32 { + return &int32Codec{} + } + return &int64Codec{} + case reflect.Int8: + if typeName != "int8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int8)(nil)).Elem()) + } + return &int8Codec{} + case reflect.Int16: + if typeName != "int16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int16)(nil)).Elem()) + } + return &int16Codec{} + case reflect.Int32: + if typeName != "int32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int32)(nil)).Elem()) + } + return &int32Codec{} + case reflect.Int64: + if typeName != "int64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*int64)(nil)).Elem()) + } + return &int64Codec{} + case reflect.Uint: + if typeName != "uint" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint)(nil)).Elem()) + } + if strconv.IntSize == 
32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint8: + if typeName != "uint8" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint8)(nil)).Elem()) + } + return &uint8Codec{} + case reflect.Uint16: + if typeName != "uint16" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint16)(nil)).Elem()) + } + return &uint16Codec{} + case reflect.Uint32: + if typeName != "uint32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint32)(nil)).Elem()) + } + return &uint32Codec{} + case reflect.Uintptr: + if typeName != "uintptr" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uintptr)(nil)).Elem()) + } + if ptrSize == 32 { + return &uint32Codec{} + } + return &uint64Codec{} + case reflect.Uint64: + if typeName != "uint64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*uint64)(nil)).Elem()) + } + return &uint64Codec{} + case reflect.Float32: + if typeName != "float32" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float32)(nil)).Elem()) + } + return &float32Codec{} + case reflect.Float64: + if typeName != "float64" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*float64)(nil)).Elem()) + } + return &float64Codec{} + case reflect.Bool: + if typeName != "bool" { + return decoderOfType(ctx, reflect2.TypeOfPtr((*bool)(nil)).Elem()) + } + return &boolCodec{} + } + return nil +} + +type stringCodec struct { +} + +func (codec *stringCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + *((*string)(ptr)) = iter.ReadString() +} + +func (codec *stringCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + str := *((*string)(ptr)) + stream.WriteString(str) +} + +func (codec *stringCodec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*string)(ptr)) == "" +} + +type int8Codec struct { +} + +func (codec *int8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int8)(ptr)) = iter.ReadInt8() + } +} + +func (codec *int8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt8(*((*int8)(ptr))) +} + +func (codec 
*int8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int8)(ptr)) == 0 +} + +type int16Codec struct { +} + +func (codec *int16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int16)(ptr)) = iter.ReadInt16() + } +} + +func (codec *int16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt16(*((*int16)(ptr))) +} + +func (codec *int16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int16)(ptr)) == 0 +} + +type int32Codec struct { +} + +func (codec *int32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int32)(ptr)) = iter.ReadInt32() + } +} + +func (codec *int32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt32(*((*int32)(ptr))) +} + +func (codec *int32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int32)(ptr)) == 0 +} + +type int64Codec struct { +} + +func (codec *int64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*int64)(ptr)) = iter.ReadInt64() + } +} + +func (codec *int64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteInt64(*((*int64)(ptr))) +} + +func (codec *int64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*int64)(ptr)) == 0 +} + +type uint8Codec struct { +} + +func (codec *uint8Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint8)(ptr)) = iter.ReadUint8() + } +} + +func (codec *uint8Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint8(*((*uint8)(ptr))) +} + +func (codec *uint8Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint8)(ptr)) == 0 +} + +type uint16Codec struct { +} + +func (codec *uint16Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint16)(ptr)) = iter.ReadUint16() + } +} + +func (codec *uint16Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint16(*((*uint16)(ptr))) +} + +func (codec *uint16Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint16)(ptr)) == 
0 +} + +type uint32Codec struct { +} + +func (codec *uint32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint32)(ptr)) = iter.ReadUint32() + } +} + +func (codec *uint32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint32(*((*uint32)(ptr))) +} + +func (codec *uint32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint32)(ptr)) == 0 +} + +type uint64Codec struct { +} + +func (codec *uint64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*uint64)(ptr)) = iter.ReadUint64() + } +} + +func (codec *uint64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteUint64(*((*uint64)(ptr))) +} + +func (codec *uint64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*uint64)(ptr)) == 0 +} + +type float32Codec struct { +} + +func (codec *float32Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float32)(ptr)) = iter.ReadFloat32() + } +} + +func (codec *float32Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat32(*((*float32)(ptr))) +} + +func (codec *float32Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float32)(ptr)) == 0 +} + +type float64Codec struct { +} + +func (codec *float64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*float64)(ptr)) = iter.ReadFloat64() + } +} + +func (codec *float64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteFloat64(*((*float64)(ptr))) +} + +func (codec *float64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return *((*float64)(ptr)) == 0 +} + +type boolCodec struct { +} + +func (codec *boolCodec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.ReadNil() { + *((*bool)(ptr)) = iter.ReadBool() + } +} + +func (codec *boolCodec) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteBool(*((*bool)(ptr))) +} + +func (codec *boolCodec) IsEmpty(ptr unsafe.Pointer) bool { + return !(*((*bool)(ptr))) +} + +type base64Codec struct { + 
sliceType *reflect2.UnsafeSliceType + sliceDecoder ValDecoder +} + +func (codec *base64Codec) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + codec.sliceType.UnsafeSetNil(ptr) + return + } + switch iter.WhatIsNext() { + case StringValue: + src := iter.ReadString() + dst, err := base64.StdEncoding.DecodeString(src) + if err != nil { + iter.ReportError("decode base64", err.Error()) + } else { + codec.sliceType.UnsafeSet(ptr, unsafe.Pointer(&dst)) + } + case ArrayValue: + codec.sliceDecoder.Decode(ptr, iter) + default: + iter.ReportError("base64Codec", "invalid input") + } +} + +func (codec *base64Codec) Encode(ptr unsafe.Pointer, stream *Stream) { + if codec.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + src := *((*[]byte)(ptr)) + encoding := base64.StdEncoding + stream.writeByte('"') + if len(src) != 0 { + size := encoding.EncodedLen(len(src)) + buf := make([]byte, size) + encoding.Encode(buf, src) + stream.buf = append(stream.buf, buf...) + } + stream.writeByte('"') +} + +func (codec *base64Codec) IsEmpty(ptr unsafe.Pointer) bool { + return len(*((*[]byte)(ptr))) == 0 +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_optional.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_optional.go new file mode 100644 index 00000000..43ec71d6 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_optional.go @@ -0,0 +1,133 @@ +package jsoniter + +import ( + "github.com/modern-go/reflect2" + "reflect" + "unsafe" +) + +func decoderOfOptional(ctx *ctx, typ reflect2.Type) ValDecoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := ptrType.Elem() + decoder := decoderOfType(ctx, elemType) + if ctx.prefix == "" && elemType.Kind() == reflect.Ptr { + return &dereferenceDecoder{elemType, decoder} + } + return &OptionalDecoder{elemType, decoder} +} + +func encoderOfOptional(ctx *ctx, typ reflect2.Type) ValEncoder { + ptrType := typ.(*reflect2.UnsafePtrType) + elemType := 
ptrType.Elem() + elemEncoder := encoderOfType(ctx, elemType) + encoder := &OptionalEncoder{elemEncoder} + return encoder +} + +type OptionalDecoder struct { + ValueType reflect2.Type + ValueDecoder ValDecoder +} + +func (decoder *OptionalDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if iter.ReadNil() { + *((*unsafe.Pointer)(ptr)) = nil + } else { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.ValueType.UnsafeNew() + decoder.ValueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.ValueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } + } +} + +type dereferenceDecoder struct { + // only to deference a pointer + valueType reflect2.Type + valueDecoder ValDecoder +} + +func (decoder *dereferenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if *((*unsafe.Pointer)(ptr)) == nil { + //pointer to null, we have to allocate memory to hold the value + newPtr := decoder.valueType.UnsafeNew() + decoder.valueDecoder.Decode(newPtr, iter) + *((*unsafe.Pointer)(ptr)) = newPtr + } else { + //reuse existing instance + decoder.valueDecoder.Decode(*((*unsafe.Pointer)(ptr)), iter) + } +} + +type OptionalEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *OptionalEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *OptionalEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return *((*unsafe.Pointer)(ptr)) == nil +} + +type dereferenceEncoder struct { + ValueEncoder ValEncoder +} + +func (encoder *dereferenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if *((*unsafe.Pointer)(ptr)) == nil { + stream.WriteNil() + } else { + encoder.ValueEncoder.Encode(*((*unsafe.Pointer)(ptr)), stream) + } +} + +func (encoder *dereferenceEncoder) IsEmpty(ptr 
unsafe.Pointer) bool { + dePtr := *((*unsafe.Pointer)(ptr)) + if dePtr == nil { + return true + } + return encoder.ValueEncoder.IsEmpty(dePtr) +} + +func (encoder *dereferenceEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + deReferenced := *((*unsafe.Pointer)(ptr)) + if deReferenced == nil { + return true + } + isEmbeddedPtrNil, converted := encoder.ValueEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := unsafe.Pointer(deReferenced) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type referenceEncoder struct { + encoder ValEncoder +} + +func (encoder *referenceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + encoder.encoder.Encode(unsafe.Pointer(&ptr), stream) +} + +func (encoder *referenceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.encoder.IsEmpty(unsafe.Pointer(&ptr)) +} + +type referenceDecoder struct { + decoder ValDecoder +} + +func (decoder *referenceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.decoder.Decode(unsafe.Pointer(&ptr), iter) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_slice.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_slice.go new file mode 100644 index 00000000..9441d79d --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_slice.go @@ -0,0 +1,99 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "unsafe" +) + +func decoderOfSlice(ctx *ctx, typ reflect2.Type) ValDecoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + decoder := decoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceDecoder{sliceType, decoder} +} + +func encoderOfSlice(ctx *ctx, typ reflect2.Type) ValEncoder { + sliceType := typ.(*reflect2.UnsafeSliceType) + encoder := encoderOfType(ctx.append("[sliceElem]"), sliceType.Elem()) + return &sliceEncoder{sliceType, encoder} +} + +type sliceEncoder struct { + sliceType *reflect2.UnsafeSliceType + elemEncoder ValEncoder 
+} + +func (encoder *sliceEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + if encoder.sliceType.UnsafeIsNil(ptr) { + stream.WriteNil() + return + } + length := encoder.sliceType.UnsafeLengthOf(ptr) + if length == 0 { + stream.WriteEmptyArray() + return + } + stream.WriteArrayStart() + encoder.elemEncoder.Encode(encoder.sliceType.UnsafeGetIndex(ptr, 0), stream) + for i := 1; i < length; i++ { + stream.WriteMore() + elemPtr := encoder.sliceType.UnsafeGetIndex(ptr, i) + encoder.elemEncoder.Encode(elemPtr, stream) + } + stream.WriteArrayEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%v: %s", encoder.sliceType, stream.Error.Error()) + } +} + +func (encoder *sliceEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.sliceType.UnsafeLengthOf(ptr) == 0 +} + +type sliceDecoder struct { + sliceType *reflect2.UnsafeSliceType + elemDecoder ValDecoder +} + +func (decoder *sliceDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.doDecode(ptr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v: %s", decoder.sliceType, iter.Error.Error()) + } +} + +func (decoder *sliceDecoder) doDecode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + sliceType := decoder.sliceType + if c == 'n' { + iter.skipThreeBytes('u', 'l', 'l') + sliceType.UnsafeSetNil(ptr) + return + } + if c != '[' { + iter.ReportError("decode slice", "expect [ or n, but found "+string([]byte{c})) + return + } + c = iter.nextToken() + if c == ']' { + sliceType.UnsafeSet(ptr, sliceType.UnsafeMakeSlice(0, 0)) + return + } + iter.unreadByte() + sliceType.UnsafeGrow(ptr, 1) + elemPtr := sliceType.UnsafeGetIndex(ptr, 0) + decoder.elemDecoder.Decode(elemPtr, iter) + length := 1 + for c = iter.nextToken(); c == ','; c = iter.nextToken() { + idx := length + length += 1 + sliceType.UnsafeGrow(ptr, length) + elemPtr = sliceType.UnsafeGetIndex(ptr, idx) + decoder.elemDecoder.Decode(elemPtr, iter) + } + if c != ']' { + 
iter.ReportError("decode slice", "expect ], but found "+string([]byte{c})) + return + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_struct_decoder.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_struct_decoder.go new file mode 100644 index 00000000..5ad5cc56 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_struct_decoder.go @@ -0,0 +1,1092 @@ +package jsoniter + +import ( + "fmt" + "io" + "strings" + "unsafe" + + "github.com/modern-go/reflect2" +) + +func decoderOfStruct(ctx *ctx, typ reflect2.Type) ValDecoder { + bindings := map[string]*Binding{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, fromName := range binding.FromNames { + old := bindings[fromName] + if old == nil { + bindings[fromName] = binding + continue + } + ignoreOld, ignoreNew := resolveConflictBinding(ctx.frozenConfig, old, binding) + if ignoreOld { + delete(bindings, fromName) + } + if !ignoreNew { + bindings[fromName] = binding + } + } + } + fields := map[string]*structFieldDecoder{} + for k, binding := range bindings { + fields[k] = binding.Decoder.(*structFieldDecoder) + } + + if !ctx.caseSensitive() { + for k, binding := range bindings { + if _, found := fields[strings.ToLower(k)]; !found { + fields[strings.ToLower(k)] = binding.Decoder.(*structFieldDecoder) + } + } + } + + return createStructDecoder(ctx, typ, fields) +} + +func createStructDecoder(ctx *ctx, typ reflect2.Type, fields map[string]*structFieldDecoder) ValDecoder { + if ctx.disallowUnknownFields { + return &generalStructDecoder{typ: typ, fields: fields, disallowUnknownFields: true} + } + knownHash := map[int64]struct{}{ + 0: {}, + } + + switch len(fields) { + case 0: + return &skipObjectDecoder{typ} + case 1: + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return 
&generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + return &oneFieldStructDecoder{typ, fieldHash, fieldDecoder} + } + case 2: + var fieldHash1 int64 + var fieldHash2 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldHash1 == 0 { + fieldHash1 = fieldHash + fieldDecoder1 = fieldDecoder + } else { + fieldHash2 = fieldHash + fieldDecoder2 = fieldDecoder + } + } + return &twoFieldsStructDecoder{typ, fieldHash1, fieldDecoder1, fieldHash2, fieldDecoder2} + case 3: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } + } + return &threeFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3} + case 4: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + 
return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } + } + return &fourFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4} + case 5: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } + } + return &fiveFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5} + case 6: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 
*structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } + } + return &sixFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6} + case 7: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = 
fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } + } + return &sevenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7} + case 8: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else { + fieldName8 = 
fieldHash + fieldDecoder8 = fieldDecoder + } + } + return &eightFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8} + case 9: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } + } + return &nineFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + fieldName2, fieldDecoder2, + 
fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9} + case 10: + var fieldName1 int64 + var fieldName2 int64 + var fieldName3 int64 + var fieldName4 int64 + var fieldName5 int64 + var fieldName6 int64 + var fieldName7 int64 + var fieldName8 int64 + var fieldName9 int64 + var fieldName10 int64 + var fieldDecoder1 *structFieldDecoder + var fieldDecoder2 *structFieldDecoder + var fieldDecoder3 *structFieldDecoder + var fieldDecoder4 *structFieldDecoder + var fieldDecoder5 *structFieldDecoder + var fieldDecoder6 *structFieldDecoder + var fieldDecoder7 *structFieldDecoder + var fieldDecoder8 *structFieldDecoder + var fieldDecoder9 *structFieldDecoder + var fieldDecoder10 *structFieldDecoder + for fieldName, fieldDecoder := range fields { + fieldHash := calcHash(fieldName, ctx.caseSensitive()) + _, known := knownHash[fieldHash] + if known { + return &generalStructDecoder{typ, fields, false} + } + knownHash[fieldHash] = struct{}{} + if fieldName1 == 0 { + fieldName1 = fieldHash + fieldDecoder1 = fieldDecoder + } else if fieldName2 == 0 { + fieldName2 = fieldHash + fieldDecoder2 = fieldDecoder + } else if fieldName3 == 0 { + fieldName3 = fieldHash + fieldDecoder3 = fieldDecoder + } else if fieldName4 == 0 { + fieldName4 = fieldHash + fieldDecoder4 = fieldDecoder + } else if fieldName5 == 0 { + fieldName5 = fieldHash + fieldDecoder5 = fieldDecoder + } else if fieldName6 == 0 { + fieldName6 = fieldHash + fieldDecoder6 = fieldDecoder + } else if fieldName7 == 0 { + fieldName7 = fieldHash + fieldDecoder7 = fieldDecoder + } else if fieldName8 == 0 { + fieldName8 = fieldHash + fieldDecoder8 = fieldDecoder + } else if fieldName9 == 0 { + fieldName9 = fieldHash + fieldDecoder9 = fieldDecoder + } else { + fieldName10 = fieldHash + fieldDecoder10 = fieldDecoder + } + } + return &tenFieldsStructDecoder{typ, + fieldName1, fieldDecoder1, + 
fieldName2, fieldDecoder2, + fieldName3, fieldDecoder3, + fieldName4, fieldDecoder4, + fieldName5, fieldDecoder5, + fieldName6, fieldDecoder6, + fieldName7, fieldDecoder7, + fieldName8, fieldDecoder8, + fieldName9, fieldDecoder9, + fieldName10, fieldDecoder10} + } + return &generalStructDecoder{typ, fields, false} +} + +type generalStructDecoder struct { + typ reflect2.Type + fields map[string]*structFieldDecoder + disallowUnknownFields bool +} + +func (decoder *generalStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + var c byte + for c = ','; c == ','; c = iter.nextToken() { + decoder.decodeOneField(ptr, iter) + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + if c != '}' { + iter.ReportError("struct Decode", `expect }, but found `+string([]byte{c})) + } + iter.decrementDepth() +} + +func (decoder *generalStructDecoder) decodeOneField(ptr unsafe.Pointer, iter *Iterator) { + var field string + var fieldDecoder *structFieldDecoder + if iter.cfg.objectFieldMustBeSimpleString { + fieldBytes := iter.ReadStringAsSlice() + field = *(*string)(unsafe.Pointer(&fieldBytes)) + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } else { + field = iter.ReadString() + fieldDecoder = decoder.fields[field] + if fieldDecoder == nil && !iter.cfg.caseSensitive { + fieldDecoder = decoder.fields[strings.ToLower(field)] + } + } + if fieldDecoder == nil { + if decoder.disallowUnknownFields { + msg := "found unknown field: " + field + iter.ReportError("ReadObject", msg) + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : after object field, but found "+string([]byte{c})) + } + iter.Skip() + return + } + c := iter.nextToken() + if c != ':' { + iter.ReportError("ReadObject", "expect : 
after object field, but found "+string([]byte{c})) + } + fieldDecoder.Decode(ptr, iter) +} + +type skipObjectDecoder struct { + typ reflect2.Type +} + +func (decoder *skipObjectDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + valueType := iter.WhatIsNext() + if valueType != ObjectValue && valueType != NilValue { + iter.ReportError("skipObjectDecoder", "expect object or null") + return + } + iter.Skip() +} + +type oneFieldStructDecoder struct { + typ reflect2.Type + fieldHash int64 + fieldDecoder *structFieldDecoder +} + +func (decoder *oneFieldStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + if iter.readFieldHash() == decoder.fieldHash { + decoder.fieldDecoder.Decode(ptr, iter) + } else { + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type twoFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder +} + +func (decoder *twoFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type threeFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder +} + 
+func (decoder *threeFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fourFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder +} + +func (decoder *fourFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type fiveFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder +} + +func (decoder 
*fiveFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type sixFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder +} + +func (decoder *sixFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() 
+} + +type sevenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder +} + +func (decoder *sevenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type eightFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder +} + +func (decoder *eightFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + 
return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type nineFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder +} + +func (decoder *nineFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case 
decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type tenFieldsStructDecoder struct { + typ reflect2.Type + fieldHash1 int64 + fieldDecoder1 *structFieldDecoder + fieldHash2 int64 + fieldDecoder2 *structFieldDecoder + fieldHash3 int64 + fieldDecoder3 *structFieldDecoder + fieldHash4 int64 + fieldDecoder4 *structFieldDecoder + fieldHash5 int64 + fieldDecoder5 *structFieldDecoder + fieldHash6 int64 + fieldDecoder6 *structFieldDecoder + fieldHash7 int64 + fieldDecoder7 *structFieldDecoder + fieldHash8 int64 + fieldDecoder8 *structFieldDecoder + fieldHash9 int64 + fieldDecoder9 *structFieldDecoder + fieldHash10 int64 + fieldDecoder10 *structFieldDecoder +} + +func (decoder *tenFieldsStructDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + if !iter.readObjectStart() { + return + } + if !iter.incrementDepth() { + return + } + for { + switch iter.readFieldHash() { + case decoder.fieldHash1: + decoder.fieldDecoder1.Decode(ptr, iter) + case decoder.fieldHash2: + decoder.fieldDecoder2.Decode(ptr, iter) + case decoder.fieldHash3: + decoder.fieldDecoder3.Decode(ptr, iter) + case decoder.fieldHash4: + decoder.fieldDecoder4.Decode(ptr, iter) + case decoder.fieldHash5: + decoder.fieldDecoder5.Decode(ptr, iter) + case decoder.fieldHash6: + decoder.fieldDecoder6.Decode(ptr, iter) + case decoder.fieldHash7: + decoder.fieldDecoder7.Decode(ptr, iter) + case decoder.fieldHash8: + decoder.fieldDecoder8.Decode(ptr, iter) + case decoder.fieldHash9: + decoder.fieldDecoder9.Decode(ptr, iter) + case decoder.fieldHash10: + 
decoder.fieldDecoder10.Decode(ptr, iter) + default: + iter.Skip() + } + if iter.isObjectEnd() { + break + } + } + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%v.%s", decoder.typ, iter.Error.Error()) + } + iter.decrementDepth() +} + +type structFieldDecoder struct { + field reflect2.StructField + fieldDecoder ValDecoder +} + +func (decoder *structFieldDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + fieldPtr := decoder.field.UnsafeGet(ptr) + decoder.fieldDecoder.Decode(fieldPtr, iter) + if iter.Error != nil && iter.Error != io.EOF { + iter.Error = fmt.Errorf("%s: %s", decoder.field.Name(), iter.Error.Error()) + } +} + +type stringModeStringDecoder struct { + elemDecoder ValDecoder + cfg *frozenConfig +} + +func (decoder *stringModeStringDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + decoder.elemDecoder.Decode(ptr, iter) + str := *((*string)(ptr)) + tempIter := decoder.cfg.BorrowIterator([]byte(str)) + defer decoder.cfg.ReturnIterator(tempIter) + *((*string)(ptr)) = tempIter.ReadString() +} + +type stringModeNumberDecoder struct { + elemDecoder ValDecoder +} + +func (decoder *stringModeNumberDecoder) Decode(ptr unsafe.Pointer, iter *Iterator) { + c := iter.nextToken() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } + decoder.elemDecoder.Decode(ptr, iter) + if iter.Error != nil { + return + } + c = iter.readByte() + if c != '"' { + iter.ReportError("stringModeNumberDecoder", `expect ", but found `+string([]byte{c})) + return + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/reflect_struct_encoder.go b/terraform-server/vendor/github.com/json-iterator/go/reflect_struct_encoder.go new file mode 100644 index 00000000..152e3ef5 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/reflect_struct_encoder.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "fmt" + "github.com/modern-go/reflect2" + "io" + "reflect" + 
"unsafe" +) + +func encoderOfStruct(ctx *ctx, typ reflect2.Type) ValEncoder { + type bindingTo struct { + binding *Binding + toName string + ignored bool + } + orderedBindings := []*bindingTo{} + structDescriptor := describeStruct(ctx, typ) + for _, binding := range structDescriptor.Fields { + for _, toName := range binding.ToNames { + new := &bindingTo{ + binding: binding, + toName: toName, + } + for _, old := range orderedBindings { + if old.toName != toName { + continue + } + old.ignored, new.ignored = resolveConflictBinding(ctx.frozenConfig, old.binding, new.binding) + } + orderedBindings = append(orderedBindings, new) + } + } + if len(orderedBindings) == 0 { + return &emptyStructEncoder{} + } + finalOrderedFields := []structFieldTo{} + for _, bindingTo := range orderedBindings { + if !bindingTo.ignored { + finalOrderedFields = append(finalOrderedFields, structFieldTo{ + encoder: bindingTo.binding.Encoder.(*structFieldEncoder), + toName: bindingTo.toName, + }) + } + } + return &structEncoder{typ, finalOrderedFields} +} + +func createCheckIsEmpty(ctx *ctx, typ reflect2.Type) checkIsEmpty { + encoder := createEncoderOfNative(ctx, typ) + if encoder != nil { + return encoder + } + kind := typ.Kind() + switch kind { + case reflect.Interface: + return &dynamicEncoder{typ} + case reflect.Struct: + return &structEncoder{typ: typ} + case reflect.Array: + return &arrayEncoder{} + case reflect.Slice: + return &sliceEncoder{} + case reflect.Map: + return encoderOfMap(ctx, typ) + case reflect.Ptr: + return &OptionalEncoder{} + default: + return &lazyErrorEncoder{err: fmt.Errorf("unsupported type: %v", typ)} + } +} + +func resolveConflictBinding(cfg *frozenConfig, old, new *Binding) (ignoreOld, ignoreNew bool) { + newTagged := new.Field.Tag().Get(cfg.getTagKey()) != "" + oldTagged := old.Field.Tag().Get(cfg.getTagKey()) != "" + if newTagged { + if oldTagged { + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + 
return false, true + } else { + return true, true + } + } else { + return true, false + } + } else { + if oldTagged { + return true, false + } + if len(old.levels) > len(new.levels) { + return true, false + } else if len(new.levels) > len(old.levels) { + return false, true + } else { + return true, true + } + } +} + +type structFieldEncoder struct { + field reflect2.StructField + fieldEncoder ValEncoder + omitempty bool +} + +func (encoder *structFieldEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + fieldPtr := encoder.field.UnsafeGet(ptr) + encoder.fieldEncoder.Encode(fieldPtr, stream) + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = fmt.Errorf("%s: %s", encoder.field.Name(), stream.Error.Error()) + } +} + +func (encoder *structFieldEncoder) IsEmpty(ptr unsafe.Pointer) bool { + fieldPtr := encoder.field.UnsafeGet(ptr) + return encoder.fieldEncoder.IsEmpty(fieldPtr) +} + +func (encoder *structFieldEncoder) IsEmbeddedPtrNil(ptr unsafe.Pointer) bool { + isEmbeddedPtrNil, converted := encoder.fieldEncoder.(IsEmbeddedPtrNil) + if !converted { + return false + } + fieldPtr := encoder.field.UnsafeGet(ptr) + return isEmbeddedPtrNil.IsEmbeddedPtrNil(fieldPtr) +} + +type IsEmbeddedPtrNil interface { + IsEmbeddedPtrNil(ptr unsafe.Pointer) bool +} + +type structEncoder struct { + typ reflect2.Type + fields []structFieldTo +} + +type structFieldTo struct { + encoder *structFieldEncoder + toName string +} + +func (encoder *structEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteObjectStart() + isNotFirst := false + for _, field := range encoder.fields { + if field.encoder.omitempty && field.encoder.IsEmpty(ptr) { + continue + } + if field.encoder.IsEmbeddedPtrNil(ptr) { + continue + } + if isNotFirst { + stream.WriteMore() + } + stream.WriteObjectField(field.toName) + field.encoder.Encode(ptr, stream) + isNotFirst = true + } + stream.WriteObjectEnd() + if stream.Error != nil && stream.Error != io.EOF { + stream.Error = 
fmt.Errorf("%v.%s", encoder.typ, stream.Error.Error()) + } +} + +func (encoder *structEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type emptyStructEncoder struct { +} + +func (encoder *emptyStructEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.WriteEmptyObject() +} + +func (encoder *emptyStructEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return false +} + +type stringModeNumberEncoder struct { + elemEncoder ValEncoder +} + +func (encoder *stringModeNumberEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + stream.writeByte('"') + encoder.elemEncoder.Encode(ptr, stream) + stream.writeByte('"') +} + +func (encoder *stringModeNumberEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} + +type stringModeStringEncoder struct { + elemEncoder ValEncoder + cfg *frozenConfig +} + +func (encoder *stringModeStringEncoder) Encode(ptr unsafe.Pointer, stream *Stream) { + tempStream := encoder.cfg.BorrowStream(nil) + tempStream.Attachment = stream.Attachment + defer encoder.cfg.ReturnStream(tempStream) + encoder.elemEncoder.Encode(ptr, tempStream) + stream.WriteString(string(tempStream.Buffer())) +} + +func (encoder *stringModeStringEncoder) IsEmpty(ptr unsafe.Pointer) bool { + return encoder.elemEncoder.IsEmpty(ptr) +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/stream.go b/terraform-server/vendor/github.com/json-iterator/go/stream.go new file mode 100644 index 00000000..17662fde --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/stream.go @@ -0,0 +1,211 @@ +package jsoniter + +import ( + "io" +) + +// stream is a io.Writer like object, with JSON specific write functions. +// Error is not returned as return value, but stored as Error member on this stream instance. +type Stream struct { + cfg *frozenConfig + out io.Writer + buf []byte + Error error + indention int + Attachment interface{} // open for customized encoder +} + +// NewStream create new stream instance. 
+// cfg can be jsoniter.ConfigDefault. +// out can be nil if write to internal buffer. +// bufSize is the initial size for the internal buffer in bytes. +func NewStream(cfg API, out io.Writer, bufSize int) *Stream { + return &Stream{ + cfg: cfg.(*frozenConfig), + out: out, + buf: make([]byte, 0, bufSize), + Error: nil, + indention: 0, + } +} + +// Pool returns a pool can provide more stream with same configuration +func (stream *Stream) Pool() StreamPool { + return stream.cfg +} + +// Reset reuse this stream instance by assign a new writer +func (stream *Stream) Reset(out io.Writer) { + stream.out = out + stream.buf = stream.buf[:0] +} + +// Available returns how many bytes are unused in the buffer. +func (stream *Stream) Available() int { + return cap(stream.buf) - len(stream.buf) +} + +// Buffered returns the number of bytes that have been written into the current buffer. +func (stream *Stream) Buffered() int { + return len(stream.buf) +} + +// Buffer if writer is nil, use this method to take the result +func (stream *Stream) Buffer() []byte { + return stream.buf +} + +// SetBuffer allows to append to the internal buffer directly +func (stream *Stream) SetBuffer(buf []byte) { + stream.buf = buf +} + +// Write writes the contents of p into the buffer. +// It returns the number of bytes written. +// If nn < len(p), it also returns an error explaining +// why the write is short. +func (stream *Stream) Write(p []byte) (nn int, err error) { + stream.buf = append(stream.buf, p...) + if stream.out != nil { + nn, err = stream.out.Write(stream.buf) + stream.buf = stream.buf[nn:] + return + } + return len(p), nil +} + +// WriteByte writes a single byte. 
+func (stream *Stream) writeByte(c byte) { + stream.buf = append(stream.buf, c) +} + +func (stream *Stream) writeTwoBytes(c1 byte, c2 byte) { + stream.buf = append(stream.buf, c1, c2) +} + +func (stream *Stream) writeThreeBytes(c1 byte, c2 byte, c3 byte) { + stream.buf = append(stream.buf, c1, c2, c3) +} + +func (stream *Stream) writeFourBytes(c1 byte, c2 byte, c3 byte, c4 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4) +} + +func (stream *Stream) writeFiveBytes(c1 byte, c2 byte, c3 byte, c4 byte, c5 byte) { + stream.buf = append(stream.buf, c1, c2, c3, c4, c5) +} + +// Flush writes any buffered data to the underlying io.Writer. +func (stream *Stream) Flush() error { + if stream.out == nil { + return nil + } + if stream.Error != nil { + return stream.Error + } + n, err := stream.out.Write(stream.buf) + if err != nil { + if stream.Error == nil { + stream.Error = err + } + return err + } + stream.buf = stream.buf[n:] + return nil +} + +// WriteRaw write string out without quotes, just like []byte +func (stream *Stream) WriteRaw(s string) { + stream.buf = append(stream.buf, s...) 
+} + +// WriteNil write null to stream +func (stream *Stream) WriteNil() { + stream.writeFourBytes('n', 'u', 'l', 'l') +} + +// WriteTrue write true to stream +func (stream *Stream) WriteTrue() { + stream.writeFourBytes('t', 'r', 'u', 'e') +} + +// WriteFalse write false to stream +func (stream *Stream) WriteFalse() { + stream.writeFiveBytes('f', 'a', 'l', 's', 'e') +} + +// WriteBool write true or false into stream +func (stream *Stream) WriteBool(val bool) { + if val { + stream.WriteTrue() + } else { + stream.WriteFalse() + } +} + +// WriteObjectStart write { with possible indention +func (stream *Stream) WriteObjectStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('{') + stream.writeIndention(0) +} + +// WriteObjectField write "field": with possible indention +func (stream *Stream) WriteObjectField(field string) { + stream.WriteString(field) + if stream.indention > 0 { + stream.writeTwoBytes(':', ' ') + } else { + stream.writeByte(':') + } +} + +// WriteObjectEnd write } with possible indention +func (stream *Stream) WriteObjectEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + stream.writeByte('}') +} + +// WriteEmptyObject write {} +func (stream *Stream) WriteEmptyObject() { + stream.writeByte('{') + stream.writeByte('}') +} + +// WriteMore write , with possible indention +func (stream *Stream) WriteMore() { + stream.writeByte(',') + stream.writeIndention(0) + stream.Flush() +} + +// WriteArrayStart write [ with possible indention +func (stream *Stream) WriteArrayStart() { + stream.indention += stream.cfg.indentionStep + stream.writeByte('[') + stream.writeIndention(0) +} + +// WriteEmptyArray write [] +func (stream *Stream) WriteEmptyArray() { + stream.writeTwoBytes('[', ']') +} + +// WriteArrayEnd write ] with possible indention +func (stream *Stream) WriteArrayEnd() { + stream.writeIndention(stream.cfg.indentionStep) + stream.indention -= stream.cfg.indentionStep + 
stream.writeByte(']') +} + +func (stream *Stream) writeIndention(delta int) { + if stream.indention == 0 { + return + } + stream.writeByte('\n') + toWrite := stream.indention - delta + for i := 0; i < toWrite; i++ { + stream.buf = append(stream.buf, ' ') + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/stream_float.go b/terraform-server/vendor/github.com/json-iterator/go/stream_float.go new file mode 100644 index 00000000..826aa594 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/stream_float.go @@ -0,0 +1,111 @@ +package jsoniter + +import ( + "fmt" + "math" + "strconv" +) + +var pow10 []uint64 + +func init() { + pow10 = []uint64{1, 10, 100, 1000, 10000, 100000, 1000000} +} + +// WriteFloat32 write float32 to stream +func (stream *Stream) WriteFloat32(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(float64(val)) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
+ if abs != 0 { + if float32(abs) < 1e-6 || float32(abs) >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 32) +} + +// WriteFloat32Lossy write float32 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat32Lossy(val float32) { + if math.IsInf(float64(val), 0) || math.IsNaN(float64(val)) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat32(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(float64(val)*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} + +// WriteFloat64 write float64 to stream +func (stream *Stream) WriteFloat64(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + abs := math.Abs(val) + fmt := byte('f') + // Note: Must use float32 comparisons for underlying float32 value to get precise cutoffs right. 
+ if abs != 0 { + if abs < 1e-6 || abs >= 1e21 { + fmt = 'e' + } + } + stream.buf = strconv.AppendFloat(stream.buf, float64(val), fmt, -1, 64) +} + +// WriteFloat64Lossy write float64 to stream with ONLY 6 digits precision although much much faster +func (stream *Stream) WriteFloat64Lossy(val float64) { + if math.IsInf(val, 0) || math.IsNaN(val) { + stream.Error = fmt.Errorf("unsupported value: %f", val) + return + } + if val < 0 { + stream.writeByte('-') + val = -val + } + if val > 0x4ffffff { + stream.WriteFloat64(val) + return + } + precision := 6 + exp := uint64(1000000) // 6 + lval := uint64(val*float64(exp) + 0.5) + stream.WriteUint64(lval / exp) + fval := lval % exp + if fval == 0 { + return + } + stream.writeByte('.') + for p := precision - 1; p > 0 && fval < pow10[p]; p-- { + stream.writeByte('0') + } + stream.WriteUint64(fval) + for stream.buf[len(stream.buf)-1] == '0' { + stream.buf = stream.buf[:len(stream.buf)-1] + } +} diff --git a/terraform-server/vendor/github.com/json-iterator/go/stream_int.go b/terraform-server/vendor/github.com/json-iterator/go/stream_int.go new file mode 100644 index 00000000..d1059ee4 --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/stream_int.go @@ -0,0 +1,190 @@ +package jsoniter + +var digits []uint32 + +func init() { + digits = make([]uint32, 1000) + for i := uint32(0); i < 1000; i++ { + digits[i] = (((i / 100) + '0') << 16) + ((((i / 10) % 10) + '0') << 8) + i%10 + '0' + if i < 10 { + digits[i] += 2 << 24 + } else if i < 100 { + digits[i] += 1 << 24 + } + } +} + +func writeFirstBuf(space []byte, v uint32) []byte { + start := v >> 24 + if start == 0 { + space = append(space, byte(v>>16), byte(v>>8)) + } else if start == 1 { + space = append(space, byte(v>>8)) + } + space = append(space, byte(v)) + return space +} + +func writeBuf(buf []byte, v uint32) []byte { + return append(buf, byte(v>>16), byte(v>>8), byte(v)) +} + +// WriteUint8 write uint8 to stream +func (stream *Stream) WriteUint8(val uint8) { 
+ stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteInt8 write int8 to stream +func (stream *Stream) WriteInt8(nval int8) { + var val uint8 + if nval < 0 { + val = uint8(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint8(nval) + } + stream.buf = writeFirstBuf(stream.buf, digits[val]) +} + +// WriteUint16 write uint16 to stream +func (stream *Stream) WriteUint16(val uint16) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return +} + +// WriteInt16 write int16 to stream +func (stream *Stream) WriteInt16(nval int16) { + var val uint16 + if nval < 0 { + val = uint16(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint16(nval) + } + stream.WriteUint16(val) +} + +// WriteUint32 write uint32 to stream +func (stream *Stream) WriteUint32(val uint32) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + } else { + r3 := q2 - q3*1000 + stream.buf = append(stream.buf, byte(q3+'0')) + stream.buf = writeBuf(stream.buf, digits[r3]) + } + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt32 write int32 to stream +func (stream *Stream) WriteInt32(nval int32) { + var val uint32 + if nval < 0 { + val = uint32(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint32(nval) + } + stream.WriteUint32(val) +} + +// WriteUint64 write uint64 to stream +func (stream *Stream) WriteUint64(val uint64) { + q1 := val / 1000 + if q1 == 0 { + stream.buf = 
writeFirstBuf(stream.buf, digits[val]) + return + } + r1 := val - q1*1000 + q2 := q1 / 1000 + if q2 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q1]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r2 := q1 - q2*1000 + q3 := q2 / 1000 + if q3 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q2]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r3 := q2 - q3*1000 + q4 := q3 / 1000 + if q4 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q3]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r4 := q3 - q4*1000 + q5 := q4 / 1000 + if q5 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q4]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) + return + } + r5 := q4 - q5*1000 + q6 := q5 / 1000 + if q6 == 0 { + stream.buf = writeFirstBuf(stream.buf, digits[q5]) + } else { + stream.buf = writeFirstBuf(stream.buf, digits[q6]) + r6 := q5 - q6*1000 + stream.buf = writeBuf(stream.buf, digits[r6]) + } + stream.buf = writeBuf(stream.buf, digits[r5]) + stream.buf = writeBuf(stream.buf, digits[r4]) + stream.buf = writeBuf(stream.buf, digits[r3]) + stream.buf = writeBuf(stream.buf, digits[r2]) + stream.buf = writeBuf(stream.buf, digits[r1]) +} + +// WriteInt64 write int64 to stream +func (stream *Stream) WriteInt64(nval int64) { + var val uint64 + if nval < 0 { + val = uint64(-nval) + stream.buf = append(stream.buf, '-') + } else { + val = uint64(nval) + } + stream.WriteUint64(val) +} + +// WriteInt write int to stream +func (stream *Stream) WriteInt(val int) { + stream.WriteInt64(int64(val)) +} + +// WriteUint write uint to stream +func (stream *Stream) WriteUint(val uint) { + stream.WriteUint64(uint64(val)) +} diff --git 
a/terraform-server/vendor/github.com/json-iterator/go/stream_str.go b/terraform-server/vendor/github.com/json-iterator/go/stream_str.go new file mode 100644 index 00000000..54c2ba0b --- /dev/null +++ b/terraform-server/vendor/github.com/json-iterator/go/stream_str.go @@ -0,0 +1,372 @@ +package jsoniter + +import ( + "unicode/utf8" +) + +// htmlSafeSet holds the value true if the ASCII character with the given +// array position can be safely represented inside a JSON string, embedded +// inside of HTML - diff --git a/terraform-ui/src/pages/components/modal.vue b/terraform-ui/src/pages/components/modal.vue deleted file mode 100755 index c7055477..00000000 --- a/terraform-ui/src/pages/components/modal.vue +++ /dev/null @@ -1,511 +0,0 @@ -