From 8cbb6e93a8c2c7c4381c7c155bc408810d3886cf Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 27 May 2019 11:02:47 +0200 Subject: [PATCH 001/201] Configure: refactoring to also ask for the scheduler Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 15 +++++++++++++-- cli/pcluster/utils.py | 8 ++++++++ 2 files changed, 21 insertions(+), 2 deletions(-) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index 2af4d7a527..da6b1bce0e 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -27,6 +27,7 @@ from botocore.exceptions import BotoCoreError, ClientError from . import cfnconfig +from pcluster.utils import get_supported_schedulers logger = logging.getLogger("pcluster.pcluster") unsupported_regions = ["ap-northeast-3"] @@ -56,7 +57,7 @@ def prompt(prompt, default_value=None, hidden=False, options=None, check_validit else: user_prompt = user_prompt + "]: " - if isinstance(options, list): + if isinstance(options, (list,tuple)): print("Acceptable Values for %s: " % prompt) for o in options: print(" %s" % o) @@ -169,6 +170,15 @@ def configure(args): # noqa: C901 FIXME!!! options=get_regions(), check_validity=True, ) + + scheduler = prompt( + "Scheduler", + config.get("cluster " + cluster_template, "scheduler") if config.has_option("cluster " + cluster_template, + "scheduler") else "sge", + options=get_supported_schedulers(), + check_validity=True, + ) + vpcname = prompt( "VPC Name", config.get("cluster " + cluster_template, "vpc_settings") @@ -209,7 +219,8 @@ def configure(args): # noqa: C901 FIXME!!! } s_aws = {"__name__": "aws", "aws_region_name": aws_region_name} s_aliases = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} - s_cluster = {"__name__": "cluster " + cluster_template, "key_name": key_name, "vpc_settings": vpcname} + s_cluster = {"__name__": "cluster " + cluster_template, "key_name": key_name, "vpc_settings": vpcname, + "scheduler": scheduler} s_vpc = {"__name__": "vpc " + vpcname, "vpc_id": vpc_id, "master_subnet_id": master_subnet_id} sections = [s_aws, s_cluster, s_vpc, s_global, s_aliases] diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index bd37f8fde6..df73b46ebe 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -180,3 +180,11 @@ def get_instance_vcpus(region, instance_type): vcpus = -1 return vcpus + + +def get_supported_schedulers(): + """ + Return a tuple of the scheduler supported by parallelcluster. + :return: a tuple of strings of the supported scheduler + """ + return "sge", "torque", "slurm", "awsbatch" From eaab6aea0b6c5ffc517bcf12e53eb32ccb6e5f56 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 27 May 2019 12:37:54 +0200 Subject: [PATCH 002/201] Configure: fixed a bug that override .config even with errors Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 35 +++++++++++++++++++++++++++++++++-- 1 file changed, 33 insertions(+), 2 deletions(-) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index da6b1bce0e..a3ee3eb049 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -24,6 +24,7 @@ import boto3 import configparser +import tempfile from botocore.exceptions import BotoCoreError, ClientError from . import cfnconfig @@ -220,7 +221,7 @@ def configure(args): # noqa: C901 FIXME!!! 
s_aws = {"__name__": "aws", "aws_region_name": aws_region_name} s_aliases = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} s_cluster = {"__name__": "cluster " + cluster_template, "key_name": key_name, "vpc_settings": vpcname, - "scheduler": scheduler} + "scheduler": scheduler, "base_os": operating_system} s_vpc = {"__name__": "vpc " + vpcname, "vpc_id": vpc_id, "master_subnet_id": master_subnet_id} sections = [s_aws, s_cluster, s_vpc, s_global, s_aliases] @@ -244,11 +245,41 @@ def configure(args): # noqa: C901 FIXME!!! if e.errno != errno.EEXIST: raise # can safely ignore EEXISTS for this purpose... + if not _is_config_valid(args, config): + sys.exit(1) + + # If we are here, than the file it's correct and we can override it. # Write configuration to disk open(config_file, "a").close() os.chmod(config_file, stat.S_IRUSR | stat.S_IWUSR) with open(config_file, "w") as cf: config.write(cf) + +def _is_config_valid(args, config): + """ + Validate the configuration of the pcluster configure. + :param args: the arguments passed with the command line + :param config: the configParser + :return True if the configuration is valid, false otherwise + """ + # We create a temp_file to validate before overriding the original config + path = os.path.join(tempfile.gettempdir(), "temp_config") + temp_file = path + temp_args = args + temp_args.config_file = path + open(temp_file, "a").close() + os.chmod(temp_file, stat.S_IRUSR | stat.S_IWUSR) + with open(temp_file, "w") as cf: + config.write(cf) # Verify the configuration - cfnconfig.ParallelClusterConfig(args) + is_file_ok = True + try: + cfnconfig.ParallelClusterConfig(temp_args) + except SystemExit as e: + is_file_ok = False + finally: + os.remove(path) + if is_file_ok: + return True + return False From be192ebfe4259f79ae0294bc854b019db11617ac Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 27 May 2019 12:38:29 +0200 Subject: [PATCH 003/201] Pcluster: fixed a bug on region check Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 8 +++++++- 1 file changed, 7 insertions(+), 1 deletion(-) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index a3ee3eb049..e432a773fb 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -84,20 +84,26 @@ def get_regions(): @handle_client_exception -def ec2_conn(aws_region_name): +def ec2_get_region(aws_region_name): if aws_region_name: region = aws_region_name elif os.environ.get("AWS_DEFAULT_REGION"): region = os.environ.get("AWS_DEFAULT_REGION") else: region = "us-east-1" + return region + +@handle_client_exception +def ec2_conn(aws_region_name): + region = ec2_get_region(aws_region_name) ec2 = boto3.client("ec2", region_name=region) return ec2 @handle_client_exception def list_keys(aws_region_name): + aws_region_name = ec2_get_region(aws_region_name) # we get the default if not present conn = ec2_conn(aws_region_name) keypairs = conn.describe_key_pairs() keynames = [] From 6f4c0b49cc46002f9929ab1f0806a60c663662f7 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 27 May 2019 12:48:03 +0200 Subject: [PATCH 004/201] Configure: added a default region for key-pair and aws-region Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 11 ++++++----- 1 file changed, 6 insertions(+), 5 deletions(-) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index e432a773fb..097321e37d 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -32,7 +32,7 @@ logger = 
logging.getLogger("pcluster.pcluster") unsupported_regions = ["ap-northeast-3"] - +default_region = "us-east-1" def handle_client_exception(func): @functools.wraps(func) @@ -90,7 +90,7 @@ def ec2_get_region(aws_region_name): elif os.environ.get("AWS_DEFAULT_REGION"): region = os.environ.get("AWS_DEFAULT_REGION") else: - region = "us-east-1" + region = default_region return region @@ -173,7 +173,7 @@ def configure(args): # noqa: C901 FIXME!!! # Use built in boto regions as an available option aws_region_name = prompt( "AWS Region ID", - config.get("aws", "aws_region_name") if config.has_option("aws", "aws_region_name") else None, + config.get("aws", "aws_region_name") if config.has_option("aws", "aws_region_name") else default_region, options=get_regions(), check_validity=True, ) @@ -193,13 +193,14 @@ def configure(args): # noqa: C901 FIXME!!! else "public", ) + keys = list_keys(aws_region_name) # Query EC2 for available keys as options key_name = prompt( "Key Name", config.get("cluster " + cluster_template, "key_name") if config.has_option("cluster " + cluster_template, "key_name") - else None, - options=list_keys(aws_region_name), + else keys[0], # it will always have at least one value, otherwise an exception before has been thrown + options=keys, check_validity=True, ) vpc_id = prompt( From c26c54295c33fbde196df7996c21996e5f58633a Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 27 May 2019 14:17:23 +0200 Subject: [PATCH 005/201] Configure: add os request Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 25 +++++++++++++++++++++++++ cli/pcluster/utils.py | 8 ++++++++ 2 files changed, 33 insertions(+) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index 097321e37d..8f5e2a8e82 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -29,11 +29,13 @@ from . import cfnconfig from pcluster.utils import get_supported_schedulers +from pcluster.utils import get_supported_os logger = logging.getLogger("pcluster.pcluster") unsupported_regions = ["ap-northeast-3"] default_region = "us-east-1" + def handle_client_exception(func): @functools.wraps(func) def wrapper(*args, **kwargs): @@ -185,6 +187,18 @@ def configure(args): # noqa: C901 FIXME!!! options=get_supported_schedulers(), check_validity=True, ) + is_batch = _is_aws_batch(scheduler) + + if is_batch: + operating_system = "alinux" + else: + operating_system = prompt( + "Operating System", + config.get("cluster " + cluster_template, "base_os") if config.has_option("cluster " + cluster_template, + "base_os") else "alinux", + options=get_supported_os(), + check_validity=True, + ) vpcname = prompt( "VPC Name", @@ -290,3 +304,14 @@ def _is_config_valid(args, config): if is_file_ok: return True return False + + +def _is_aws_batch(scheduler): + """ + Return true if the scheduler is awsbatch + :param scheduler: the scheduler to check + :return: true if the scheduler is awsbatch + """ + if scheduler == "awsbatch": + return True + return False diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index df73b46ebe..d21c4b948f 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -182,6 +182,14 @@ def get_instance_vcpus(region, instance_type): return vcpus +def get_supported_os(): + """ + Return a tuple of the os supported by parallelcluster. + :return: a tuple of strings of the supported os + """ + return "alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604" + + def get_supported_schedulers(): """ Return a tuple of the scheduler supported by parallelcluster. 
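A minimal sketch of how the pieces introduced up to this point fit together inside configure(): prompt() with options and check_validity, plus the new get_supported_schedulers() and get_supported_os() helpers. This only illustrates the flow already shown in the diffs above, assuming the pcluster CLI package is importable; the defaults mirror the code as of this patch.

    from pcluster.easyconfig import prompt  # the interactive helper defined earlier in this series
    from pcluster.utils import get_supported_os, get_supported_schedulers

    scheduler = prompt(
        "Scheduler",
        "sge",                               # default proposed when nothing is configured yet
        options=get_supported_schedulers(),  # ("sge", "torque", "slurm", "awsbatch")
        check_validity=True,                 # exits if the answer is not one of the options
    )
    if scheduler == "awsbatch":
        operating_system = "alinux"          # awsbatch only supports Amazon Linux, so the question is skipped
    else:
        operating_system = prompt(
            "Operating System",
            "alinux",
            options=get_supported_os(),      # ("alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604")
            check_validity=True,
        )
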
From 04dc8db7895b49fd4cc43b8db2c4dbe806c24442 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 27 May 2019 16:30:00 +0200 Subject: [PATCH 006/201] Configure: general refactoring to add cluster sizing questions Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 114 ++++++++++++++++++++++++++++--------- 1 file changed, 86 insertions(+), 28 deletions(-) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index 8f5e2a8e82..5b830894a7 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -161,73 +161,66 @@ def configure(args): # noqa: C901 FIXME!!! config_file = os.path.expanduser(os.path.join("~", ".parallelcluster", "config")) config = configparser.ConfigParser() - # Check if configuration file exists if os.path.isfile(config_file): config.read(config_file) + s_cluster = {} + # Prompt for required values, using existing as defaults cluster_template = prompt( - "Cluster Template", - config.get("global", "cluster_template") if config.has_option("global", "cluster_template") else "default", + "Cluster template", + get_parameter(config, "global", "cluster_template", "default"), ) + s_cluster["__name__"] = "cluster " + cluster_template # Use built in boto regions as an available option aws_region_name = prompt( "AWS Region ID", - config.get("aws", "aws_region_name") if config.has_option("aws", "aws_region_name") else default_region, + get_parameter(config, "aws", "aws_region_name", default_region), options=get_regions(), check_validity=True, ) scheduler = prompt( "Scheduler", - config.get("cluster " + cluster_template, "scheduler") if config.has_option("cluster " + cluster_template, - "scheduler") else "sge", + get_parameter(config, "cluster " + cluster_template, "scheduler", "sge"), options=get_supported_schedulers(), check_validity=True, ) - is_batch = _is_aws_batch(scheduler) + s_cluster["scheduler"] = scheduler - if is_batch: - operating_system = "alinux" + if _is_aws_batch(scheduler): + s_cluster.update(aws_batch_handler(config, cluster_template)) else: - operating_system = prompt( - "Operating System", - config.get("cluster " + cluster_template, "base_os") if config.has_option("cluster " + cluster_template, - "base_os") else "alinux", - options=get_supported_os(), - check_validity=True, - ) + s_cluster.update(general_scheduler_handler(config, cluster_template)) vpcname = prompt( "VPC Name", - config.get("cluster " + cluster_template, "vpc_settings") - if config.has_option("cluster " + cluster_template, "vpc_settings") - else "public", + get_parameter(config, "cluster " + cluster_template, "vpc_settings", "public"), ) + s_cluster["vpc_settings"] = vpcname keys = list_keys(aws_region_name) # Query EC2 for available keys as options key_name = prompt( "Key Name", - config.get("cluster " + cluster_template, "key_name") - if config.has_option("cluster " + cluster_template, "key_name") - else keys[0], # it will always have at least one value, otherwise an exception before has been thrown + get_parameter(config, "cluster " + cluster_template, "key_name", keys[0]), options=keys, check_validity=True, ) + s_cluster["key_name"] = key_name + vpc_id = prompt( "VPC ID", - config.get("vpc " + vpcname, "vpc_id") if config.has_option("vpc " + vpcname, "vpc_id") else None, + get_parameter(config, "vpc " + vpcname, "vpc_id", None), options=list_vpcs(aws_region_name), check_validity=True, ) + master_subnet_id = prompt( "Master Subnet ID", - config.get("vpc " + vpcname, "master_subnet_id") - if config.has_option("vpc " + vpcname, 
"master_subnet_id") - else None, + get_parameter(config, "vpc " + vpcname, "master_subnet_id", None), options=list_subnets(aws_region_name, vpc_id), check_validity=True, ) @@ -241,8 +234,6 @@ def configure(args): # noqa: C901 FIXME!!! } s_aws = {"__name__": "aws", "aws_region_name": aws_region_name} s_aliases = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} - s_cluster = {"__name__": "cluster " + cluster_template, "key_name": key_name, "vpc_settings": vpcname, - "scheduler": scheduler, "base_os": operating_system} s_vpc = {"__name__": "vpc " + vpcname, "vpc_id": vpc_id, "master_subnet_id": master_subnet_id} sections = [s_aws, s_cluster, s_vpc, s_global, s_aliases] @@ -306,6 +297,73 @@ def _is_config_valid(args, config): return False +def get_parameter(config, section, parameter_name, default_value): + """ + Prompt the user to ask question without validation + :param config the configuration parser + :param section the name of the section + :param parameter_name: the name of the parameter + :param default_value: the default string to ask the user + :return: + """ + return config.get(section, parameter_name) if config.has_option(section, parameter_name) else default_value + + +def general_scheduler_handler(config, cluster_template): + """ + Return a dictionary containing the values asked to the user for a generic scheduler non aws_batch + :param config the configuration parser + :param cluster_template the name of the cluster + :return: a dictionary with the updated values + """ + scheduler_dict = {} + + # We first remove unnecessary parameters from the past configurations + batch_parameters = "max_vcpus", "desired_vcpus", "min_vcpus" + for par in batch_parameters: + config.remove_option("cluster " + cluster_template, par) + + operating_system = prompt( + "Operating System", + get_parameter(config, "cluster " + cluster_template, "base_os", "alinux"), + options=get_supported_os(), + check_validity=True, + ) + scheduler_dict["base_os"] = operating_system + + max_queue_size = prompt( + "Max Queue Size", + get_parameter(config, "cluster " + cluster_template, "max_queue_size", "10") + ) + + scheduler_dict["max_queue_size"] = max_queue_size + scheduler_dict["initial_queue_size"] = "1" + return scheduler_dict + + +def aws_batch_handler(config, cluster_template): + """ + Return a dictionary containing the values asked to the user for aws_batch + :param config the configuration parser + :param cluster_template the name of the cluster + :return: a dictionary with the updated values + """ + batch_dict = {"base_os": "alinux", "desired_vcpus": "1"} + + # We first remove unnecessary parameters from the past configurations + non_batch_parameters = "max_queue_size", "initial_queue_size", "maintain_initial_size" + for par in non_batch_parameters: + config.remove_option("cluster " + cluster_template, par) + # Ask the users for max_vcpus + max_vcpus = prompt( + "Max Queue Size", + get_parameter(config, "cluster " + cluster_template, "max_vcpus", "10") + ) + + batch_dict["max_vcpus"] = max_vcpus + return batch_dict + + def _is_aws_batch(scheduler): """ Return true if the scheduler is awsbatch From 85096a97f92e0d925beb54cdb0486e0c449f6c95 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 27 May 2019 16:47:18 +0200 Subject: [PATCH 007/201] Configure: added compute instance type support Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 30 ++++++++++++++++++++++++------ 1 file changed, 24 insertions(+), 6 deletions(-) diff --git a/cli/pcluster/easyconfig.py 
b/cli/pcluster/easyconfig.py index 5b830894a7..3bcad9d19e 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -11,6 +11,7 @@ # fmt: off from __future__ import absolute_import, print_function # isort:skip from future import standard_library # isort:skip + standard_library.install_aliases() # fmt: on @@ -60,7 +61,7 @@ def prompt(prompt, default_value=None, hidden=False, options=None, check_validit else: user_prompt = user_prompt + "]: " - if isinstance(options, (list,tuple)): + if isinstance(options, (list, tuple)): print("Acceptable Values for %s: " % prompt) for o in options: print(" %s" % o) @@ -319,7 +320,7 @@ def general_scheduler_handler(config, cluster_template): scheduler_dict = {} # We first remove unnecessary parameters from the past configurations - batch_parameters = "max_vcpus", "desired_vcpus", "min_vcpus" + batch_parameters = "max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type" for par in batch_parameters: config.remove_option("cluster " + cluster_template, par) @@ -335,9 +336,21 @@ def general_scheduler_handler(config, cluster_template): "Max Queue Size", get_parameter(config, "cluster " + cluster_template, "max_queue_size", "10") ) - scheduler_dict["max_queue_size"] = max_queue_size scheduler_dict["initial_queue_size"] = "1" + + master_instance_type = prompt( + "Master instance type", + get_parameter(config, "cluster " + cluster_template, "master_instance_type", "t2.micro"), + ) + scheduler_dict["master_instance_type"] = master_instance_type + + compute_instance_type = prompt( + "Compute instance type", + get_parameter(config, "cluster " + cluster_template, "compute_instance_type", "t2.micro"), + ) + scheduler_dict["compute_instance_type"] = compute_instance_type + return scheduler_dict @@ -348,10 +361,10 @@ def aws_batch_handler(config, cluster_template): :param cluster_template the name of the cluster :return: a dictionary with the updated values """ - batch_dict = {"base_os": "alinux", "desired_vcpus": "1"} + batch_dict = {"base_os": "alinux", "desired_vcpus": "1", "compute_instance_type": "optimal"} # We first remove unnecessary parameters from the past configurations - non_batch_parameters = "max_queue_size", "initial_queue_size", "maintain_initial_size" + non_batch_parameters = "max_queue_size", "initial_queue_size", "maintain_initial_size", "compute_instance_type" for par in non_batch_parameters: config.remove_option("cluster " + cluster_template, par) # Ask the users for max_vcpus @@ -359,8 +372,13 @@ def aws_batch_handler(config, cluster_template): "Max Queue Size", get_parameter(config, "cluster " + cluster_template, "max_vcpus", "10") ) - batch_dict["max_vcpus"] = max_vcpus + + master_instance_type = prompt( + "Master instance type", + get_parameter(config, "cluster " + cluster_template, "master_instance_type", "t2.micro"), + ) + batch_dict["master_instance_type"] = master_instance_type return batch_dict From c4a0c1eb6b8d6eb58ee2f07b1f060b473a7f697f Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Tue, 28 May 2019 15:07:26 +0200 Subject: [PATCH 008/201] Configure: general refactoring of the code Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 276 ++++++++++++++++++------------------- cli/pcluster/utils.py | 9 +- 2 files changed, 142 insertions(+), 143 deletions(-) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index 3bcad9d19e..350d246fa4 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -15,26 +15,38 @@ standard_library.install_aliases() # fmt: on 
+import copy import errno import functools import logging import os import stat import sys +import tempfile from builtins import input import boto3 import configparser -import tempfile from botocore.exceptions import BotoCoreError, ClientError +from pcluster.utils import get_supported_os, get_supported_schedulers + from . import cfnconfig -from pcluster.utils import get_supported_schedulers -from pcluster.utils import get_supported_os logger = logging.getLogger("pcluster.pcluster") unsupported_regions = ["ap-northeast-3"] -default_region = "us-east-1" +DEFAULT_VALUES = { + "cluster_template": "default", + "aws_region_name": "us-east-1", + "scheduler": "sge", + "os": "alinux", + "max_queue_size": "10", + "master_instance_type": "t2.micro", + "compute_instance_type": "t2.micro", + "vpc_name": "public", + "initial_size": "1", +} +FORCED_BATCH_VALUES = {"os": "alinux", "compute_instance_type": "optimal"} def handle_client_exception(func): @@ -51,18 +63,18 @@ def wrapper(*args, **kwargs): return wrapper -def prompt(prompt, default_value=None, hidden=False, options=None, check_validity=False): +def prompt(message, default_value=None, hidden=False, options=None, check_validity=False): if hidden and default_value is not None: - user_prompt = prompt + " [*******" + default_value[-4:] + "]: " + user_prompt = message + " [*******" + default_value[-4:] + "]: " else: - user_prompt = prompt + " [" + user_prompt = message + " [" if default_value is not None: user_prompt = user_prompt + default_value + "]: " else: user_prompt = user_prompt + "]: " if isinstance(options, (list, tuple)): - print("Acceptable Values for %s: " % prompt) + print("Acceptable Values for %s: " % message) for o in options: print(" %s" % o) @@ -86,27 +98,25 @@ def get_regions(): return [region.get("RegionName") for region in regions if region.get("RegionName") not in unsupported_regions] -@handle_client_exception -def ec2_get_region(aws_region_name): +def _evaluate_aws_region(aws_region_name): if aws_region_name: region = aws_region_name elif os.environ.get("AWS_DEFAULT_REGION"): region = os.environ.get("AWS_DEFAULT_REGION") else: - region = default_region + region = DEFAULT_VALUES["aws_region_name"] return region @handle_client_exception def ec2_conn(aws_region_name): - region = ec2_get_region(aws_region_name) + region = _evaluate_aws_region(aws_region_name) ec2 = boto3.client("ec2", region_name=region) return ec2 @handle_client_exception def list_keys(aws_region_name): - aws_region_name = ec2_get_region(aws_region_name) # we get the default if not present conn = ec2_conn(aws_region_name) keypairs = conn.describe_key_pairs() keynames = [] @@ -154,91 +164,138 @@ def list_subnets(aws_region_name, vpc_id): def configure(args): # noqa: C901 FIXME!!! 
- # Determine config file name based on args or default - if args.config_file is not None: - config_file = args.config_file - else: - config_file = os.path.expanduser(os.path.join("~", ".parallelcluster", "config")) + config_file = ( + args.config_file if args.config_file else os.path.expanduser(os.path.join("~", ".parallelcluster", "config")) + ) config = configparser.ConfigParser() # Check if configuration file exists if os.path.isfile(config_file): config.read(config_file) - s_cluster = {} - # Prompt for required values, using existing as defaults cluster_template = prompt( - "Cluster template", - get_parameter(config, "global", "cluster_template", "default"), + "Cluster configuration label", + get_config_parameter( + config, + section="global", + parameter_name="cluster_template", + default_value=DEFAULT_VALUES["cluster_template"], + ), ) - s_cluster["__name__"] = "cluster " + cluster_template + cluster_label = "cluster " + cluster_template # Use built in boto regions as an available option aws_region_name = prompt( "AWS Region ID", - get_parameter(config, "aws", "aws_region_name", default_region), + get_config_parameter( + config, section="aws", parameter_name="aws_region_name", default_value=DEFAULT_VALUES["aws_region_name"] + ), options=get_regions(), check_validity=True, ) scheduler = prompt( "Scheduler", - get_parameter(config, "cluster " + cluster_template, "scheduler", "sge"), + get_config_parameter( + config, section=cluster_label, parameter_name="scheduler", default_value=DEFAULT_VALUES["scheduler"] + ), options=get_supported_schedulers(), check_validity=True, ) - s_cluster["scheduler"] = scheduler + scheduler_info = scheduler_handler(scheduler) + is_aws_batch = scheduler == "awsbatch" - if _is_aws_batch(scheduler): - s_cluster.update(aws_batch_handler(config, cluster_template)) + if is_aws_batch: + operating_system = FORCED_BATCH_VALUES["os"] else: - s_cluster.update(general_scheduler_handler(config, cluster_template)) + operating_system = prompt( + "Operating System", + get_config_parameter( + config, section=cluster_label, parameter_name="base_os", default_value=DEFAULT_VALUES["os"] + ), + options=get_supported_os(scheduler), + check_validity=True, + ) + + max_queue_size = prompt( + "Max Queue Size", + get_config_parameter(config, cluster_label, scheduler_info["max_size"], DEFAULT_VALUES["max_queue_size"]), + ) + + master_instance_type = prompt( + "Master instance type", + get_config_parameter( + config, + section=cluster_label, + parameter_name="master_instance_type", + default_value=DEFAULT_VALUES["master_instance_type"], + ), + ) - vpcname = prompt( - "VPC Name", - get_parameter(config, "cluster " + cluster_template, "vpc_settings", "public"), + if is_aws_batch: + compute_instance_type = FORCED_BATCH_VALUES["compute_instance_type"] + else: + compute_instance_type = prompt( + message="Compute instance type", default_value=DEFAULT_VALUES["compute_instance_type"] + ) + + vpc_name = prompt( + "VPC configuration label", + get_config_parameter( + config, section=cluster_label, parameter_name="vpc_settings", default_value=DEFAULT_VALUES["vpc_name"] + ), ) - s_cluster["vpc_settings"] = vpcname + vpc_label = "vpc " + vpc_name keys = list_keys(aws_region_name) - # Query EC2 for available keys as options key_name = prompt( "Key Name", - get_parameter(config, "cluster " + cluster_template, "key_name", keys[0]), + get_config_parameter(config, section=cluster_label, parameter_name="key_name", default_value=keys[0]), options=keys, check_validity=True, ) - s_cluster["key_name"] = 
key_name vpc_id = prompt( "VPC ID", - get_parameter(config, "vpc " + vpcname, "vpc_id", None), + get_config_parameter(config, section=vpc_label, parameter_name="vpc_id", default_value=None), options=list_vpcs(aws_region_name), check_validity=True, ) master_subnet_id = prompt( "Master Subnet ID", - get_parameter(config, "vpc " + vpcname, "master_subnet_id", None), + get_config_parameter(config, section=vpc_label, parameter_name="master_subnet_id", default_value=None), options=list_subnets(aws_region_name, vpc_id), check_validity=True, ) - # Dictionary of values we want to set - s_global = { + global_parameters = { "__name__": "global", "cluster_template": cluster_template, "update_check": "true", "sanity_check": "true", } - s_aws = {"__name__": "aws", "aws_region_name": aws_region_name} - s_aliases = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} - s_vpc = {"__name__": "vpc " + vpcname, "vpc_id": vpc_id, "master_subnet_id": master_subnet_id} - - sections = [s_aws, s_cluster, s_vpc, s_global, s_aliases] + aws_parameters = {"__name__": "aws", "aws_region_name": aws_region_name} + cluster_parameters = { + "__name__": cluster_label, + "key_name": key_name, + "vpc_settings": vpc_name, + "scheduler": scheduler, + "base_os": operating_system, + "compute_instance_type": compute_instance_type, + "master_instance_type": master_instance_type, + scheduler_info["max_size"]: max_queue_size, + scheduler_info["initial_size"]: DEFAULT_VALUES["initial_size"], + } + aliases_parameters = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} + vpc_parameters = {"__name__": vpc_label, "vpc_id": vpc_id, "master_subnet_id": master_subnet_id} + sections = [aws_parameters, cluster_parameters, vpc_parameters, global_parameters, aliases_parameters] + # We first remove unnecessary parameters from the past configurations + for par in scheduler_info["parameters_to_remove"]: + config.remove_option(cluster_label, par) # Loop through the configuration sections we care about for section in sections: try: @@ -272,122 +329,61 @@ def configure(args): # noqa: C901 FIXME!!! def _is_config_valid(args, config): """ Validate the configuration of the pcluster configure. 
+ :param args: the arguments passed with the command line :param config: the configParser :return True if the configuration is valid, false otherwise """ - # We create a temp_file to validate before overriding the original config - path = os.path.join(tempfile.gettempdir(), "temp_config") - temp_file = path - temp_args = args - temp_args.config_file = path - open(temp_file, "a").close() - os.chmod(temp_file, stat.S_IRUSR | stat.S_IWUSR) - with open(temp_file, "w") as cf: + # We create a temp_file_path to validate before overriding the original config + temp_file_path = os.path.join(tempfile.gettempdir(), "temp_config") + temp_args = copy.copy(args) # Defensive copy is needed because we change config_file + + temp_args.config_file = temp_file_path + with open(temp_file_path, "w+") as cf: config.write(cf) # Verify the configuration - is_file_ok = True + is_valid = True try: cfnconfig.ParallelClusterConfig(temp_args) - except SystemExit as e: - is_file_ok = False + except SystemExit: + is_valid = False finally: - os.remove(path) - if is_file_ok: - return True - return False + os.remove(temp_file_path) + return is_valid -def get_parameter(config, section, parameter_name, default_value): +def get_config_parameter(config, section, parameter_name, default_value): """ - Prompt the user to ask question without validation + Get the parameter if present in the configuration otherwise returns default value. + :param config the configuration parser :param section the name of the section :param parameter_name: the name of the parameter - :param default_value: the default string to ask the user + :param default_value: the default to propose the user :return: """ return config.get(section, parameter_name) if config.has_option(section, parameter_name) else default_value -def general_scheduler_handler(config, cluster_template): - """ - Return a dictionary containing the values asked to the user for a generic scheduler non aws_batch - :param config the configuration parser - :param cluster_template the name of the cluster - :return: a dictionary with the updated values - """ - scheduler_dict = {} - - # We first remove unnecessary parameters from the past configurations - batch_parameters = "max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type" - for par in batch_parameters: - config.remove_option("cluster " + cluster_template, par) - - operating_system = prompt( - "Operating System", - get_parameter(config, "cluster " + cluster_template, "base_os", "alinux"), - options=get_supported_os(), - check_validity=True, - ) - scheduler_dict["base_os"] = operating_system - - max_queue_size = prompt( - "Max Queue Size", - get_parameter(config, "cluster " + cluster_template, "max_queue_size", "10") - ) - scheduler_dict["max_queue_size"] = max_queue_size - scheduler_dict["initial_queue_size"] = "1" - - master_instance_type = prompt( - "Master instance type", - get_parameter(config, "cluster " + cluster_template, "master_instance_type", "t2.micro"), - ) - scheduler_dict["master_instance_type"] = master_instance_type - - compute_instance_type = prompt( - "Compute instance type", - get_parameter(config, "cluster " + cluster_template, "compute_instance_type", "t2.micro"), - ) - scheduler_dict["compute_instance_type"] = compute_instance_type - - return scheduler_dict - - -def aws_batch_handler(config, cluster_template): - """ - Return a dictionary containing the values asked to the user for aws_batch - :param config the configuration parser - :param cluster_template the name of the cluster - :return: a dictionary 
with the updated values +def scheduler_handler(scheduler): """ - batch_dict = {"base_os": "alinux", "desired_vcpus": "1", "compute_instance_type": "optimal"} - - # We first remove unnecessary parameters from the past configurations - non_batch_parameters = "max_queue_size", "initial_queue_size", "maintain_initial_size", "compute_instance_type" - for par in non_batch_parameters: - config.remove_option("cluster " + cluster_template, par) - # Ask the users for max_vcpus - max_vcpus = prompt( - "Max Queue Size", - get_parameter(config, "cluster " + cluster_template, "max_vcpus", "10") - ) - batch_dict["max_vcpus"] = max_vcpus + Return a dictionary containing information based on the scheduler. - master_instance_type = prompt( - "Master instance type", - get_parameter(config, "cluster " + cluster_template, "master_instance_type", "t2.micro"), - ) - batch_dict["master_instance_type"] = master_instance_type - return batch_dict - - -def _is_aws_batch(scheduler): - """ - Return true if the scheduler is awsbatch - :param scheduler: the scheduler to check - :return: true if the scheduler is awsbatch + :param scheduler the target scheduler + :return: a dictionary with containing the information """ + scheduler_info = {} if scheduler == "awsbatch": - return True - return False + scheduler_info["parameters_to_remove"] = ( + "max_queue_size", + "initial_queue_size", + "maintain_initial_size", + "compute_instance_type", + ) + scheduler_info["max_size"] = "max_vcpus" + scheduler_info["initial_size"] = "desired_vcpus" + else: + scheduler_info["parameters_to_remove"] = "max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type" + scheduler_info["max_size"] = "max_queue_size" + scheduler_info["initial_size"] = "initial_queue_size" + return scheduler_info diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index d21c4b948f..21fea24370 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -182,17 +182,20 @@ def get_instance_vcpus(region, instance_type): return vcpus -def get_supported_os(): +def get_supported_os(scheduler): """ - Return a tuple of the os supported by parallelcluster. + Return a tuple of the os supported by parallelcluster for the specific scheduler. + + :param scheduler: the scheduler for which we want to know the supported os :return: a tuple of strings of the supported os """ - return "alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604" + return "alinux" if scheduler == "awsbatch" else "alinux", "centos6", "centos7", "ubuntu1404", "ubuntu1604" def get_supported_schedulers(): """ Return a tuple of the scheduler supported by parallelcluster. + :return: a tuple of strings of the supported scheduler """ return "sge", "torque", "slurm", "awsbatch" From 530b7d2eca50d2289faf50935cbdb1b821e1c28b Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Fri, 7 Jun 2019 12:47:04 +0200 Subject: [PATCH 009/201] Extended pcluster configure to speed up the configuration Extended the pcluster configure command to also show vpc and subnet name. Added also the possibility to select an option with numbers on the left. 
Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig.py | 260 +++++++++++++++++++++++-------------- 1 file changed, 163 insertions(+), 97 deletions(-) diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig.py index 350d246fa4..926ec641a4 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig.py @@ -63,32 +63,38 @@ def wrapper(*args, **kwargs): return wrapper -def prompt(message, default_value=None, hidden=False, options=None, check_validity=False): - if hidden and default_value is not None: - user_prompt = message + " [*******" + default_value[-4:] + "]: " - else: - user_prompt = message + " [" - if default_value is not None: - user_prompt = user_prompt + default_value + "]: " - else: - user_prompt = user_prompt + "]: " - - if isinstance(options, (list, tuple)): - print("Acceptable Values for %s: " % message) - for o in options: - print(" %s" % o) - - var = input(user_prompt).strip() - - if var == "": - return default_value - else: - if check_validity and options is not None and var not in options: - print("ERROR: The value (%s) is not valid " % var) - print("Please select one of the Acceptable Values listed above.") - sys.exit(1) +def prompt(message, validator=lambda x: True, input_to_option=lambda x: x, default_value=None, options_to_print=None): + """ + Prompt the user a message with optionally some options. + + :param message: the message to show to the user + :param validator: a function that predicates if the input is correct + :param input_to_option: a function that given the input transforms it in something else + :param default_value: the value to return as the default if the user doesn't insert anything + :param options_to_print: the options to print if necessary + :return: the value inserted by the user validated + """ + if options_to_print: + print("Allowed values for {0}:".format(message)) + for item in options_to_print: + print(item) + user_prompt = "{0} [{1}]: ".format(message, default_value or "") + + valid_user_input = False + result = default_value + # We give the user the possibility to try again if wrong + while not valid_user_input: + user_input = input(user_prompt).strip() + if user_input == "": + result = default_value + valid_user_input = True else: - return var + result = input_to_option(user_input) + if validator(result): + valid_user_input = True + else: + print("ERROR: {0} is not an acceptable value for {1}".format(user_input, message)) + return result @handle_client_exception @@ -115,52 +121,50 @@ def ec2_conn(aws_region_name): return ec2 -@handle_client_exception -def list_keys(aws_region_name): - conn = ec2_conn(aws_region_name) - keypairs = conn.describe_key_pairs() - keynames = [] - for key in keypairs.get("KeyPairs"): - keynames.append(key.get("KeyName")) +def extract_tag_from_resource(resource, tag_name): + tags = resource.get("Tags", []) + return next((item.get("Value") for item in tags if item.get("Key") == tag_name), None) - if not keynames: - print("ERROR: No Key Pairs found in region " + aws_region_name) - print("Please create an EC2 Key Pair before continuing") - sys.exit(1) - return keynames +def _list_resources(resources, resource_name, resource_id_name): + """Return a list of tuple containing the id of the resource and the name of it.""" + resource_options = [] + for resource in resources.get(resource_name): + keyid = resource.get(resource_id_name) + name = extract_tag_from_resource(resource, tag_name="Name") + resource_options.append((keyid, name)) if name else resource_options.append((keyid,)) + + return 
resource_options @handle_client_exception -def list_vpcs(aws_region_name): +def _list_keys(aws_region_name): + """Return a list of keys as a list of tuple of type (key-name,).""" conn = ec2_conn(aws_region_name) - vpcs = conn.describe_vpcs() - vpcids = [] - for vpc in vpcs.get("Vpcs"): - vpcids.append(vpc.get("VpcId")) + keypairs = conn.describe_key_pairs() + return _list_resources(keypairs, "KeyPairs", "KeyName") - if not vpcids: - print("ERROR: No VPCs found in region " + aws_region_name) - print("Please create a VPC before continuing") - sys.exit(1) - return vpcids +@handle_client_exception +def _list_vpcs(aws_region_name): + """Return a list of vpcs as a list of tuple of type (vpc-id, vpc-name (if present)).""" + conn = ec2_conn(aws_region_name) + vpcs = conn.describe_vpcs() + return _list_resources(vpcs, "Vpcs", "VpcId") @handle_client_exception -def list_subnets(aws_region_name, vpc_id): +def _list_subnets(aws_region_name, vpc_id): + """Return a list of subnet as a list of tuple of type (subnet-id, subnet-name (if present)).""" conn = ec2_conn(aws_region_name) subnets = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}]) - subnetids = [] - for subnet in subnets.get("Subnets"): - subnetids.append(subnet.get("SubnetId")) + return _list_resources(subnets, "Subnets", "SubnetId") - if not subnetids: - print("ERROR: No Subnets found in region " + aws_region_name) - print("Please create a VPC Subnet before continuing") - sys.exit(1) - return subnetids +@handle_client_exception +def _list_instances(): # Specifying the region does not make any difference + """Return a list of all the supported instance at the moment by aws, independent by the region.""" + return ec2_conn(DEFAULT_VALUES["aws_region_name"]).meta.service_model.shape_for("InstanceType").enum def configure(args): # noqa: C901 FIXME!!! @@ -177,7 +181,7 @@ def configure(args): # noqa: C901 FIXME!!! # Prompt for required values, using existing as defaults cluster_template = prompt( "Cluster configuration label", - get_config_parameter( + default_value=get_config_parameter( config, section="global", parameter_name="cluster_template", @@ -187,22 +191,20 @@ def configure(args): # noqa: C901 FIXME!!! cluster_label = "cluster " + cluster_template # Use built in boto regions as an available option - aws_region_name = prompt( + aws_region_name = _prompt_a_list( "AWS Region ID", - get_config_parameter( + get_regions(), + default_value=get_config_parameter( config, section="aws", parameter_name="aws_region_name", default_value=DEFAULT_VALUES["aws_region_name"] ), - options=get_regions(), - check_validity=True, ) - scheduler = prompt( + scheduler = _prompt_a_list( "Scheduler", - get_config_parameter( + get_supported_schedulers(), + default_value=get_config_parameter( config, section=cluster_label, parameter_name="scheduler", default_value=DEFAULT_VALUES["scheduler"] ), - options=get_supported_schedulers(), - check_validity=True, ) scheduler_info = scheduler_handler(scheduler) is_aws_batch = scheduler == "awsbatch" @@ -210,23 +212,26 @@ def configure(args): # noqa: C901 FIXME!!! 
if is_aws_batch: operating_system = FORCED_BATCH_VALUES["os"] else: - operating_system = prompt( + operating_system = _prompt_a_list( "Operating System", - get_config_parameter( + get_supported_os(scheduler), + default_value=get_config_parameter( config, section=cluster_label, parameter_name="base_os", default_value=DEFAULT_VALUES["os"] ), - options=get_supported_os(scheduler), - check_validity=True, ) max_queue_size = prompt( "Max Queue Size", - get_config_parameter(config, cluster_label, scheduler_info["max_size"], DEFAULT_VALUES["max_queue_size"]), + validator=lambda x: x.isdigit(), + default_value=get_config_parameter( + config, cluster_label, scheduler_info["max_size"], DEFAULT_VALUES["max_queue_size"] + ), ) master_instance_type = prompt( "Master instance type", - get_config_parameter( + lambda x: x in _list_instances(), + default_value=get_config_parameter( config, section=cluster_label, parameter_name="master_instance_type", @@ -238,38 +243,22 @@ def configure(args): # noqa: C901 FIXME!!! compute_instance_type = FORCED_BATCH_VALUES["compute_instance_type"] else: compute_instance_type = prompt( - message="Compute instance type", default_value=DEFAULT_VALUES["compute_instance_type"] + "Compute instance type", + lambda x: x in _list_instances(), + default_value=DEFAULT_VALUES["compute_instance_type"], ) vpc_name = prompt( "VPC configuration label", - get_config_parameter( + default_value=get_config_parameter( config, section=cluster_label, parameter_name="vpc_settings", default_value=DEFAULT_VALUES["vpc_name"] ), ) vpc_label = "vpc " + vpc_name - keys = list_keys(aws_region_name) - key_name = prompt( - "Key Name", - get_config_parameter(config, section=cluster_label, parameter_name="key_name", default_value=keys[0]), - options=keys, - check_validity=True, - ) - - vpc_id = prompt( - "VPC ID", - get_config_parameter(config, section=vpc_label, parameter_name="vpc_id", default_value=None), - options=list_vpcs(aws_region_name), - check_validity=True, - ) - - master_subnet_id = prompt( - "Master Subnet ID", - get_config_parameter(config, section=vpc_label, parameter_name="master_subnet_id", default_value=None), - options=list_subnets(aws_region_name, vpc_id), - check_validity=True, - ) + key_name = _prompt_a_list_of_tuple("Key Name", _list_keys(aws_region_name)) + vpc_id = _prompt_a_list_of_tuple("VPC ID", _list_vpcs(aws_region_name)) + master_subnet_id = _prompt_a_list_of_tuple("Master Subnet ID", _list_subnets(aws_region_name, vpc_id)) global_parameters = { "__name__": "global", @@ -294,8 +283,10 @@ def configure(args): # noqa: C901 FIXME!!! 
sections = [aws_parameters, cluster_parameters, vpc_parameters, global_parameters, aliases_parameters] # We first remove unnecessary parameters from the past configurations - for par in scheduler_info["parameters_to_remove"]: - config.remove_option(cluster_label, par) + if config.has_section(cluster_label): + for par in scheduler_info["parameters_to_remove"]: + config.remove_option(cluster_label, par) + # Loop through the configuration sections we care about for section in sections: try: @@ -383,7 +374,82 @@ def scheduler_handler(scheduler): scheduler_info["max_size"] = "max_vcpus" scheduler_info["initial_size"] = "desired_vcpus" else: - scheduler_info["parameters_to_remove"] = "max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type" + scheduler_info["parameters_to_remove"] = ("max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type") scheduler_info["max_size"] = "max_queue_size" scheduler_info["initial_size"] = "initial_queue_size" return scheduler_info + + +def _prompt_a_list(message, options, default_value=None): + """ + Wrap prompt to use it for list. + + :param message: the message to show the user + :param options: the list of item to show the user + :param default_value: the default value + :return: the validate value + """ + if not options: + print("ERROR: No options found for {0}".format(message)) + sys.exit(1) + if not default_value: + default_value = options[0] + + def input_to_parameter(to_transform): + try: + item = options[int(to_transform) - 1] + except ValueError: + item = to_transform + return item + + return prompt( + message, + validator=lambda x: x in options, + input_to_option=lambda x: input_to_parameter(x), + default_value=default_value, + options_to_print=_to_printable_list(options), + ) + + +def _prompt_a_list_of_tuple(message, options, default_value=None): + """ + Wrap prompt to use it over a list of tuple. + + The correct item will be the first element of each tuple. + :param message: the message to show to the user + :param options: the list of tuple + :param default_value: the default value + :return: the validated value + """ + if not options: + print("ERROR: No options found for {0}".format(message)) + sys.exit(1) + if not default_value: + default_value = options[0][0] + + def input_to_parameter(to_transform): + try: + item = options[int(to_transform) - 1][0] + except ValueError: + item = to_transform + return item + + valid_options = [item[0] for item in options] + + return prompt( + message, + validator=lambda x: x in valid_options, + input_to_option=lambda x: input_to_parameter(x), + default_value=default_value, + options_to_print=_to_printable_list(options), + ) + + +def _to_printable_list(items): + output = [] + for iterator, item in enumerate(items, start=1): + if isinstance(item, (list, tuple)): + output.append("{0}. {1}".format(iterator, " | ".join(item))) + else: + output.append("{0}. 
{1}".format(iterator, item)) + return output From 51e73a9fa84ed7c0f527c5f5388fedd494983d3a Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Thu, 30 May 2019 11:02:35 +0200 Subject: [PATCH 010/201] Created vpc_factory: manages creation, setting up and checks for vpc Signed-off-by: Matteo Fiordarancio --- cli/pcluster/networking/__init__.py | 10 ++++ cli/pcluster/networking/vpc_factory.py | 74 ++++++++++++++++++++++++++ 2 files changed, 84 insertions(+) create mode 100644 cli/pcluster/networking/__init__.py create mode 100644 cli/pcluster/networking/vpc_factory.py diff --git a/cli/pcluster/networking/__init__.py b/cli/pcluster/networking/__init__.py new file mode 100644 index 0000000000..53fcfb6817 --- /dev/null +++ b/cli/pcluster/networking/__init__.py @@ -0,0 +1,10 @@ +# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance +# with the License. A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and +# limitations under the License. diff --git a/cli/pcluster/networking/vpc_factory.py b/cli/pcluster/networking/vpc_factory.py new file mode 100644 index 0000000000..09bb702c45 --- /dev/null +++ b/cli/pcluster/networking/vpc_factory.py @@ -0,0 +1,74 @@ +import functools +import sys + +import boto3 +from botocore.exceptions import BotoCoreError, ClientError + + +class VpcFactory: + """This class handle vpc automation related to pcluster.""" + + class _ExceptionHandler: + @staticmethod + def handle_client_exception(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except (BotoCoreError, ClientError) as e: + print("ERROR during handling of the VPC in the {0} phase.".format(func.__name__)) + print(e.response["Error"]["Message"]) + sys.exit(1) + + return wrapper + + @_ExceptionHandler.handle_client_exception + def __init__(self, aws_region_name): + """ + Inizialize the VpcHandler with the specified region. + + :param aws_region_name: the region in which you want to use the VpcHandler + """ + self.__client = boto3.client("ec2", region_name=aws_region_name) + self.ec2 = boto3.resource("ec2", region_name=aws_region_name) + + @_ExceptionHandler.handle_client_exception + def create(self, cidr_block="10.0.0.0/16"): + """ + Create a vpc for the given region name. + + :return: the id of the created vpc + :raise RuntimeError: if some problems occurred during vpc creation + """ + response = self.__client.create_vpc(CidrBlock=cidr_block) + return response["Vpc"]["VpcId"] + + @_ExceptionHandler.handle_client_exception + def setup(self, vpc_id, name=None): + """ + Set the parameters necessary for a vpc to be pcluster-compatible. 
+ + :param vpc_id: the target vpc_id + :param name: the name that you want to give to the vpc + :raise RuntimeError: if some problems occurred during the operation + """ + vpc = self.ec2.Vpc(vpc_id) + if name: + self.ec2.create_tags(Resources=[vpc_id], Tags=[{"Key": "Name", "Value": name}]) + vpc.modify_attribute(EnableDnsHostnames={"Value": True}) + vpc.modify_attribute(EnableDnsSupport={"Value": True}) + + @_ExceptionHandler.handle_client_exception + def check(self, vpc_id): + """ + Check whether the given vpc respects the condition needed for pcluster. + + :param vpc_id: the target vpc_id + :return: True if the vpc is pcluster compatible + :raise RuntimeError: if some problems occurred during the operation + """ + vpc = self.ec2.Vpc(vpc_id) + dns_resolution = vpc.describe_attribute(Attribute="enableDnsSupport")["EnableDnsSupport"]["Value"] + dns_hostnames = vpc.describe_attribute(Attribute="enableDnsHostnames")["EnableDnsHostnames"]["Value"] + # default is equal to NO dhcp options set + return dns_resolution and dns_hostnames and vpc.dhcp_options_id != "default" From 7c1a5f8ed8412b453cc07e749ae1542309c49544 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Tue, 18 Jun 2019 16:30:48 +0200 Subject: [PATCH 011/201] Updated vpc_builder Now it can generate any network configuration. Signed-off-by: Matteo Fiordarancio --- tests/integration-tests/conftest.py | 5 +- tests/integration-tests/vpc_builder.py | 132 ++++++++++++++++++------- 2 files changed, 97 insertions(+), 40 deletions(-) diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index 499ccc57cc..9cbd08083c 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -355,14 +355,12 @@ def vpc_stacks(cfn_stacks_factory, request): vpc_stacks = {} for region in regions: # defining subnets per region to allow AZs override - availability_zone = random.choice(_AVAILABILITY_ZONE_OVERRIDES.get(region, [None])) public_subnet = SubnetConfig( name="PublicSubnet", cidr="10.0.124.0/22", # 1,022 IPs map_public_ip_on_launch=True, has_nat_gateway=True, default_gateway=Gateways.INTERNET_GATEWAY, - availability_zone=availability_zone, ) private_subnet = SubnetConfig( name="PrivateSubnet", @@ -370,10 +368,9 @@ def vpc_stacks(cfn_stacks_factory, request): map_public_ip_on_launch=False, has_nat_gateway=False, default_gateway=Gateways.NAT_GATEWAY, - availability_zone=availability_zone, ) vpc_config = VPCConfig(subnets=[public_subnet, private_subnet]) - template = VPCTemplateBuilder(vpc_config).build() + template = VPCTemplateBuilder(vpc_configuration=vpc_config).build() vpc_stacks[region] = _create_vpc_stack(request, template, region, cfn_stacks_factory) return vpc_stacks diff --git a/tests/integration-tests/vpc_builder.py b/tests/integration-tests/vpc_builder.py index f9fc3376da..da8ca95880 100644 --- a/tests/integration-tests/vpc_builder.py +++ b/tests/integration-tests/vpc_builder.py @@ -10,9 +10,9 @@ # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. # See the License for the specific language governing permissions and limitations under the License. 
from enum import Enum, auto -from typing import List, NamedTuple, Optional +from typing import List, NamedTuple -from troposphere import GetAtt, Output, Ref, Sub, Tags, Template +from troposphere import Equals, GetAtt, If, Not, Output, Parameter, Ref, Sub, Tags, Template from troposphere.ec2 import ( EIP, VPC, @@ -38,15 +38,14 @@ class SubnetConfig(NamedTuple): """Configuration of a VPC Subnet""" name: str = "PublicSubnet" - cidr: str = "10.0.0.0/24" + cidr: object = None map_public_ip_on_launch: bool = True has_nat_gateway: bool = True default_gateway: Gateways = Gateways.INTERNET_GATEWAY - availability_zone: Optional[str] = None def tags(self): """Get the tags for the subnet""" - return Tags(Name=Sub("${AWS::StackName}-" + self.name + "_subnet"), Stack=Ref("AWS::StackId")) + return Tags(Name=Sub("${AWS::StackName}" + self.name + "Subnet"), Stack=Ref("AWS::StackId")) class VPCConfig(NamedTuple): @@ -64,11 +63,46 @@ class VPCConfig(NamedTuple): class VPCTemplateBuilder: """Build troposphere CFN templates for VPC creation.""" - def __init__(self, vpc_config, description="VPC built by VPCBuilder"): + def __init__( + self, + vpc_configuration: VPCConfig, + existing_vpc: bool = False, + availability_zone: str = None, + description="Network build by NetworkTemplateBuilder", + ): self.__template = Template() self.__template.set_version("2010-09-09") self.__template.set_description(description) - self.__vpc_config = vpc_config + if availability_zone: + self.__availability_zone = availability_zone + else: + self.__availability_zone = Ref( + self.__add_parameter( + name="AvailabilityZone", + description="(Optional) The zone in which you want to create your subnet(s)", + expected_input_type="String", + ) + ) + + if existing_vpc: + self.__vpc = self.__add_parameter(name="VpcId", description="The vpc id", expected_input_type="String") + self.__vpc_subnets = vpc_configuration.subnets + + else: + self.__vpc = self.__build_vpc(vpc_configuration) + self.__vpc_subnets = vpc_configuration.subnets + + self.__gateway_id = Ref( + self.__add_parameter( + name="InternetGatewayId", + description="(Optional) The id of the gateway (will be created if not specified)", + expected_input_type="String", + ) + ) + self.__create_ig = self.__template.add_condition("CreateInternetGateway", Equals(self.__gateway_id, "")) + self.__existing_ig = self.__template.add_condition( # can't negate above condition with Not() + "ExistingInternetGateway", Not(Equals(self.__gateway_id, "")) + ) def build(self): """Build the template.""" @@ -76,52 +110,69 @@ def build(self): return self.__template def __build_template(self): - vpc = self.__build_vpc() - internet_gateway = self.__build_internet_gateway(vpc) + internet_gateway = self.__build_internet_gateway(self.__vpc) nat_gateway = None subnet_refs = [] - for subnet in self.__vpc_config.subnets: - subnet_ref = self.__build_subnet(subnet, vpc) + for subnet in self.__vpc_subnets: + subnet_ref = self.__build_subnet(subnet, self.__vpc) subnet_refs.append(subnet_ref) if subnet.has_nat_gateway: nat_gateway = self.__build_nat_gateway(subnet, subnet_ref) - for subnet, subnet_ref in zip(self.__vpc_config.subnets, subnet_refs): - self.__build_route_table(subnet, subnet_ref, vpc, internet_gateway, nat_gateway) + for subnet, subnet_ref in zip(self.__vpc_subnets, subnet_refs): + self.__build_route_table(subnet, subnet_ref, self.__vpc, internet_gateway, nat_gateway) - def __build_vpc(self): - vpc_config = self.__vpc_config + def __build_vpc(self, vpc_config_new): vpc = self.__template.add_resource( 
VPC( - vpc_config.name, - CidrBlock=vpc_config.cidr, - EnableDnsSupport=vpc_config.enable_dns_support, - EnableDnsHostnames=vpc_config.enable_dns_hostnames, - Tags=vpc_config.tags, + vpc_config_new.name, + CidrBlock=vpc_config_new.cidr, + EnableDnsSupport=vpc_config_new.enable_dns_support, + EnableDnsHostnames=vpc_config_new.enable_dns_hostnames, + Tags=vpc_config_new.tags, ) ) - self.__template.add_output(Output("VpcId", Value=Ref(vpc), Description="VPC Id")) + self.__template.add_output(Output("VpcId", Value=Ref(vpc), Description="The Vpc Id")) return vpc def __build_internet_gateway(self, vpc: VPC): internet_gateway = self.__template.add_resource( - InternetGateway("InternetGateway", Tags=Tags(Name=Ref("AWS::StackName"), Stack=Ref("AWS::StackId"))) + InternetGateway( + "InternetGateway", + Tags=Tags(Name=Ref("AWS::StackName"), Stack=Ref("AWS::StackId")), + Condition=self.__create_ig, + ) ) self.__template.add_resource( - VPCGatewayAttachment("VPCGatewayAttachment", VpcId=Ref(vpc), InternetGatewayId=Ref(internet_gateway)) + VPCGatewayAttachment( + "VPCGatewayAttachment", + VpcId=Ref(vpc), + InternetGatewayId=Ref(internet_gateway), + Condition=self.__create_ig, + ) ) - return internet_gateway + return If(self.__create_ig, Ref(internet_gateway), self.__gateway_id) def __build_subnet(self, subnet_config: SubnetConfig, vpc: VPC): + if not subnet_config.cidr: + cidr = Ref( + self.__add_parameter( + name=f"{subnet_config.name}CIDR", + description=f"The CIDR of the {subnet_config.name}", + expected_input_type="String", + ) + ) + else: + cidr = subnet_config.cidr + subnet = Subnet( subnet_config.name, - CidrBlock=subnet_config.cidr, + CidrBlock=cidr, VpcId=Ref(vpc), MapPublicIpOnLaunch=subnet_config.map_public_ip_on_launch, Tags=subnet_config.tags(), + AvailabilityZone=self.__availability_zone, ) - if subnet_config.availability_zone: - subnet.AvailabilityZone = subnet_config.availability_zone self.__template.add_resource(subnet) self.__template.add_output(Output(subnet_config.name + "Id", Value=Ref(subnet))) return subnet @@ -137,18 +188,13 @@ def __build_nat_gateway(self, subnet_config: SubnetConfig, subnet_ref: Subnet): ) def __build_route_table( - self, - subnet_config: SubnetConfig, - subnet_ref: Subnet, - vpc: VPC, - internet_gateway: InternetGateway, - nat_gateway: NatGateway, + self, subnet_config: SubnetConfig, subnet_ref: Subnet, vpc: VPC, internet_gateway, nat_gateway: NatGateway ): route_table = self.__template.add_resource( RouteTable( "RouteTable" + subnet_config.name, VpcId=Ref(vpc), - Tags=Tags(Name=Sub("${AWS::StackName}_route_table_" + subnet_config.name), Stack=Ref("AWS::StackId")), + Tags=Tags(Name=Sub("${AWS::StackName}RouteTable" + subnet_config.name), Stack=Ref("AWS::StackId")), ) ) self.__template.add_resource( @@ -159,10 +205,21 @@ def __build_route_table( if subnet_config.default_gateway == Gateways.INTERNET_GATEWAY: self.__template.add_resource( Route( - "DefaultRoute" + subnet_config.name, + "DefaultRouteDependsOn" + subnet_config.name, + RouteTableId=Ref(route_table), + DestinationCidrBlock="0.0.0.0/0", + GatewayId=internet_gateway, + DependsOn="VPCGatewayAttachment", + Condition=self.__create_ig, + ) + ) + self.__template.add_resource( + Route( + "DefaultRouteNoDependsOn" + subnet_config.name, RouteTableId=Ref(route_table), DestinationCidrBlock="0.0.0.0/0", - GatewayId=Ref(internet_gateway), + GatewayId=internet_gateway, + Condition=self.__existing_ig, # cant use Not() ) ) elif subnet_config.default_gateway == Gateways.NAT_GATEWAY: @@ -174,3 +231,6 @@ def 
__build_route_table( NatGatewayId=Ref(nat_gateway), ) ) + + def __add_parameter(self, name, description, expected_input_type): + return self.__template.add_parameter(Parameter(name, Description=description, Type=expected_input_type)) From c34c8b8fb43e7d7cc53c25d8fea9802282d5aca5 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Tue, 18 Jun 2019 16:31:29 +0200 Subject: [PATCH 012/201] Created the cloudformation stacks, refactoring of names & dir Signed-off-by: Matteo Fiordarancio --- cli/pcluster/networking/vpc_factory.py | 12 +- cli/tox.ini | 5 + .../public-private.cfn.json | 295 ++++++++++++++++++ .../networking_configuration/public.cfn.json | 191 ++++++++++++ tests/integration-tests/conftest.py | 5 +- ...builder.py => network_template_builder.py} | 82 ++--- .../generate-ebs-substack.py | 0 .../generate-efs-substack.py | 0 .../generate-fsx-substack.py | 0 .../generate-network-configuration.py | 48 +++ .../generate-raid-substack.py | 0 11 files changed, 598 insertions(+), 40 deletions(-) create mode 100644 cloudformation/networking_configuration/public-private.cfn.json create mode 100644 cloudformation/networking_configuration/public.cfn.json rename tests/integration-tests/{vpc_builder.py => network_template_builder.py} (83%) rename util/{ => cfn-stacks-generators}/generate-ebs-substack.py (100%) rename util/{ => cfn-stacks-generators}/generate-efs-substack.py (100%) rename util/{ => cfn-stacks-generators}/generate-fsx-substack.py (100%) create mode 100644 util/cfn-stacks-generators/generate-network-configuration.py rename util/{ => cfn-stacks-generators}/generate-raid-substack.py (100%) diff --git a/cli/pcluster/networking/vpc_factory.py b/cli/pcluster/networking/vpc_factory.py index 09bb702c45..4d141a17d6 100644 --- a/cli/pcluster/networking/vpc_factory.py +++ b/cli/pcluster/networking/vpc_factory.py @@ -1,3 +1,13 @@ +# Copyright 2018-2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance +# with the License. A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and +# limitations under the License. import functools import sys @@ -6,7 +16,7 @@ class VpcFactory: - """This class handle vpc automation related to pcluster.""" + """This class handles vpc automation related to pcluster.""" class _ExceptionHandler: @staticmethod diff --git a/cli/tox.ini b/cli/tox.ini index a99f4f96d8..aca94b14e5 100644 --- a/cli/tox.ini +++ b/cli/tox.ini @@ -210,12 +210,15 @@ deps = cfn-lint # E2504 disabled since does not allow two-digit numbers in ephemeral(n) # W2507 disabled since we want to have nullable String type parameters # E2523 disabled since we have both a Launch Template and Launch Configuration +# W2508 disabled since we don't want to always specify Availability Zone commands = cfn-lint -iE2504 -iW2507 -iE2523 aws-parallelcluster.cfn.json cfn-lint batch-substack.cfn.json cfn-lint ebs-substack.cfn.json cfn-lint efs-substack.cfn.json cfn-lint raid-substack.cfn.json + cfn-lint -iW2508 networking_configuration/public.cfn.json + cfn-lint -iW2508 networking_configuration/public-private.cfn.json # Validates that cfn json templates are correctly formatted. 
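# For example, it can be run locally with: tox -e cfn-format-check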
[testenv:cfn-format-check] @@ -225,6 +228,7 @@ changedir = ../cloudformation commands = python utils/json_formatter.py -c *.cfn.json + python utils/json_formatter.py -c networking_configuration/*.cfn.json # Formats all cfn.json files. [testenv:cfn-format] @@ -234,6 +238,7 @@ changedir = ../cloudformation commands = python utils/json_formatter.py *.cfn.json + python utils/json_formatter.py networking_configuration/*.cfn.json # Runs tests for cfn templates. [testenv:cfn-tests] diff --git a/cloudformation/networking_configuration/public-private.cfn.json b/cloudformation/networking_configuration/public-private.cfn.json new file mode 100644 index 0000000000..7648c400ff --- /dev/null +++ b/cloudformation/networking_configuration/public-private.cfn.json @@ -0,0 +1,295 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Conditions": { + "CreateInternetGateway": { + "Fn::Equals": [ + { + "Ref": "InternetGatewayId" + }, + "" + ] + }, + "ExistingInternetGateway": { + "Fn::Not": [ + { + "Fn::Equals": [ + { + "Ref": "InternetGatewayId" + }, + "" + ] + } + ] + } + }, + "Description": "Network build by NetworkTemplateBuilder", + "Outputs": { + "PrivateSubnetId": { + "Value": { + "Ref": "Private" + } + }, + "PublicSubnetId": { + "Value": { + "Ref": "Public" + } + } + }, + "Parameters": { + "AvailabilityZone": { + "Description": "(Optional) The zone in which you want to create your subnet(s)", + "Type": "String" + }, + "InternetGatewayId": { + "Description": "(Optional) The id of the gateway (will be created if not specified)", + "Type": "String" + }, + "PrivateCIDR": { + "AllowedPattern": "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}/(1[6-9]|2[0-9]|3[0-2])$", + "Description": "The CIDR of the Private", + "Type": "String" + }, + "PublicCIDR": { + "AllowedPattern": "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}/(1[6-9]|2[0-9]|3[0-2])$", + "Description": "The CIDR of the Public", + "Type": "String" + }, + "VpcId": { + "Description": "The vpc id", + "Type": "String" + } + }, + "Resources": { + "DefaultRouteDependsOnPublic": { + "Condition": "CreateInternetGateway", + "DependsOn": "VPCGatewayAttachment", + "Properties": { + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Fn::If": [ + "CreateInternetGateway", + { + "Ref": "InternetGateway" + }, + { + "Ref": "InternetGatewayId" + } + ] + }, + "RouteTableId": { + "Ref": "RouteTablePublic" + } + }, + "Type": "AWS::EC2::Route" + }, + "DefaultRouteNoDependsOnPublic": { + "Condition": "ExistingInternetGateway", + "Properties": { + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Fn::If": [ + "CreateInternetGateway", + { + "Ref": "InternetGateway" + }, + { + "Ref": "InternetGatewayId" + } + ] + }, + "RouteTableId": { + "Ref": "RouteTablePublic" + } + }, + "Type": "AWS::EC2::Route" + }, + "InternetGateway": { + "Condition": "CreateInternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": { + "Ref": "AWS::StackName" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ] + }, + "Type": "AWS::EC2::InternetGateway" + }, + "NatEIPPublic": { + "Properties": { + "Domain": "vpc" + }, + "Type": "AWS::EC2::EIP" + }, + "NatGatewayPublic": { + "Properties": { + "AllocationId": { + "Fn::GetAtt": [ + "NatEIPPublic", + "AllocationId" + ] + }, + "SubnetId": { + "Ref": "Public" + } + }, + "Type": "AWS::EC2::NatGateway" + }, + "NatRoutePrivate": { + "Properties": { + "DestinationCidrBlock": "0.0.0.0/0", + "NatGatewayId": { + "Ref": "NatGatewayPublic" + }, + "RouteTableId": { + "Ref": "RouteTablePrivate" + } + }, + "Type": 
"AWS::EC2::Route" + }, + "Private": { + "Properties": { + "AvailabilityZone": { + "Ref": "AvailabilityZone" + }, + "CidrBlock": { + "Ref": "PrivateCIDR" + }, + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${AWS::StackName}PrivateSubnet" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::Subnet" + }, + "Public": { + "Properties": { + "AvailabilityZone": { + "Ref": "AvailabilityZone" + }, + "CidrBlock": { + "Ref": "PublicCIDR" + }, + "MapPublicIpOnLaunch": true, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${AWS::StackName}PublicSubnet" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::Subnet" + }, + "RouteAssociationPrivate": { + "Properties": { + "RouteTableId": { + "Ref": "RouteTablePrivate" + }, + "SubnetId": { + "Ref": "Private" + } + }, + "Type": "AWS::EC2::SubnetRouteTableAssociation" + }, + "RouteAssociationPublic": { + "Properties": { + "RouteTableId": { + "Ref": "RouteTablePublic" + }, + "SubnetId": { + "Ref": "Public" + } + }, + "Type": "AWS::EC2::SubnetRouteTableAssociation" + }, + "RouteTablePrivate": { + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${AWS::StackName}RouteTablePrivate" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::RouteTable" + }, + "RouteTablePublic": { + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${AWS::StackName}RouteTablePublic" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::RouteTable" + }, + "VPCGatewayAttachment": { + "Condition": "CreateInternetGateway", + "Properties": { + "InternetGatewayId": { + "Ref": "InternetGateway" + }, + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::VPCGatewayAttachment" + } + } +} diff --git a/cloudformation/networking_configuration/public.cfn.json b/cloudformation/networking_configuration/public.cfn.json new file mode 100644 index 0000000000..23f9b6acd6 --- /dev/null +++ b/cloudformation/networking_configuration/public.cfn.json @@ -0,0 +1,191 @@ +{ + "AWSTemplateFormatVersion": "2010-09-09", + "Conditions": { + "CreateInternetGateway": { + "Fn::Equals": [ + { + "Ref": "InternetGatewayId" + }, + "" + ] + }, + "ExistingInternetGateway": { + "Fn::Not": [ + { + "Fn::Equals": [ + { + "Ref": "InternetGatewayId" + }, + "" + ] + } + ] + } + }, + "Description": "Network build by NetworkTemplateBuilder", + "Outputs": { + "PublicSubnetId": { + "Value": { + "Ref": "Public" + } + } + }, + "Parameters": { + "AvailabilityZone": { + "Description": "(Optional) The zone in which you want to create your subnet(s)", + "Type": "String" + }, + "InternetGatewayId": { + "Description": "(Optional) The id of the gateway (will be created if not specified)", + "Type": "String" + }, + "PublicCIDR": { + "AllowedPattern": "\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}\\.\\d{1,3}/(1[6-9]|2[0-9]|3[0-2])$", + "Description": "The CIDR of the Public", + "Type": "String" + }, + "VpcId": { + "Description": "The vpc id", + "Type": "String" + } + }, + "Resources": { + "DefaultRouteDependsOnPublic": { + "Condition": "CreateInternetGateway", + "DependsOn": "VPCGatewayAttachment", + "Properties": { + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Fn::If": [ + 
"CreateInternetGateway", + { + "Ref": "InternetGateway" + }, + { + "Ref": "InternetGatewayId" + } + ] + }, + "RouteTableId": { + "Ref": "RouteTablePublic" + } + }, + "Type": "AWS::EC2::Route" + }, + "DefaultRouteNoDependsOnPublic": { + "Condition": "ExistingInternetGateway", + "Properties": { + "DestinationCidrBlock": "0.0.0.0/0", + "GatewayId": { + "Fn::If": [ + "CreateInternetGateway", + { + "Ref": "InternetGateway" + }, + { + "Ref": "InternetGatewayId" + } + ] + }, + "RouteTableId": { + "Ref": "RouteTablePublic" + } + }, + "Type": "AWS::EC2::Route" + }, + "InternetGateway": { + "Condition": "CreateInternetGateway", + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": { + "Ref": "AWS::StackName" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ] + }, + "Type": "AWS::EC2::InternetGateway" + }, + "Public": { + "Properties": { + "AvailabilityZone": { + "Ref": "AvailabilityZone" + }, + "CidrBlock": { + "Ref": "PublicCIDR" + }, + "MapPublicIpOnLaunch": false, + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${AWS::StackName}PublicSubnet" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::Subnet" + }, + "RouteAssociationPublic": { + "Properties": { + "RouteTableId": { + "Ref": "RouteTablePublic" + }, + "SubnetId": { + "Ref": "Public" + } + }, + "Type": "AWS::EC2::SubnetRouteTableAssociation" + }, + "RouteTablePublic": { + "Properties": { + "Tags": [ + { + "Key": "Name", + "Value": { + "Fn::Sub": "${AWS::StackName}RouteTablePublic" + } + }, + { + "Key": "Stack", + "Value": { + "Ref": "AWS::StackId" + } + } + ], + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::RouteTable" + }, + "VPCGatewayAttachment": { + "Condition": "CreateInternetGateway", + "Properties": { + "InternetGatewayId": { + "Ref": "InternetGateway" + }, + "VpcId": { + "Ref": "VpcId" + } + }, + "Type": "AWS::EC2::VPCGatewayAttachment" + } + } +} diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index 9cbd08083c..8a2492b825 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -16,7 +16,6 @@ import json import logging import os -import random import re from shutil import copyfile from traceback import format_tb @@ -44,7 +43,7 @@ to_snake_case, unset_credentials, ) -from vpc_builder import Gateways, SubnetConfig, VPCConfig, VPCTemplateBuilder +from network_template_builder import Gateways, NetworkTemplateBuilder, SubnetConfig, VPCConfig def pytest_addoption(parser): @@ -370,7 +369,7 @@ def vpc_stacks(cfn_stacks_factory, request): default_gateway=Gateways.NAT_GATEWAY, ) vpc_config = VPCConfig(subnets=[public_subnet, private_subnet]) - template = VPCTemplateBuilder(vpc_configuration=vpc_config).build() + template = NetworkTemplateBuilder(vpc_configuration=vpc_config).build() vpc_stacks[region] = _create_vpc_stack(request, template, region, cfn_stacks_factory) return vpc_stacks diff --git a/tests/integration-tests/vpc_builder.py b/tests/integration-tests/network_template_builder.py similarity index 83% rename from tests/integration-tests/vpc_builder.py rename to tests/integration-tests/network_template_builder.py index da8ca95880..0f72b295d0 100644 --- a/tests/integration-tests/vpc_builder.py +++ b/tests/integration-tests/network_template_builder.py @@ -37,7 +37,7 @@ class Gateways(Enum): class SubnetConfig(NamedTuple): """Configuration of a VPC Subnet""" - name: str = "PublicSubnet" + name: str = "Public" cidr: 
object = None map_public_ip_on_launch: bool = True has_nat_gateway: bool = True @@ -60,7 +60,7 @@ class VPCConfig(NamedTuple): tags: Tags = Tags(Name=Ref("AWS::StackName"), Stack=Ref("AWS::StackId")) -class VPCTemplateBuilder: +class NetworkTemplateBuilder: """Build troposphere CFN templates for VPC creation.""" def __init__( @@ -73,10 +73,27 @@ def __init__( self.__template = Template() self.__template.set_version("2010-09-09") self.__template.set_description(description) + self.__availability_zone = self.__get_availability_zone(availability_zone) + self.__vpc_config = vpc_configuration + self.__vpc = self.__get_vpc(existing_vpc) + self.__vpc_subnets = vpc_configuration.subnets + self.__gateway_id = self.__get_gateway_id() + self.__create_ig = self.__template.add_condition("CreateInternetGateway", Equals(self.__gateway_id, "")) + self.__existing_ig = self.__template.add_condition( # can't negate above condition with Not() + "ExistingInternetGateway", Not(Equals(self.__gateway_id, "")) + ) + + def __get_vpc(self, existing_vpc): + if existing_vpc: + return self.__add_parameter(name="VpcId", description="The vpc id", expected_input_type="String") + else: + return self.__build_vpc() + + def __get_availability_zone(self, availability_zone): if availability_zone: - self.__availability_zone = availability_zone + return availability_zone else: - self.__availability_zone = Ref( + return Ref( self.__add_parameter( name="AvailabilityZone", description="(Optional) The zone in which you want to create your subnet(s)", @@ -84,26 +101,6 @@ def __init__( ) ) - if existing_vpc: - self.__vpc = self.__add_parameter(name="VpcId", description="The vpc id", expected_input_type="String") - self.__vpc_subnets = vpc_configuration.subnets - - else: - self.__vpc = self.__build_vpc(vpc_configuration) - self.__vpc_subnets = vpc_configuration.subnets - - self.__gateway_id = Ref( - self.__add_parameter( - name="InternetGatewayId", - description="(Optional) The id of the gateway (will be created if not specified)", - expected_input_type="String", - ) - ) - self.__create_ig = self.__template.add_condition("CreateInternetGateway", Equals(self.__gateway_id, "")) - self.__existing_ig = self.__template.add_condition( # can't negate above condition with Not() - "ExistingInternetGateway", Not(Equals(self.__gateway_id, "")) - ) - def build(self): """Build the template.""" self.__build_template() @@ -122,14 +119,14 @@ def __build_template(self): for subnet, subnet_ref in zip(self.__vpc_subnets, subnet_refs): self.__build_route_table(subnet, subnet_ref, self.__vpc, internet_gateway, nat_gateway) - def __build_vpc(self, vpc_config_new): + def __build_vpc(self): vpc = self.__template.add_resource( VPC( - vpc_config_new.name, - CidrBlock=vpc_config_new.cidr, - EnableDnsSupport=vpc_config_new.enable_dns_support, - EnableDnsHostnames=vpc_config_new.enable_dns_hostnames, - Tags=vpc_config_new.tags, + self.__vpc_config.name, + CidrBlock=self.__vpc_config.cidr, + EnableDnsSupport=self.__vpc_config.enable_dns_support, + EnableDnsHostnames=self.__vpc_config.enable_dns_hostnames, + Tags=self.__vpc_config.tags, ) ) self.__template.add_output(Output("VpcId", Value=Ref(vpc), Description="The Vpc Id")) @@ -151,15 +148,27 @@ def __build_internet_gateway(self, vpc: VPC): Condition=self.__create_ig, ) ) - return If(self.__create_ig, Ref(internet_gateway), self.__gateway_id) + return Ref(internet_gateway) + + def __get_gateway_id(self): + return Ref( + self.__add_parameter( + name="InternetGatewayId", + description="(Optional) The id of the gateway 
(will be created if not specified)", + expected_input_type="String", + ) + ) def __build_subnet(self, subnet_config: SubnetConfig, vpc: VPC): if not subnet_config.cidr: cidr = Ref( - self.__add_parameter( - name=f"{subnet_config.name}CIDR", - description=f"The CIDR of the {subnet_config.name}", - expected_input_type="String", + self.__template.add_parameter( + Parameter( + f"{subnet_config.name}CIDR", + Description=f"The CIDR of the {subnet_config.name}", + Type="String", + AllowedPattern=r"\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}/(1[6-9]|2[0-9]|3[0-2])$", + ) ) ) else: @@ -174,7 +183,7 @@ def __build_subnet(self, subnet_config: SubnetConfig, vpc: VPC): AvailabilityZone=self.__availability_zone, ) self.__template.add_resource(subnet) - self.__template.add_output(Output(subnet_config.name + "Id", Value=Ref(subnet))) + self.__template.add_output(Output(subnet_config.name + "SubnetId", Value=Ref(subnet))) return subnet def __build_nat_gateway(self, subnet_config: SubnetConfig, subnet_ref: Subnet): @@ -190,6 +199,7 @@ def __build_nat_gateway(self, subnet_config: SubnetConfig, subnet_ref: Subnet): def __build_route_table( self, subnet_config: SubnetConfig, subnet_ref: Subnet, vpc: VPC, internet_gateway, nat_gateway: NatGateway ): + internet_gateway = If(self.__create_ig, internet_gateway, self.__gateway_id) route_table = self.__template.add_resource( RouteTable( "RouteTable" + subnet_config.name, diff --git a/util/generate-ebs-substack.py b/util/cfn-stacks-generators/generate-ebs-substack.py similarity index 100% rename from util/generate-ebs-substack.py rename to util/cfn-stacks-generators/generate-ebs-substack.py diff --git a/util/generate-efs-substack.py b/util/cfn-stacks-generators/generate-efs-substack.py similarity index 100% rename from util/generate-efs-substack.py rename to util/cfn-stacks-generators/generate-efs-substack.py diff --git a/util/generate-fsx-substack.py b/util/cfn-stacks-generators/generate-fsx-substack.py similarity index 100% rename from util/generate-fsx-substack.py rename to util/cfn-stacks-generators/generate-fsx-substack.py diff --git a/util/cfn-stacks-generators/generate-network-configuration.py b/util/cfn-stacks-generators/generate-network-configuration.py new file mode 100644 index 0000000000..76275384f0 --- /dev/null +++ b/util/cfn-stacks-generators/generate-network-configuration.py @@ -0,0 +1,48 @@ +from network_template_builder import Gateways, NetworkTemplateBuilder, SubnetConfig, VPCConfig + + +def generate_public_template(path): + """ + Generate a template for a network configuration with one public subnet. + + The generated template will obligatory ask for the CIDR of the subnet and the vpc-id. If the vpc has already an + internet gateway, you must pass it as a parameter. You can optionally specify the availability zone if you need a + specific one. + :param path: the path in which write the file + """ + public_subnet = SubnetConfig( + name="Public", map_public_ip_on_launch=False, has_nat_gateway=False, default_gateway=Gateways.INTERNET_GATEWAY + ) + existing_vpc = VPCConfig(subnets=[public_subnet]) + template = NetworkTemplateBuilder(vpc_configuration=existing_vpc, existing_vpc=True).build() + _write_json_to_file(template, path) + + +def generate_public_private_template(path): + """ + Generate a template for a network configuration with one public subnet and one private subnet with NAT. + + The generated template will obligatory ask for both CIDR of the public subnet and the private one. It will also ask + for the vpc-id. 
If the vpc has already an internet gateway, you must pass it as a parameter. You can optionally + specify the availability zone if you need a specific one. + :param path: the path in which write the file + """ + public_subnet = SubnetConfig( + name="Public", map_public_ip_on_launch=True, has_nat_gateway=True, default_gateway=Gateways.INTERNET_GATEWAY + ) + private_subnet = SubnetConfig( + name="Private", map_public_ip_on_launch=False, has_nat_gateway=False, default_gateway=Gateways.NAT_GATEWAY + ) + existing_vpc = VPCConfig(subnets=[public_subnet, private_subnet]) + template = NetworkTemplateBuilder(vpc_configuration=existing_vpc, existing_vpc=True).build() + _write_json_to_file(template, path) + + +def _write_json_to_file(template, path): + with open(path, "w+") as file: + file.write(template.to_json()) + + +if __name__ == "__main__": + generate_public_private_template("public-private.cfn.json") + generate_public_template("public.cfn.json") diff --git a/util/generate-raid-substack.py b/util/cfn-stacks-generators/generate-raid-substack.py similarity index 100% rename from util/generate-raid-substack.py rename to util/cfn-stacks-generators/generate-raid-substack.py From ddd24d942d200afafdc444490428c1d0370362af Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Wed, 3 Jul 2019 09:43:29 +0200 Subject: [PATCH 013/201] Created Integration tests for Networking cloudformation Signed-off-by: Matteo Fiordarancio --- tests/integration-tests/README.md | 4 +- tests/integration-tests/cfn_stacks_factory.py | 18 +- tests/integration-tests/conftest.py | 2 +- .../network_template_builder.py | 22 ++- .../tests/networking/test_networking.py | 173 ++++++++++++++++++ 5 files changed, 205 insertions(+), 14 deletions(-) create mode 100644 tests/integration-tests/tests/networking/test_networking.py diff --git a/tests/integration-tests/README.md b/tests/integration-tests/README.md index 9aeae5a68d..d02a25e756 100644 --- a/tests/integration-tests/README.md +++ b/tests/integration-tests/README.md @@ -450,7 +450,7 @@ vpc = VPCConfig( ) ``` -Behind the scenes a CloudFormation template is dynamically generated by the `VPCTemplateBuilder` +Behind the scenes a CloudFormation template is dynamically generated by the `NetworkTemplateBuilder` (leveraging a tool called [Troposphere](https://github.com/cloudtools/troposphere)) and a VPC is created in each region under test by the `vpc_stacks` autouse session fixture. @@ -542,7 +542,7 @@ An example is given by this piece of code that handles the creation of a test VP @pytest.fixture(autouse=True) def vpc(cfn_stacks_factory): # ... 
lines removed - template = VPCTemplateBuilder(vpc_config).build() + template = NetworkTemplateBuilder(vpc_config).build() stack = CfnStack(name="integ-tests-vpc-" + random_alphanumeric(), region=region, template=template.to_json()) cfn_stacks_factory.create_stack(stack) return stack diff --git a/tests/integration-tests/cfn_stacks_factory.py b/tests/integration-tests/cfn_stacks_factory.py index aba6565919..25c39898ba 100644 --- a/tests/integration-tests/cfn_stacks_factory.py +++ b/tests/integration-tests/cfn_stacks_factory.py @@ -14,18 +14,20 @@ from botocore.exceptions import ClientError from retrying import retry -from utils import retrieve_cfn_outputs, set_credentials, unset_credentials +from utils import retrieve_cfn_outputs, set_credentials, unset_credentials, retrieve_cfn_resources class CfnStack: """Identify a CloudFormation stack.""" - def __init__(self, name, region, template): + def __init__(self, name, region, template, parameters=None): self.name = name self.region = region self.template = template + self.parameters = parameters or [] self.cfn_stack_id = None self.__cfn_outputs = None + self.__cfn_resources = None @property def cfn_outputs(self): @@ -37,6 +39,16 @@ def cfn_outputs(self): self.__cfn_outputs = retrieve_cfn_outputs(self.name, self.region) return self.__cfn_outputs + @property + def cfn_resources(self): + """ + Return the CloudFormation stack resources for the stack. + Resources are retrieved only once and then cached. + """ + if not self.__cfn_resources: + self.__cfn_resources = retrieve_cfn_resources(self.name, self.region) + return self.__cfn_resources + class CfnStacksFactory: """Manage creation and deletion of CloudFormation stacks.""" @@ -64,7 +76,7 @@ def create_stack(self, stack): self.__created_stacks[id] = stack try: cfn_client = boto3.client("cloudformation", region_name=region) - result = cfn_client.create_stack(StackName=name, TemplateBody=stack.template) + result = cfn_client.create_stack(StackName=name, TemplateBody=stack.template, Parameters=stack.parameters) stack.cfn_stack_id = result["StackId"] final_status = self.__wait_for_stack_creation(stack.cfn_stack_id, cfn_client) self.__assert_stack_status(final_status, "CREATE_COMPLETE") diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index 8a2492b825..adcec239f2 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -320,7 +320,7 @@ def cfn_stacks_factory(request): # FIXME: we need to find a better solution to this since AZs are independently mapped to names for each AWS account. 
# https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/using-regions-availability-zones.html -_AVAILABILITY_ZONE_OVERRIDES = { +AVAILABILITY_ZONE_OVERRIDES = { # c5.xlarge is not supported in us-east-1e # FSx Lustre file system creation is currently not supported for us-east-1e "us-east-1": ["us-east-1a", "us-east-1b", "us-east-1c", "us-east-1d", "us-east-1f"], diff --git a/tests/integration-tests/network_template_builder.py b/tests/integration-tests/network_template_builder.py index 0f72b295d0..f4fe4d9778 100644 --- a/tests/integration-tests/network_template_builder.py +++ b/tests/integration-tests/network_template_builder.py @@ -94,10 +94,13 @@ def __get_availability_zone(self, availability_zone): return availability_zone else: return Ref( - self.__add_parameter( - name="AvailabilityZone", - description="(Optional) The zone in which you want to create your subnet(s)", - expected_input_type="String", + self.__template.add_parameter( + Parameter( + "AvailabilityZone", + Description="(Optional) The zone in which you want to create your subnet(s)", + Type="String", + Default="", + ) ) ) @@ -152,10 +155,13 @@ def __build_internet_gateway(self, vpc: VPC): def __get_gateway_id(self): return Ref( - self.__add_parameter( - name="InternetGatewayId", - description="(Optional) The id of the gateway (will be created if not specified)", - expected_input_type="String", + self.__template.add_parameter( + Parameter( + "InternetGatewayId", + Description="(Optional) The id of the gateway (will be created if not specified)", + Type="String", + Default="", + ) ) ) diff --git a/tests/integration-tests/tests/networking/test_networking.py b/tests/integration-tests/tests/networking/test_networking.py new file mode 100644 index 0000000000..70f23304eb --- /dev/null +++ b/tests/integration-tests/tests/networking/test_networking.py @@ -0,0 +1,173 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. 
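+#
+# These tests create stacks from the templates in cloudformation/networking_configuration and check the resulting
+# subnets, route tables and gateways. The same templates can also be deployed by hand; an illustrative AWS CLI
+# invocation (not used by the tests, placeholder values in angle brackets) would be:
+#
+#   aws cloudformation create-stack --stack-name test-public-network \
+#       --template-body file://cloudformation/networking_configuration/public.cfn.json \
+#       --parameters ParameterKey=VpcId,ParameterValue=<vpc-id> \
+#                    ParameterKey=PublicCIDR,ParameterValue=10.0.0.0/24 \
+#                    ParameterKey=InternetGatewayId,ParameterValue=<igw-id> \
+#                    ParameterKey=AvailabilityZone,ParameterValue=<az>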
+import os + +import boto3 +import pytest + +from assertpy import assert_that +from cfn_stacks_factory import CfnStack, CfnStacksFactory +from conftest import AVAILABILITY_ZONE_OVERRIDES +from utils import random_alphanumeric + + +@pytest.fixture() +def networking_stack_factory(request): + """Define a fixture to manage the creation and destruction of CloudFormation stacks.""" + factory = CfnStacksFactory() + + def _create_network(region, template_path, parameters): + file_content = extract_template(template_path) + stack = CfnStack( + name="integ-tests-networking-" + random_alphanumeric(), + region=region, + template=file_content, + parameters=parameters, + ) + factory.create_stack(stack) + return stack + + def extract_template(template_path): + with open(template_path) as cfn_file: + file_content = cfn_file.read() + return file_content + + yield _create_network + factory.delete_all_stacks() + + +@pytest.fixture() +def vpc_stack(vpc_stacks, region): + return vpc_stacks[region] + + +def test_public_network_topology(region, vpc_stack, networking_stack_factory): + ec2_client = boto3.client("ec2", region_name=region) + vpc_id = vpc_stack.cfn_outputs["VpcId"] + public_subnet_cidr = "10.0.3.0/24" + availability_zone = AVAILABILITY_ZONE_OVERRIDES.get(region, "") + internet_gateway_id = vpc_stack.cfn_resources["InternetGateway"] + + parameters = _get_cfn_parameters( + availability_zone, internet_gateway_id=internet_gateway_id, vpc_id=vpc_id, public_cidr=public_subnet_cidr + ) + path = os.path.join("..", "..", "cloudformation", "networking_configuration", "public.cfn.json") + stack = networking_stack_factory(region, path, parameters) + + public_subnet_id = stack.cfn_outputs["PublicSubnetId"] + _assert_subnet_cidr(ec2_client, public_subnet_id, expected_subnet_cidr=public_subnet_cidr) + _assert_internet_gateway_id(ec2_client, vpc_id, expected_internet_gateway_id=internet_gateway_id) + _assert_internet_gateway_in_subnet_route(ec2_client, public_subnet_id, internet_gateway_id) + _assert_subnet_property( + region, public_subnet_id, expected_autoassign_ip_value=False, expected_availability_zone=availability_zone + ) + + +def test_public_private_network_topology(region, vpc_stack, networking_stack_factory): + ec2_client = boto3.client("ec2", region_name=region) + vpc_id = vpc_stack.cfn_outputs["VpcId"] + public_subnet_cidr = "10.0.5.0/24" + private_subnet_cidr = "10.0.4.0/24" + availability_zone = AVAILABILITY_ZONE_OVERRIDES.get(region, "") + internet_gateway_id = vpc_stack.cfn_resources["InternetGateway"] + + parameters = _get_cfn_parameters( + availability_zone, + internet_gateway_id=internet_gateway_id, + vpc_id=vpc_id, + public_cidr=public_subnet_cidr, + private_cidr=private_subnet_cidr, + ) + path = os.path.join("..", "..", "cloudformation", "networking_configuration", "public-private.cfn.json") + stack = networking_stack_factory(region, path, parameters) + + public_subnet_id = stack.cfn_outputs["PublicSubnetId"] + private_subnet_id = stack.cfn_outputs["PrivateSubnetId"] + _assert_subnet_cidr(ec2_client, public_subnet_id, expected_subnet_cidr=public_subnet_cidr) + _assert_subnet_cidr(ec2_client, private_subnet_id, expected_subnet_cidr=private_subnet_cidr) + _assert_internet_gateway_id(ec2_client, vpc_id, expected_internet_gateway_id=internet_gateway_id) + _assert_internet_gateway_in_subnet_route(ec2_client, public_subnet_id, internet_gateway_id) + _assert_subnet_property( + region, public_subnet_id, expected_autoassign_ip_value=True, expected_availability_zone=availability_zone + ) + 
_assert_subnet_property( + region, private_subnet_id, expected_autoassign_ip_value=False, expected_availability_zone=availability_zone + ) + _assert_nat_in_subnet(ec2_client, public_subnet_id) + _assert_nat_in_subnet_route(ec2_client, private_subnet_id) + + +def _get_cfn_parameters(availability_zone, internet_gateway_id, public_cidr, vpc_id, private_cidr=None): + """Create cloudformation-compatible stack parameter given the variables.""" + parameters = [ + {"ParameterKey": "AvailabilityZone", "ParameterValue": availability_zone}, + {"ParameterKey": "InternetGatewayId", "ParameterValue": internet_gateway_id}, + {"ParameterKey": "PublicCIDR", "ParameterValue": public_cidr}, + {"ParameterKey": "VpcId", "ParameterValue": vpc_id}, + ] + if private_cidr: + parameters.append({"ParameterKey": "PrivateCIDR", "ParameterValue": private_cidr}) + return parameters + + +def _assert_internet_gateway_in_subnet_route(ec2_client, subnet_id, expected_internet_gateway_id): + """ + Check that the given internet_gateway is associated with the route of the subnet. + + :param ec2_client: the boto3 client to which make requests + :param subnet_id: the subnet associated with the route we want to verify + :param expected_internet_gateway_id: the gateway we expect to find in the route + """ + response = ec2_client.describe_route_tables(Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]) + routes = response["RouteTables"][0]["Routes"] + # Routes[1] because 0 is always local + assert_that(routes[1]["DestinationCidrBlock"]).is_equal_to("0.0.0.0/0") + if expected_internet_gateway_id: + assert_that(routes[1]["GatewayId"]).is_equal_to(expected_internet_gateway_id) + + +def _assert_subnet_cidr(ec2_client, subnet_id, expected_subnet_cidr): + """Check that the given subnet has the same subnet cidr.""" + response = ec2_client.describe_subnets(Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]) + subnet_cidr = response["Subnets"][0]["CidrBlock"] + assert_that(subnet_cidr).is_equal_to(expected_subnet_cidr) + + +def _assert_internet_gateway_id(ec2_client, vpc_id, expected_internet_gateway_id): + """Check that the vpc contains the given internet gateway.""" + if expected_internet_gateway_id: + response = ec2_client.describe_internet_gateways(Filters=[{"Name": "attachment.vpc-id", "Values": [vpc_id]}]) + internet_gateway_id = response["InternetGateways"][0]["InternetGatewayId"] + assert_that(internet_gateway_id).is_equal_to(expected_internet_gateway_id) + + +def _assert_subnet_property(region, subnet_id, expected_autoassign_ip_value, expected_availability_zone=""): + """Check that the subnet has the given property.""" + subnet = boto3.resource("ec2", region_name=region).Subnet(subnet_id) + assert_that(subnet.map_public_ip_on_launch).is_equal_to(expected_autoassign_ip_value) + if expected_availability_zone: + assert_that(subnet.availability_zone).is_equal_to(expected_availability_zone) + + +def _assert_nat_in_subnet(ec2_client, subnet_id): + """Check that there is a nat in the given subnet.""" + response = ec2_client.describe_nat_gateways(Filters=[{"Name": "subnet-id", "Values": [subnet_id]}]) + assert_that(len(response["NatGateways"])).is_greater_than(0) + + +def _assert_nat_in_subnet_route(ec2_client, subnet_id): + """Check that the route of the given subnet contains a Nat Gateway.""" + response = ec2_client.describe_route_tables(Filters=[{"Name": "association.subnet-id", "Values": [subnet_id]}]) + routes = response["RouteTables"][0]["Routes"] + assert_that(next(route for route in routes if 
route["DestinationCidrBlock"] == "0.0.0.0/0")).contains( + "NatGatewayId" + ) From 95d7bb364f05746c3ab07198a6c30d18523aef69 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Fri, 21 Jun 2019 12:02:57 +0200 Subject: [PATCH 014/201] Updated stack names Signed-off-by: Matteo Fiordarancio --- .../public-private.cfn.json | 20 +++++-------------- .../networking_configuration/public.cfn.json | 12 +++-------- .../network_template_builder.py | 10 ++++++---- 3 files changed, 14 insertions(+), 28 deletions(-) diff --git a/cloudformation/networking_configuration/public-private.cfn.json b/cloudformation/networking_configuration/public-private.cfn.json index 7648c400ff..6a455c96bc 100644 --- a/cloudformation/networking_configuration/public-private.cfn.json +++ b/cloudformation/networking_configuration/public-private.cfn.json @@ -109,9 +109,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Ref": "AWS::StackName" - } + "Value": "ParallelClusterIG" }, { "Key": "Stack", @@ -167,9 +165,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Fn::Sub": "${AWS::StackName}PrivateSubnet" - } + "Value": "ParallelClusterPrivateSubnet" }, { "Key": "Stack", @@ -196,9 +192,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Fn::Sub": "${AWS::StackName}PublicSubnet" - } + "Value": "ParallelClusterPublicSubnet" }, { "Key": "Stack", @@ -240,9 +234,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Fn::Sub": "${AWS::StackName}RouteTablePrivate" - } + "Value": "ParallelClusterRouteTablePrivate" }, { "Key": "Stack", @@ -262,9 +254,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Fn::Sub": "${AWS::StackName}RouteTablePublic" - } + "Value": "ParallelClusterRouteTablePublic" }, { "Key": "Stack", diff --git a/cloudformation/networking_configuration/public.cfn.json b/cloudformation/networking_configuration/public.cfn.json index 23f9b6acd6..6376e2e3d3 100644 --- a/cloudformation/networking_configuration/public.cfn.json +++ b/cloudformation/networking_configuration/public.cfn.json @@ -99,9 +99,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Ref": "AWS::StackName" - } + "Value": "ParallelClusterIG" }, { "Key": "Stack", @@ -125,9 +123,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Fn::Sub": "${AWS::StackName}PublicSubnet" - } + "Value": "ParallelClusterPublicSubnet" }, { "Key": "Stack", @@ -158,9 +154,7 @@ "Tags": [ { "Key": "Name", - "Value": { - "Fn::Sub": "${AWS::StackName}RouteTablePublic" - } + "Value": "ParallelClusterRouteTablePublic" }, { "Key": "Stack", diff --git a/tests/integration-tests/network_template_builder.py b/tests/integration-tests/network_template_builder.py index f4fe4d9778..33ea1c5110 100644 --- a/tests/integration-tests/network_template_builder.py +++ b/tests/integration-tests/network_template_builder.py @@ -12,7 +12,7 @@ from enum import Enum, auto from typing import List, NamedTuple -from troposphere import Equals, GetAtt, If, Not, Output, Parameter, Ref, Sub, Tags, Template +from troposphere import Equals, GetAtt, If, Not, Output, Parameter, Ref, Tags, Template from troposphere.ec2 import ( EIP, VPC, @@ -25,6 +25,8 @@ VPCGatewayAttachment, ) +PREPENDNAME = "ParallelCluster" + class Gateways(Enum): """Define gateways to use for default traffic in a subnet.""" @@ -45,7 +47,7 @@ class SubnetConfig(NamedTuple): def tags(self): """Get the tags for the subnet""" - return Tags(Name=Sub("${AWS::StackName}" + self.name + "Subnet"), Stack=Ref("AWS::StackId")) + return Tags(Name=PREPENDNAME + self.name + "Subnet", Stack=Ref("AWS::StackId")) class VPCConfig(NamedTuple): @@ -139,7 +141,7 @@ def __build_internet_gateway(self, 
vpc: VPC): internet_gateway = self.__template.add_resource( InternetGateway( "InternetGateway", - Tags=Tags(Name=Ref("AWS::StackName"), Stack=Ref("AWS::StackId")), + Tags=Tags(Name=PREPENDNAME + "IG", Stack=Ref("AWS::StackId")), Condition=self.__create_ig, ) ) @@ -210,7 +212,7 @@ def __build_route_table( RouteTable( "RouteTable" + subnet_config.name, VpcId=Ref(vpc), - Tags=Tags(Name=Sub("${AWS::StackName}RouteTable" + subnet_config.name), Stack=Ref("AWS::StackId")), + Tags=Tags(Name=PREPENDNAME + "RouteTable" + subnet_config.name, Stack=Ref("AWS::StackId")), ) ) self.__template.add_resource( From a6854bca752282144bdc296da16bf4f5e771f7bb Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Fri, 21 Jun 2019 10:37:46 +0200 Subject: [PATCH 015/201] Create a describe cidr function that find a suitable subnet in vpc Signed-off-by: Matteo Fiordarancio --- cli/.flake8 | 1 + cli/pcluster/utils.py | 126 +++++++++++++++++++++++++ cli/setup.py | 2 +- cli/tests/pcluster/test_subnet_cidr.py | 81 ++++++++++++++++ 4 files changed, 209 insertions(+), 1 deletion(-) create mode 100644 cli/tests/pcluster/test_subnet_cidr.py diff --git a/cli/.flake8 b/cli/.flake8 index 26265ce3ca..37ac11bacb 100644 --- a/cli/.flake8 +++ b/cli/.flake8 @@ -18,6 +18,7 @@ per-file-ignores = pcluster/easyconfig.py: E402, D103 pcluster/cfnconfig.py: E402, D103 tests/pcluster/pcluster-unittest.py: D101, D102, D103 + tests/pcluster/test_*.py: D101, D102, D103 tests/awsbatch/test_*.py: D101, D102 ../tests/integration-tests/tests/*: D103 ../tests/integration-tests/*: D205, D400, D401 diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index 21fea24370..7dd62891db 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -12,8 +12,11 @@ import json import os +import socket +import struct import zipfile from io import BytesIO +from ipaddress import ip_address, ip_network, summarize_address_range import boto3 from botocore.exceptions import ClientError @@ -199,3 +202,126 @@ def get_supported_schedulers(): :return: a tuple of strings of the supported scheduler """ return "sge", "torque", "slurm", "awsbatch" + + +def next_power_of_2(x): + """Given a number returns the following power of 2 of that number.""" + return 1 if x == 0 else 2 ** (x - 1).bit_length() + + +def get_subnet_cidr(vpc_cidr, occupied_cidr, max_queue_size): + """ + Decide the parallelcluster subnet size of the compute fleet. + :param vpc_cidr: the vpc_cidr in which the suitable subnet should be + :param occupied_cidr: a list of cidr of the already occupied subnets in the vpc + :param max_queue_size: the max nodes / vcpus that the user has set + :return: + """ + target_size = max(4000, 2 * max_queue_size) + cidr = decide_cidr(vpc_cidr, occupied_cidr, target_size) + while cidr is None: + if target_size < max_queue_size: + return None + target_size = target_size // 2 + cidr = decide_cidr(vpc_cidr, occupied_cidr, target_size) + return cidr + + +# This code is complex, get ready +def decide_cidr(vpc_cidr, occupied_cidr, target_size): + """ + Decide the smallest suitable CIDR for a subnet with size >= target_size. 
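+
+    For example (this case is also asserted in tests/pcluster/test_subnet_cidr.py): with vpc_cidr="10.0.0.0/16",
+    occupied_cidr=["10.0.1.0/24", "10.0.3.0/24"] and target_size=500, the result is "10.0.4.0/23": a /23
+    (512 addresses) is the smallest block that fits the requested size, and 10.0.4.0/23 is the first /23 in the
+    vpc that does not overlap the occupied subnets.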
+
+    :param vpc_cidr: the vpc_cidr in which the suitable subnet should be
+    :param occupied_cidr: a list of cidr of the already occupied subnets in the vpc
+    :param target_size: the minimum target size of the subnet
+    :return: the suitable CIDR if found, else None
+    """
+    # How the algorithm works: if we want to find a suitable CIDR inside a vpc that already has some subnets inside,
+    # we first have to check whether the size of the subnet we want to create is greater than the minimum CIDR
+    # (/16, /24, etc.). If it is, we have to transform all the occupied_cidr entries into subnets whose CIDR is at
+    # least as large as the one we want to allocate. To do that, we use _expand_cidrs().
+    #
+    # Why do that?
+    #
+    # Well, the function summarize_address_range() returns an iterator of all the cidrs needed to encapsulate the
+    # given begin ip and end ip strictly. For example, from 10.0.0.0 to 10.0.1.1 the function returns [10.0.0.0/24,
+    # 10.0.1.0/31]. We therefore need to give that function an ip range that can be compressed into just one cidr.
+    # In order to do that, we basically expand all the cidrs and then eliminate the duplicates.
+
+    # Once every occupied cidr has a prefix no longer than the target one (32 minus the power of 2 that gives
+    # subnet_size), we create a list of (begin ip, end ip) tuples sorted by end ip. We then compare each begin ip
+    # with the end ip of the previous entry, looking for a gap larger than subnet_size. If we find one, we convert
+    # it to a cidr using the summarize_address_range() function.
+    # Function cost: O(nlogn), where n is the size of occupied_cidr
+    # Understanding cost: O(over9000)
+    aws_reserved_ip = 6
+    min_bitmask_length = 28
+    target_bitmask_length = min(
+        32 - ((next_power_of_2(target_size + aws_reserved_ip) - 1).bit_length()), min_bitmask_length
+    )
+    subnet_size = 2 ** (32 - target_bitmask_length)
+    vpc_begin_address_decimal, vpc_end_address_decimal = _get_cidr_limits_as_decimal(vpc_cidr)
+
+    if vpc_end_address_decimal - vpc_begin_address_decimal + 1 < subnet_size:  # if we do not have enough space
+        return None
+
+    if not occupied_cidr:  # if we have space and no occupied cidr
+        return _decimal_ip_limits_to_cidr(vpc_begin_address_decimal, vpc_begin_address_decimal + subnet_size)
+
+    occupied_cidr_max_bitmask = max([int(subnet_cidr.split("/")[1]) for subnet_cidr in occupied_cidr])
+    if occupied_cidr_max_bitmask > target_bitmask_length:
+        # The occupied blocks are smaller than the target one, so we need to make them bigger
+        occupied_cidr = _expand_cidrs(occupied_cidr, min_size=target_bitmask_length)
+
+    # subnets_limits is a list of pairs (begin ip, end ip) obtained from the cidrs.
So for example + # 10.0.0.0/17 = 10.0.0.0, 10.0.127.255 + begin_ip_index = 0 + end_ip_index = 1 + subnets_limits = [_get_cidr_limits_as_decimal(subnet) for subnet in occupied_cidr] + subnets_limits.sort(key=lambda x: x[1]) # sort by ending numbers, sorting by beginning is the same + # to check for space between the last occupied and the end of the vpc + subnets_limits.append((vpc_end_address_decimal, vpc_end_address_decimal)) + + if (subnets_limits[0][begin_ip_index] - vpc_begin_address_decimal) >= subnet_size: + return _decimal_ip_limits_to_cidr(vpc_begin_address_decimal, vpc_begin_address_decimal + subnet_size) + + # Looking at space between occupied cidrs + for index in range(1, len(subnets_limits)): + begin_number = subnets_limits[index][begin_ip_index] + end_previous_number = subnets_limits[index - 1][end_ip_index] + if begin_number - end_previous_number > subnet_size: + return _decimal_ip_limits_to_cidr(end_previous_number + 1, end_previous_number + subnet_size) + return None + + +def _decimal_ip_limits_to_cidr(begin, end): + """Given begin and end ip (as decimals number), return the CIDR that begins with begin ip and ends with end ip.""" + return str( + summarize_address_range( + ip_address(socket.inet_ntoa(struct.pack("!L", begin))), ip_address(socket.inet_ntoa(struct.pack("!L", end))) + ).__next__() + ) + + +def _get_cidr_limits_as_decimal(cidr): + """Given a cidr, return the begin ip and the end ip as decimal.""" + address = ip_network(cidr) + return _ip_to_decimal(str(address[0])), _ip_to_decimal(str(address[-1])) + + +def _ip_to_decimal(ip): + """Transform an ip into its decimal representantion.""" + return int(bin(struct.unpack("!I", socket.inet_aton(ip))[0]), 2) + + +def _expand_cidrs(occupied_cidrs, min_size): + """Given a list of cidrs, it upgrade the netmask of each one to min_size and returns the updated cidrs.""" + new_cidrs = set() + for cidr in occupied_cidrs: + if int(cidr.split("/")[1]) > min_size: + ip_addr = ip_network(u"{0}".format(cidr)) + new_cidrs.add(str(ip_addr.supernet(new_prefix=min_size))) + else: + new_cidrs.add(cidr) + return list(new_cidrs) diff --git a/cli/setup.py b/cli/setup.py index ae66026763..6bf12f0017 100644 --- a/cli/setup.py +++ b/cli/setup.py @@ -22,7 +22,7 @@ def readme(): VERSION = "2.4.0" -REQUIRES = ["boto3>=1.9.54", "future>=0.16.0,<=0.17.1", "tabulate>=0.8.2,<=0.8.3"] +REQUIRES = ["boto3>=1.9.54", "future>=0.16.0,<=0.17.1", "tabulate>=0.8.2,<=0.8.3", "ipaddress"] if sys.version_info[:2] == (2, 6): # For python2.6 we have to require argparse since it diff --git a/cli/tests/pcluster/test_subnet_cidr.py b/cli/tests/pcluster/test_subnet_cidr.py new file mode 100644 index 0000000000..65ba97e681 --- /dev/null +++ b/cli/tests/pcluster/test_subnet_cidr.py @@ -0,0 +1,81 @@ +from assertpy import assert_that +from pcluster.utils import decide_cidr, get_subnet_cidr + + +def test_empty_vpc(): + assert_that(decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=[], target_size=250)).is_equal_to("10.0.0.0/24") + assert_that(decide_cidr(vpc_cidr="10.0.0.0/8", occupied_cidr=[], target_size=250)).is_equal_to("10.0.0.0/24") + assert_that(decide_cidr(vpc_cidr="10.2.0.0/16", occupied_cidr=[], target_size=250)).is_equal_to("10.2.0.0/24") + assert_that(decide_cidr(vpc_cidr="10.2.0.0/25", occupied_cidr=[], target_size=500)).is_none() + assert_that(decide_cidr(vpc_cidr="10.2.0.0/25", occupied_cidr=[], target_size=100)).is_equal_to("10.2.0.0/25") + + +def test_no_space_vpc(): + assert_that(decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/16"], 
target_size=1)).is_none() + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/17", "10.0.128.0/17"], target_size=1) + ).is_none() + assert_that( + decide_cidr( + vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], target_size=16385 + ) + ).is_none() + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.128.0/17"], target_size=16385) + ).is_none() + + +def test_space_needed_bigger_than_allocated_subnets(): + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.3.0/24"], target_size=500) + ).is_equal_to("10.0.4.0/23") + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.4.0/24"], target_size=500) + ).is_equal_to("10.0.2.0/23") + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.4.0/24"], target_size=1000) + ).is_equal_to("10.0.8.0/22") + + +def test_space_needed_smaller_than_allocated_subnets(): + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.3.0/24"], target_size=250) + ).is_equal_to("10.0.0.0/24") + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.2.0/24"], target_size=250) + ).is_equal_to("10.0.0.0/24") + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.3.0/24", "10.0.2.0/24"], target_size=250) + ).is_equal_to("10.0.0.0/24") + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.4.0/24", "10.0.2.0/24"], target_size=250) + ).is_equal_to("10.0.0.0/24") + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.4.0/24", "10.0.2.0/24"], target_size=120) + ).is_equal_to("10.0.0.0/25") + assert_that( + decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.0.0/24"], target_size=120) + ).is_equal_to("10.0.2.0/25") + + +def test_get_subnet_cidr(): + assert_that( + get_subnet_cidr( + vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], max_queue_size=17000 + ) + ).is_none() + assert_that( + get_subnet_cidr( + vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], max_queue_size=100 + ) + ).is_equal_to("10.0.192.0/20") + assert_that( + get_subnet_cidr( + vpc_cidr="10.0.0.0/16", + occupied_cidr=["10.0.0.0/19", "10.0.32.0/20", "10.0.48.0/21", "10.0.64.0/18", "10.0.128.0/17"], + max_queue_size=100, + ) + ).is_equal_to("10.0.56.0/21") + assert_that( + get_subnet_cidr("10.0.0.0/16", ["10.0.0.0/24"], 256) + ).is_equal_to("10.0.16.0/20") From 90b1b384a84961a799d2c666939f4a1fa9d6edbe Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Wed, 19 Jun 2019 10:54:28 +0200 Subject: [PATCH 016/201] Automated Vpc and network creation Signed-off-by: Matteo Fiordarancio --- cli/pcluster/easyconfig/__init__.py | 10 + cli/pcluster/{ => easyconfig}/easyconfig.py | 367 ++++++++---------- .../easyconfig/easyconfig_networking.py | 257 ++++++++++++ cli/pcluster/easyconfig/easyconfig_utils.py | 130 +++++++ 4 files changed, 555 insertions(+), 209 deletions(-) create mode 100644 cli/pcluster/easyconfig/__init__.py rename cli/pcluster/{ => easyconfig}/easyconfig.py (55%) create mode 100644 cli/pcluster/easyconfig/easyconfig_networking.py create mode 100644 cli/pcluster/easyconfig/easyconfig_utils.py diff --git a/cli/pcluster/easyconfig/__init__.py b/cli/pcluster/easyconfig/__init__.py new file mode 100644 index 0000000000..1d44f1e10c --- /dev/null +++ b/cli/pcluster/easyconfig/__init__.py @@ -0,0 +1,10 @@ 
+# Copyright 2013-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance +# with the License. A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and +# limitations under the License. diff --git a/cli/pcluster/easyconfig.py b/cli/pcluster/easyconfig/easyconfig.py similarity index 55% rename from cli/pcluster/easyconfig.py rename to cli/pcluster/easyconfig/easyconfig.py index 926ec641a4..7d20125412 100644 --- a/cli/pcluster/easyconfig.py +++ b/cli/pcluster/easyconfig/easyconfig.py @@ -10,91 +10,51 @@ # limitations under the License. # fmt: off from __future__ import absolute_import, print_function # isort:skip -from future import standard_library # isort:skip -standard_library.install_aliases() -# fmt: on import copy import errno -import functools import logging import os import stat -import sys import tempfile -from builtins import input import boto3 import configparser -from botocore.exceptions import BotoCoreError, ClientError -from pcluster.utils import get_supported_os, get_supported_schedulers +from pcluster import cfnconfig +from pcluster.easyconfig.easyconfig_networking import ( + _choose_network_configuration, + automate_creation_of_subnet, + automate_creation_of_vpc_and_subnet, + ec2_conn, + handle_client_exception, +) +from pcluster.easyconfig.easyconfig_utils import _prompt_a_list, _prompt_a_list_of_tuple, prompt +from pcluster.utils import get_subnet_cidr, get_supported_os, get_supported_schedulers + +from future import standard_library # isort:skip + + +standard_library.install_aliases() +# fmt: on -from . import cfnconfig -logger = logging.getLogger("pcluster.pcluster") +LOGGER = logging.getLogger("pcluster.pcluster") unsupported_regions = ["ap-northeast-3"] DEFAULT_VALUES = { - "cluster_template": "default", "aws_region_name": "us-east-1", + "cluster_template": "default", "scheduler": "sge", "os": "alinux", "max_queue_size": "10", "master_instance_type": "t2.micro", "compute_instance_type": "t2.micro", "vpc_name": "public", - "initial_size": "1", + "min_size": "0", } FORCED_BATCH_VALUES = {"os": "alinux", "compute_instance_type": "optimal"} - - -def handle_client_exception(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except (BotoCoreError, ClientError) as e: - print("Failed with error: %s" % e) - print("Hint: please check your AWS credentials.") - print("Run `aws configure` or set the credentials as environment variables.") - sys.exit(1) - - return wrapper - - -def prompt(message, validator=lambda x: True, input_to_option=lambda x: x, default_value=None, options_to_print=None): - """ - Prompt the user a message with optionally some options. 
- - :param message: the message to show to the user - :param validator: a function that predicates if the input is correct - :param input_to_option: a function that given the input transforms it in something else - :param default_value: the value to return as the default if the user doesn't insert anything - :param options_to_print: the options to print if necessary - :return: the value inserted by the user validated - """ - if options_to_print: - print("Allowed values for {0}:".format(message)) - for item in options_to_print: - print(item) - user_prompt = "{0} [{1}]: ".format(message, default_value or "") - - valid_user_input = False - result = default_value - # We give the user the possibility to try again if wrong - while not valid_user_input: - user_input = input(user_prompt).strip() - if user_input == "": - result = default_value - valid_user_input = True - else: - result = input_to_option(user_input) - if validator(result): - valid_user_input = True - else: - print("ERROR: {0} is not an acceptable value for {1}".format(user_input, message)) - return result +VPC_PARAMETERS_TO_REMOVE = "vpc-id", "master_subnet_id", "compute_subnet_id", "use_public_ips", "compute_subnet_cidr" @handle_client_exception @@ -104,61 +64,67 @@ def get_regions(): return [region.get("RegionName") for region in regions if region.get("RegionName") not in unsupported_regions] -def _evaluate_aws_region(aws_region_name): - if aws_region_name: - region = aws_region_name - elif os.environ.get("AWS_DEFAULT_REGION"): - region = os.environ.get("AWS_DEFAULT_REGION") - else: - region = DEFAULT_VALUES["aws_region_name"] - return region - - -@handle_client_exception -def ec2_conn(aws_region_name): - region = _evaluate_aws_region(aws_region_name) - ec2 = boto3.client("ec2", region_name=region) - return ec2 - - def extract_tag_from_resource(resource, tag_name): tags = resource.get("Tags", []) return next((item.get("Value") for item in tags if item.get("Key") == tag_name), None) -def _list_resources(resources, resource_name, resource_id_name): - """Return a list of tuple containing the id of the resource and the name of it.""" - resource_options = [] - for resource in resources.get(resource_name): - keyid = resource.get(resource_id_name) - name = extract_tag_from_resource(resource, tag_name="Name") - resource_options.append((keyid, name)) if name else resource_options.append((keyid,)) - - return resource_options - - @handle_client_exception def _list_keys(aws_region_name): - """Return a list of keys as a list of tuple of type (key-name,).""" + """Return a list of keys.""" conn = ec2_conn(aws_region_name) keypairs = conn.describe_key_pairs() - return _list_resources(keypairs, "KeyPairs", "KeyName") + key_options = [] + for resource in keypairs.get("KeyPairs"): + keyid = resource.get("KeyName") + key_options.append(keyid) + + if not key_options: + print( + "No KeyPair found in region {0}, please create one following the guide: " + "https://docs.aws.amazon.com/AWSEC2/latest/UserGuide/ec2-key-pairs.html".format(aws_region_name) + ) + return key_options -@handle_client_exception -def _list_vpcs(aws_region_name): - """Return a list of vpcs as a list of tuple of type (vpc-id, vpc-name (if present)).""" - conn = ec2_conn(aws_region_name) - vpcs = conn.describe_vpcs() - return _list_resources(vpcs, "Vpcs", "VpcId") + +def extract_subnet_size(cidr): + return 2 ** (32 - int(cidr.split("/")[1])) @handle_client_exception -def _list_subnets(aws_region_name, vpc_id): - """Return a list of subnet as a list of tuple of type (subnet-id, 
subnet-name (if present)).""" +def _list_vpcs_and_subnets(aws_region_name): + """ + Return a dictionary containg a list of vpc in the given region and the associated vpcs. + + Example: + + {"vpc_list": list(tuple(vpc-id, name, number of subnets)) , + "vpc_to_subnet" : {"vpc-id1": list(tuple(subnet-id1, name)), "vpc-id2": list(tuple(subnet-id1, name))}} + :param aws_region_name: the region name + """ conn = ec2_conn(aws_region_name) - subnets = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}]) - return _list_resources(subnets, "Subnets", "SubnetId") + vpcs = conn.describe_vpcs() + vpc_options = [] + vpc_to_subnets = {} + for vpc in vpcs.get("Vpcs"): + vpc_id = vpc.get("VpcId") + subnet_options = [] + subnet_list = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}]).get("Subnets") + for subnet in subnet_list: + subnet_id = subnet.get("SubnetId") + subnet_size_string = "Subnet size: {0}".format(extract_subnet_size(subnet.get("CidrBlock"))) + name = extract_tag_from_resource(subnet, tag_name="Name") + if name: + subnet_options.append((subnet_id, name, subnet_size_string)) + else: + subnet_options.append((subnet_id, subnet_size_string)) + name = extract_tag_from_resource(vpc, tag_name="Name") + vpc_to_subnets[vpc_id] = subnet_options + subnets_number = "{0} subnets inside".format(len(subnet_list)) + vpc_options.append((vpc_id, name, subnets_number)) if name else vpc_options.append((vpc_id, subnets_number)) + + return {"vpc_list": vpc_options, "vpc_to_subnets": vpc_to_subnets} @handle_client_exception @@ -179,16 +145,9 @@ def configure(args): # noqa: C901 FIXME!!! config.read(config_file) # Prompt for required values, using existing as defaults - cluster_template = prompt( - "Cluster configuration label", - default_value=get_config_parameter( - config, - section="global", - parameter_name="cluster_template", - default_value=DEFAULT_VALUES["cluster_template"], - ), - ) + cluster_template = DEFAULT_VALUES["cluster_template"] cluster_label = "cluster " + cluster_template + vpc_label = "vpc " + cluster_template # Use built in boto regions as an available option aws_region_name = _prompt_a_list( @@ -220,9 +179,17 @@ def configure(args): # noqa: C901 FIXME!!! ), ) - max_queue_size = prompt( - "Max Queue Size", + min_queue_size = prompt( + "Minimum cluster size ({0})".format(scheduler_info["instance_size_name"]), validator=lambda x: x.isdigit(), + default_value=get_config_parameter( + config, cluster_label, scheduler_info["min_size"], DEFAULT_VALUES["min_size"] + ), + ) + + max_queue_size = prompt( + "Maximum cluster size ({0})".format(scheduler_info["instance_size_name"]), + validator=lambda x: x.isdigit() and int(x) >= int(min_queue_size), default_value=get_config_parameter( config, cluster_label, scheduler_info["max_size"], DEFAULT_VALUES["max_queue_size"] ), @@ -248,18 +215,12 @@ def configure(args): # noqa: C901 FIXME!!! 
default_value=DEFAULT_VALUES["compute_instance_type"], ) - vpc_name = prompt( - "VPC configuration label", - default_value=get_config_parameter( - config, section=cluster_label, parameter_name="vpc_settings", default_value=DEFAULT_VALUES["vpc_name"] - ), - ) - vpc_label = "vpc " + vpc_name - - key_name = _prompt_a_list_of_tuple("Key Name", _list_keys(aws_region_name)) - vpc_id = _prompt_a_list_of_tuple("VPC ID", _list_vpcs(aws_region_name)) - master_subnet_id = _prompt_a_list_of_tuple("Master Subnet ID", _list_subnets(aws_region_name, vpc_id)) + key_name = _prompt_a_list("EC2 Key Pair Name", _list_keys(aws_region_name)) + automate_vpc = prompt("Automate VPC creation? (y/n)", lambda x: x == "y" or x == "n", default_value="n") == "y" + vpc_parameters = _create_vpc_parameters( + vpc_label, aws_region_name, scheduler, max_queue_size, automatized_vpc=automate_vpc + ) global_parameters = { "__name__": "global", "cluster_template": cluster_template, @@ -270,22 +231,25 @@ def configure(args): # noqa: C901 FIXME!!! cluster_parameters = { "__name__": cluster_label, "key_name": key_name, - "vpc_settings": vpc_name, + "vpc_settings": cluster_template, "scheduler": scheduler, "base_os": operating_system, "compute_instance_type": compute_instance_type, "master_instance_type": master_instance_type, scheduler_info["max_size"]: max_queue_size, - scheduler_info["initial_size"]: DEFAULT_VALUES["initial_size"], + scheduler_info["min_size"]: min_queue_size, } + if scheduler_info["value_for_initial_size"] == "min_size": + cluster_parameters[scheduler_info["initial_size_parameter_name"]] = min_queue_size + else: + cluster_parameters[scheduler_info["initial_size_parameter_name"]] = scheduler_info["value_for_initial_size"] + aliases_parameters = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} - vpc_parameters = {"__name__": vpc_label, "vpc_id": vpc_id, "master_subnet_id": master_subnet_id} sections = [aws_parameters, cluster_parameters, vpc_parameters, global_parameters, aliases_parameters] # We first remove unnecessary parameters from the past configurations - if config.has_section(cluster_label): - for par in scheduler_info["parameters_to_remove"]: - config.remove_option(cluster_label, par) + _remove_parameter_from_past_configuration(cluster_label, config, scheduler_info["parameters_to_remove"]) + _remove_parameter_from_past_configuration(vpc_label, config, VPC_PARAMETERS_TO_REMOVE) # Loop through the configuration sections we care about for section in sections: @@ -306,16 +270,70 @@ def configure(args): # noqa: C901 FIXME!!! if e.errno != errno.EEXIST: raise # can safely ignore EEXISTS for this purpose... - if not _is_config_valid(args, config): - sys.exit(1) - - # If we are here, than the file it's correct and we can override it. 
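To make the end state of this flow concrete: with the SGE scheduler and the automated public/private networking path, the section dictionaries assembled above serialize to roughly the following INI file. The section and key names are the ones used in this patch; the values are illustrative placeholders only, and [global] may carry additional keys that the hunk does not show.

    [aws]
    aws_region_name = us-east-1

    [global]
    cluster_template = default

    [cluster default]
    key_name = key1
    base_os = alinux
    scheduler = sge
    master_instance_type = t2.micro
    compute_instance_type = t2.micro
    initial_queue_size = 0
    max_queue_size = 10
    maintain_initial_size = true
    vpc_settings = default

    [vpc default]
    vpc_id = vpc-0123456789abcdef0
    master_subnet_id = subnet-0aaaaaaaaaaaaaaaa
    compute_subnet_id = subnet-0bbbbbbbbbbbbbbbb
    use_public_ips = false

    [aliases]
    ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS}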
# Write configuration to disk open(config_file, "a").close() os.chmod(config_file, stat.S_IRUSR | stat.S_IWUSR) with open(config_file, "w") as cf: config.write(cf) + if _is_config_valid(args, config): + print("The configuration is valid") + + +def _remove_parameter_from_past_configuration(section, config, parameters_to_remove): + if config.has_section(section): + for par in parameters_to_remove: + config.remove_option(section, par) + + +def _create_vpc_parameters(vpc_label, aws_region_name, scheduler, max_queue_size, automatized_vpc=True): + vpc_parameters = {"__name__": vpc_label} + max_queue_size = int(max_queue_size) + if automatized_vpc: + vpc_parameters.update( + automate_creation_of_vpc_and_subnet( + aws_region_name, + _choose_network_configuration(scheduler), + max_queue_size, + ) + ) + else: + vpc_and_subnets = _list_vpcs_and_subnets(aws_region_name) + vpc_list = vpc_and_subnets["vpc_list"] + if not vpc_list: + print("There are no VPC for the given region. Starting automatic creation of vpc and subnets...") + vpc_parameters.update( + automate_creation_of_vpc_and_subnet( + aws_region_name, _choose_network_configuration(scheduler), max_queue_size + ) + ) + else: + vpc_id = _prompt_a_list_of_tuple("VPC ID", vpc_list) + vpc_parameters["vpc_id"] = vpc_id + subnet_list = vpc_and_subnets["vpc_to_subnets"][vpc_id] + if not subnet_list or ( + prompt("Automate Subnet creation? (y/n)", lambda x: x == "y" or x == "n", default_value="y") == "y" + ): + vpc_parameters.update( + automate_creation_of_subnet( + aws_region_name, vpc_id, _choose_network_configuration(scheduler), max_queue_size + ) + ) + else: + vpc_parameters.update(_ask_for_subnets(subnet_list)) + return vpc_parameters + + +def _ask_for_subnets(subnet_list): + master_subnet_id = _prompt_a_list_of_tuple("Master Subnet ID", subnet_list) + compute_subnet_id = _prompt_a_list_of_tuple("Compute Subnet ID", subnet_list, default_value=master_subnet_id) + vpc_parameters = {"master_subnet_id": master_subnet_id} + + if master_subnet_id != compute_subnet_id: + vpc_parameters["compute_subnet_id"] = compute_subnet_id + + return vpc_parameters + def _is_config_valid(args, config): """ @@ -372,84 +390,15 @@ def scheduler_handler(scheduler): "compute_instance_type", ) scheduler_info["max_size"] = "max_vcpus" - scheduler_info["initial_size"] = "desired_vcpus" + scheduler_info["min_size"] = "min_vcpus" + scheduler_info["initial_size_parameter_name"] = "desired_vcpus" + scheduler_info["value_for_initial_size"] = "min_size" + scheduler_info["instance_size_name"] = "vcpus" else: scheduler_info["parameters_to_remove"] = ("max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type") scheduler_info["max_size"] = "max_queue_size" - scheduler_info["initial_size"] = "initial_queue_size" + scheduler_info["min_size"] = "initial_queue_size" + scheduler_info["initial_size_parameter_name"] = "maintain_initial_size" + scheduler_info["value_for_initial_size"] = "true" + scheduler_info["instance_size_name"] = "instances" return scheduler_info - - -def _prompt_a_list(message, options, default_value=None): - """ - Wrap prompt to use it for list. 
- - :param message: the message to show the user - :param options: the list of item to show the user - :param default_value: the default value - :return: the validate value - """ - if not options: - print("ERROR: No options found for {0}".format(message)) - sys.exit(1) - if not default_value: - default_value = options[0] - - def input_to_parameter(to_transform): - try: - item = options[int(to_transform) - 1] - except ValueError: - item = to_transform - return item - - return prompt( - message, - validator=lambda x: x in options, - input_to_option=lambda x: input_to_parameter(x), - default_value=default_value, - options_to_print=_to_printable_list(options), - ) - - -def _prompt_a_list_of_tuple(message, options, default_value=None): - """ - Wrap prompt to use it over a list of tuple. - - The correct item will be the first element of each tuple. - :param message: the message to show to the user - :param options: the list of tuple - :param default_value: the default value - :return: the validated value - """ - if not options: - print("ERROR: No options found for {0}".format(message)) - sys.exit(1) - if not default_value: - default_value = options[0][0] - - def input_to_parameter(to_transform): - try: - item = options[int(to_transform) - 1][0] - except ValueError: - item = to_transform - return item - - valid_options = [item[0] for item in options] - - return prompt( - message, - validator=lambda x: x in valid_options, - input_to_option=lambda x: input_to_parameter(x), - default_value=default_value, - options_to_print=_to_printable_list(options), - ) - - -def _to_printable_list(items): - output = [] - for iterator, item in enumerate(items, start=1): - if isinstance(item, (list, tuple)): - output.append("{0}. {1}".format(iterator, " | ".join(item))) - else: - output.append("{0}. 
{1}".format(iterator, item)) - return output diff --git a/cli/pcluster/easyconfig/easyconfig_networking.py b/cli/pcluster/easyconfig/easyconfig_networking.py new file mode 100644 index 0000000000..d0baab04a9 --- /dev/null +++ b/cli/pcluster/easyconfig/easyconfig_networking.py @@ -0,0 +1,257 @@ +from future.backports import datetime + +import functools +import logging +import os +import sys +import time + +import boto3 +from botocore.exceptions import BotoCoreError, ClientError + +from pcluster.easyconfig.easyconfig_utils import _prompt_a_list +from pcluster.networking.vpc_factory import VpcFactory +from pcluster.utils import decide_cidr, get_subnet_cidr + +DEFAULT_AWS_REGION_NAME = "us-east-1" +LOGGER = logging.getLogger("pcluster.pcluster") +TIMESTAMP = "-{:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow()) +PUBLIC_PRIVATE_CONFIG_NAME = "public-private-with-nat" +PUBLIC_CONFIG_NAME = "public-only" +NUMBER_OF_IP_MASTER_SUBNET = 250 + + +def handle_client_exception(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except (BotoCoreError, ClientError) as e: + LOGGER.error("Failed with error: %s" % e) + LOGGER.error("Hint: please check your AWS credentials.") + LOGGER.error("Run `aws configure` or set the credentials as environment variables.") + sys.exit(1) + + return wrapper + + +class NetworkConfigurer: + """Create a NetworkConfigurer item that will be used by _create_network_configuration.""" + + def __init__( + self, aws_region_name, availability_zone, vpc_id, public_cidr="", internet_gateway_id="", private_cidr="" + ): + self.aws_region_name = aws_region_name + self.availability_zone = availability_zone + self.vpc_id = vpc_id + self.public_cidr = public_cidr + self.internet_gateway_id = internet_gateway_id + self.private_cidr = private_cidr + + def create_stack_parameters(self, also_private_cidr=False): + """Create cloudformation-compatible stack parameter given the variables.""" + parameters = [ + {"ParameterKey": "AvailabilityZone", "ParameterValue": self.availability_zone}, + {"ParameterKey": "InternetGatewayId", "ParameterValue": self.internet_gateway_id}, + {"ParameterKey": "PublicCIDR", "ParameterValue": self.public_cidr}, + {"ParameterKey": "VpcId", "ParameterValue": self.vpc_id}, + ] + if also_private_cidr: + parameters.append({"ParameterKey": "PrivateCIDR", "ParameterValue": self.private_cidr}) + return parameters + + +def _evaluate_aws_region(aws_region_name): + if aws_region_name: + region = aws_region_name + elif os.environ.get("AWS_DEFAULT_REGION"): + region = os.environ.get("AWS_DEFAULT_REGION") + else: + region = DEFAULT_AWS_REGION_NAME + return region + + +@handle_client_exception +def ec2_conn(aws_region_name): + region = _evaluate_aws_region(aws_region_name) + ec2 = boto3.client("ec2", region_name=region) + return ec2 + + +def automate_creation_of_vpc_and_subnet(aws_region_name, network_configuration, number_of_ip_for_compute): + print("Beginning creation of vpc. Please do not leave the terminal until the process has finish") + vpc_creator = VpcFactory(aws_region_name) + vpc_id = vpc_creator.create() + vpc_creator.setup(vpc_id, name="ParallelClusterVPC" + TIMESTAMP) + if not vpc_creator.check(vpc_id): + logging.critical("ERROR:Something went wrong in vpc creation. 
Please delete it and start the process again") + sys.exit(1) + + vpc_parameters = {"vpc_id": vpc_id} + vpc_parameters.update( + automate_creation_of_subnet(aws_region_name, vpc_id, network_configuration, number_of_ip_for_compute) + ) + return vpc_parameters + + +@handle_client_exception +def automate_creation_of_subnet(aws_region_name, vpc_id, network_configuration, number_of_ip_for_compute): + _check_the_vpc(aws_region_name, vpc_id) + ec2_client = ec2_conn(aws_region_name) + vpc_cidr = ec2_client.describe_vpcs(VpcIds=[vpc_id])["Vpcs"][0]["CidrBlock"] + internet_gateway_response = ec2_client.describe_internet_gateways( + Filters=[{"Name": "attachment.vpc-id", "Values": [vpc_id]}] + ) + internet_gateway_id = ( + internet_gateway_response["InternetGateways"][0]["InternetGatewayId"] + if internet_gateway_response["InternetGateways"] + else "" + ) + + configurer = NetworkConfigurer( + aws_region_name, _get_availability_zone(aws_region_name), vpc_id, internet_gateway_id=internet_gateway_id + ) + + possible_network_configuration = { + PUBLIC_PRIVATE_CONFIG_NAME: _create_public_private_with_nat_configuration, + PUBLIC_CONFIG_NAME: _create_public_configuration, + } + return possible_network_configuration[network_configuration]( + configurer, vpc_cidr, _get_subnets_in_vpc(aws_region_name, vpc_id), number_of_ip_for_compute + ) + + +def _create_public_configuration(configurer, vpc_cidr, subnet_cidrs, number_of_ip_for_compute): + configurer.public_cidr = get_subnet_cidr( + vpc_cidr=vpc_cidr, + occupied_cidr=subnet_cidrs, + max_queue_size=number_of_ip_for_compute + NUMBER_OF_IP_MASTER_SUBNET, + ) + _check_cidr(configurer.public_cidr) + template_name = "public.cfn.json" + stack_output = _create_network_configuration(template_name, configurer, also_private_cidr=False) + return {"master_subnet_id": stack_output[0]["OutputValue"], "use_public_ips": "true"} + + +def _create_public_private_with_nat_configuration(configurer, vpc_cidr, subnet_cidrs, number_of_ip_for_compute): + configurer.public_cidr = decide_cidr( + vpc_cidr=vpc_cidr, occupied_cidr=subnet_cidrs, target_size=NUMBER_OF_IP_MASTER_SUBNET + ) + _check_cidr(configurer.public_cidr) + subnet_cidrs.append(configurer.public_cidr) + configurer.private_cidr = get_subnet_cidr( + vpc_cidr=vpc_cidr, occupied_cidr=subnet_cidrs, max_queue_size=number_of_ip_for_compute + ) + _check_cidr(configurer.private_cidr) + template_name = "public-private.cfn.json" + stack_output = _create_network_configuration(template_name, configurer, also_private_cidr=True) + # stack output size is 2 + public_index = 0 if (stack_output[0]["OutputKey"] == "PublicSubnetId") else 1 + private_index = (public_index + 1) % 2 + return { + "master_subnet_id": stack_output[public_index]["OutputValue"], + "compute_subnet_id": stack_output[private_index]["OutputValue"], + "use_public_ips": "false", + } + + +# very similar to pcluster.py line 104 and after +def _create_network_configuration(template_name, configurer, also_private_cidr): + LOGGER.info("Creating stack for the network configuration...") + LOGGER.info("Do not leave the terminal until the process has finished") + cfn = boto3.client("cloudformation", region_name=configurer.aws_region_name) + capabilities = ["CAPABILITY_IAM"] + try: + stack_name = "parallelclusternetworking-" + ("pubpriv" if also_private_cidr else "pub") + TIMESTAMP + stack = cfn.create_stack( + StackName=stack_name, + TemplateURL="https://network-configuration-bucket.s3-eu-west-1.amazonaws.com/{0}".format(template_name), + 
Parameters=configurer.create_stack_parameters(also_private_cidr=also_private_cidr), + Capabilities=capabilities, + ) + LOGGER.debug("StackId: %s", stack.get("StackId")) + LOGGER.info("Stack Name: {0}".format(stack_name)) + status = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") + resource_status = "" + while status == "CREATE_IN_PROGRESS": + status = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") + events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents")[0] + resource_status = ( + "Status: %s - %s" % (events.get("LogicalResourceId"), events.get("ResourceStatus")) + ).ljust(80) + sys.stdout.write("\r%s" % resource_status) + sys.stdout.flush() + time.sleep(5) + # print the last status update in the logs + if resource_status != "": + LOGGER.debug(resource_status) + + if status != "CREATE_COMPLETE": + LOGGER.critical("\nCluster creation failed. Failed events:") + events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents") + for event in events: + if event.get("ResourceStatus") == "CREATE_FAILED": + LOGGER.info( + " - %s %s %s", + event.get("ResourceType"), + event.get("LogicalResourceId"), + event.get("ResourceStatusReason"), + ) + LOGGER.error("Could not create the network configuration") + sys.exit(0) + print() + LOGGER.info("The stack has been created") + return cfn.describe_stacks(StackName=stack_name).get("Stacks")[0]["Outputs"] + except KeyboardInterrupt as e: + print() + LOGGER.info("Could not write up the configuration.") + LOGGER.info("Please check manually the created resources and stacks") + except Exception as e: # Any exception is a problem + print() + LOGGER.error( + "An exception as occured. Please restart the configuration and check manually the created resource" + ) + LOGGER.critical(e) + sys.exit(1) + + +@handle_client_exception +def _get_availability_zone(aws_region_name): + # FIXME to update + return "" + + +def _choose_network_configuration(scheduler): + if scheduler == "awsbatch": + return PUBLIC_PRIVATE_CONFIG_NAME + options = ( + "Master in a public subnet and compute fleet in a private subnet", + "Master and compute fleet in the same public subnet", + ) + to_network_identifiers = {options[0]: PUBLIC_PRIVATE_CONFIG_NAME, options[1]: PUBLIC_CONFIG_NAME} + + return to_network_identifiers[_prompt_a_list("Network Configuration", options, default_value=options[0])] + + +@handle_client_exception +def _get_subnets_in_vpc(aws_region_name, vpc_id): + """Return a list of the subnets cidr contained in the vpc.""" + conn = ec2_conn(aws_region_name) + subnets = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}])["Subnets"] + return [subnet["CidrBlock"] for subnet in subnets] + + +def _check_the_vpc(aws_region_name, vpc_id): + # This function should be further expandend once we decide to allow the user to use his vpcs. For example, we should + # also check for the presence of a NAT gateway + if not VpcFactory(aws_region_name).check(vpc_id): + logging.error("WARNING: The vpc does not have the correct parameters set.") + + +def _check_cidr(cidr): + if not cidr: + LOGGER.error( + "Could not create the subnet needed for the network configuration. 
Check that the vpc has enough" + "space for the new subnet" + ) + sys.exit(1) diff --git a/cli/pcluster/easyconfig/easyconfig_utils.py b/cli/pcluster/easyconfig/easyconfig_utils.py new file mode 100644 index 0000000000..073053465b --- /dev/null +++ b/cli/pcluster/easyconfig/easyconfig_utils.py @@ -0,0 +1,130 @@ +# Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance +# with the License. A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and +# limitations under the License. +# fmt: off +import logging +import sys +from builtins import input + +LOGGER = logging.getLogger("pcluster.pcluster") + + +def prompt(message, validator=lambda x: True, input_to_option=lambda x: x, default_value=None, options_to_print=None): + """ + Prompt the user a message with optionally some options. + + :param message: the message to show to the user + :param validator: a function that predicates if the input is correct + :param input_to_option: a function that given the input transforms it in something else + :param default_value: the value to return as the default if the user doesn't insert anything + :param options_to_print: the options to print if necessary + :return: the value inserted by the user validated + """ + if options_to_print: + print("Allowed values for {0}:".format(message)) + for item in options_to_print: + print(item) + user_prompt = "{0} [{1}]: ".format(message, default_value or "") + + valid_user_input = False + result = default_value + # We give the user the possibility to try again if wrong + while not valid_user_input: + sys.stdin.flush() + user_input = input(user_prompt).strip() + if user_input == "": + user_input = default_value + result = input_to_option(user_input) + if validator(result): + valid_user_input = True + else: + print("ERROR: {0} is not an acceptable value for {1}".format(user_input, message)) + return result + + +def _prompt_a_list(message, options, default_value=None): + """ + Wrap prompt to use it for list. + + :param message: the message to show the user + :param options: the list of item to show the user + :param default_value: the default value + :return: the validate value + """ + if not options: + LOGGER.error("ERROR: No options found for {0}".format(message)) + sys.exit(1) + if not default_value: + default_value = options[0] + + def input_to_parameter(to_transform): + try: + if to_transform.isdigit() and to_transform != "0": + item = options[int(to_transform) - 1] + else: + item = to_transform + except (ValueError, IndexError): + item = to_transform + return item + + return prompt( + message, + validator=lambda x: x in options, + input_to_option=lambda x: input_to_parameter(x), + default_value=default_value, + options_to_print=_to_printable_list(options), + ) + + +def _prompt_a_list_of_tuple(message, options, default_value=None): + """ + Wrap prompt to use it over a list of tuple. + + The correct item will be the first element of each tuple. 
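As an illustration only (not part of the patch): these wrappers print the options numbered and accept either the index or the literal value, mapping an index back to the first element of the chosen tuple. A self-contained sketch of that mapping, with subnet tuples invented for the example:

    # Mirrors the index-or-value mapping used by _prompt_a_list_of_tuple;
    # the option data below is made up for illustration.
    options = [("subnet-11", "ParallelClusterPublicSubnet"), ("subnet-12", "ParallelClusterPrivateSubnet")]
    valid_options = [item[0] for item in options]

    def to_option(user_input):
        # "2" selects the second tuple and returns its first element;
        # anything that is not a usable index is taken literally.
        if user_input.isdigit() and user_input != "0":
            try:
                return options[int(user_input) - 1][0]
            except IndexError:
                return user_input
        return user_input

    assert to_option("2") == "subnet-12"
    assert to_option("subnet-11") in valid_options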
+ :param message: the message to show to the user + :param options: the list of tuple + :param default_value: the default value + :return: the validated value + """ + if not options: + LOGGER.error("ERROR: No options found for {0}".format(message)) + sys.exit(1) + if not default_value: + default_value = options[0][0] + + def input_to_parameter(to_transform): + try: + if to_transform.isdigit() and to_transform != "0": + item = options[int(to_transform) - 1][0] + else: + item = to_transform + except (ValueError, IndexError): + item = to_transform + return item + + valid_options = [item[0] for item in options] + + return prompt( + message, + validator=lambda x: x in valid_options, + input_to_option=lambda x: input_to_parameter(x), + default_value=default_value, + options_to_print=_to_printable_list(options), + ) + + +def _to_printable_list(items): + output = [] + for iterator, item in enumerate(items, start=1): + if isinstance(item, (list, tuple)): + output.append("{0}. {1}".format(iterator, " | ".join(item))) + else: + output.append("{0}. {1}".format(iterator, item)) + return output From 9d7fafafc6cb0656869bf3692eda5a23b744b57b Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 3 Jun 2019 17:06:03 +0200 Subject: [PATCH 017/201] Unit-tested pcluster configure, refactoring test folder Signed-off-by: Matteo Fiordarancio --- cli/.flake8 | 1 + cli/pcluster/easyconfig/easyconfig.py | 159 ++--- cli/tests/awsbatch/conftest.py | 120 +--- cli/tests/awsbatch/test_awsbstat.py | 3 +- cli/tests/{awsbatch => }/common.py | 8 - cli/tests/conftest.py | 114 ++++ .../configure/test_pclusterconfigure.py | 554 ++++++++++++++++++ .../error.txt | 0 .../output.txt | 47 ++ .../test | 27 + .../error.txt | 0 .../output.txt | 41 ++ .../test | 27 + .../error.txt | 0 .../output.txt | 44 ++ .../test | 28 + .../error.txt | 0 .../output.txt | 44 ++ .../test | 28 + .../error.txt | 0 .../original_config_file | 27 + .../output.txt | 44 ++ .../test | 28 + .../error.txt | 0 .../output.txt | 35 ++ .../test | 28 + .../error.txt | 0 .../output.txt | 40 ++ .../test | 28 + .../error.txt | 0 .../output.txt | 41 ++ .../test_vpc_automation_no_vpc_in_region/test | 28 + .../error.txt | 0 .../output.txt | 31 + .../test | 28 + .../{ => configure}/test_subnet_cidr.py | 4 +- cli/tests/requirements.txt | 1 + cli/tox.ini | 2 +- 38 files changed, 1411 insertions(+), 199 deletions(-) rename cli/tests/{awsbatch => }/common.py (52%) create mode 100644 cli/tests/conftest.py create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure.py create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/output.txt create mode 100644 
cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/original_config_file create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/test rename cli/tests/pcluster/{ => configure}/test_subnet_cidr.py (96%) diff --git a/cli/.flake8 b/cli/.flake8 index 37ac11bacb..6e505b5476 100644 --- a/cli/.flake8 +++ b/cli/.flake8 @@ -18,6 +18,7 @@ per-file-ignores = pcluster/easyconfig.py: E402, D103 pcluster/cfnconfig.py: E402, D103 tests/pcluster/pcluster-unittest.py: D101, D102, D103 + tests/pcluster/configure/test_*.py: D101, D102, D103 tests/pcluster/test_*.py: D101, D102, D103 tests/awsbatch/test_*.py: D101, D102 ../tests/integration-tests/tests/*: D103 diff --git a/cli/pcluster/easyconfig/easyconfig.py b/cli/pcluster/easyconfig/easyconfig.py index 7d20125412..d0c50abb9e 100644 --- a/cli/pcluster/easyconfig/easyconfig.py +++ b/cli/pcluster/easyconfig/easyconfig.py @@ -47,13 +47,12 @@ "cluster_template": "default", "scheduler": "sge", "os": "alinux", - "max_queue_size": "10", + "max_size": "10", "master_instance_type": "t2.micro", "compute_instance_type": "t2.micro", "vpc_name": "public", "min_size": "0", } -FORCED_BATCH_VALUES = 
{"os": "alinux", "compute_instance_type": "optimal"} VPC_PARAMETERS_TO_REMOVE = "vpc-id", "master_subnet_id", "compute_subnet_id", "use_public_ips", "compute_subnet_cidr" @@ -165,35 +164,11 @@ def configure(args): # noqa: C901 FIXME!!! config, section=cluster_label, parameter_name="scheduler", default_value=DEFAULT_VALUES["scheduler"] ), ) - scheduler_info = scheduler_handler(scheduler) - is_aws_batch = scheduler == "awsbatch" - if is_aws_batch: - operating_system = FORCED_BATCH_VALUES["os"] - else: - operating_system = _prompt_a_list( - "Operating System", - get_supported_os(scheduler), - default_value=get_config_parameter( - config, section=cluster_label, parameter_name="base_os", default_value=DEFAULT_VALUES["os"] - ), - ) + scheduler_handler = SchedulerHandler(config, cluster_label, scheduler) - min_queue_size = prompt( - "Minimum cluster size ({0})".format(scheduler_info["instance_size_name"]), - validator=lambda x: x.isdigit(), - default_value=get_config_parameter( - config, cluster_label, scheduler_info["min_size"], DEFAULT_VALUES["min_size"] - ), - ) - - max_queue_size = prompt( - "Maximum cluster size ({0})".format(scheduler_info["instance_size_name"]), - validator=lambda x: x.isdigit() and int(x) >= int(min_queue_size), - default_value=get_config_parameter( - config, cluster_label, scheduler_info["max_size"], DEFAULT_VALUES["max_queue_size"] - ), - ) + scheduler_handler.prompt_os() + scheduler_handler.prompt_min_max() master_instance_type = prompt( "Master instance type", @@ -206,20 +181,13 @@ def configure(args): # noqa: C901 FIXME!!! ), ) - if is_aws_batch: - compute_instance_type = FORCED_BATCH_VALUES["compute_instance_type"] - else: - compute_instance_type = prompt( - "Compute instance type", - lambda x: x in _list_instances(), - default_value=DEFAULT_VALUES["compute_instance_type"], - ) + scheduler_handler.prompt_compute_sizes() key_name = _prompt_a_list("EC2 Key Pair Name", _list_keys(aws_region_name)) automate_vpc = prompt("Automate VPC creation? (y/n)", lambda x: x == "y" or x == "n", default_value="n") == "y" vpc_parameters = _create_vpc_parameters( - vpc_label, aws_region_name, scheduler, max_queue_size, automatized_vpc=automate_vpc + vpc_label, aws_region_name, scheduler, scheduler_handler.max_queue_size, automatized_vpc=automate_vpc ) global_parameters = { "__name__": "global", @@ -233,22 +201,15 @@ def configure(args): # noqa: C901 FIXME!!! 
"key_name": key_name, "vpc_settings": cluster_template, "scheduler": scheduler, - "base_os": operating_system, - "compute_instance_type": compute_instance_type, "master_instance_type": master_instance_type, - scheduler_info["max_size"]: max_queue_size, - scheduler_info["min_size"]: min_queue_size, } - if scheduler_info["value_for_initial_size"] == "min_size": - cluster_parameters[scheduler_info["initial_size_parameter_name"]] = min_queue_size - else: - cluster_parameters[scheduler_info["initial_size_parameter_name"]] = scheduler_info["value_for_initial_size"] + cluster_parameters.update(scheduler_handler.get_scheduler_parameters()) aliases_parameters = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} sections = [aws_parameters, cluster_parameters, vpc_parameters, global_parameters, aliases_parameters] # We first remove unnecessary parameters from the past configurations - _remove_parameter_from_past_configuration(cluster_label, config, scheduler_info["parameters_to_remove"]) + _remove_parameter_from_past_configuration(cluster_label, config, scheduler_handler.get_parameters_to_remove()) _remove_parameter_from_past_configuration(vpc_label, config, VPC_PARAMETERS_TO_REMOVE) # Loop through the configuration sections we care about @@ -374,31 +335,83 @@ def get_config_parameter(config, section, parameter_name, default_value): return config.get(section, parameter_name) if config.has_option(section, parameter_name) else default_value -def scheduler_handler(scheduler): - """ - Return a dictionary containing information based on the scheduler. +class SchedulerHandler: + """Handle question scheduler related.""" + + def __init__(self, config, cluster_label, scheduler): + self.scheduler = scheduler + self.config = config + self.cluster_label = cluster_label + + self.is_aws_batch = True if scheduler == "awsbatch" else False + + self.instance_size_name = "vcpus" if self.is_aws_batch else "instances" + self.max_size_name = "max_vcpus" if self.is_aws_batch else "max_queue_size" + self.min_size_name = "min_vcpus" if self.is_aws_batch else "initial_queue_size" + + self.base_os = "alinux" + self.compute_instance_type = "optimal" + self.max_queue_size = DEFAULT_VALUES["max_size"] + self.min_queue_size = DEFAULT_VALUES["min_size"] + + def prompt_os(self): + """Ask for os, if necessary.""" + if not self.is_aws_batch: + self.base_os = _prompt_a_list( + "Operating System", + get_supported_os(self.scheduler), + default_value=get_config_parameter( + self.config, + section=self.cluster_label, + parameter_name="base_os", + default_value=DEFAULT_VALUES["os"], + ), + ) - :param scheduler the target scheduler - :return: a dictionary with containing the information - """ - scheduler_info = {} - if scheduler == "awsbatch": - scheduler_info["parameters_to_remove"] = ( - "max_queue_size", - "initial_queue_size", - "maintain_initial_size", - "compute_instance_type", + def prompt_compute_sizes(self): + """Ask for compute_instance_type, if necessary.""" + if not self.is_aws_batch: + self.compute_instance_type = prompt( + "Compute instance type", + lambda x: x in _list_instances(), + default_value=DEFAULT_VALUES["compute_instance_type"], + ) + + def prompt_min_max(self): + """Ask for max and min instances / vcpus.""" + self.min_queue_size = prompt( + "Minimum cluster size ({0})".format(self.instance_size_name), + validator=lambda x: x.isdigit(), + default_value=get_config_parameter( + self.config, self.cluster_label, self.min_size_name, DEFAULT_VALUES["min_size"] + ), ) - scheduler_info["max_size"] = 
"max_vcpus" - scheduler_info["min_size"] = "min_vcpus" - scheduler_info["initial_size_parameter_name"] = "desired_vcpus" - scheduler_info["value_for_initial_size"] = "min_size" - scheduler_info["instance_size_name"] = "vcpus" - else: - scheduler_info["parameters_to_remove"] = ("max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type") - scheduler_info["max_size"] = "max_queue_size" - scheduler_info["min_size"] = "initial_queue_size" - scheduler_info["initial_size_parameter_name"] = "maintain_initial_size" - scheduler_info["value_for_initial_size"] = "true" - scheduler_info["instance_size_name"] = "instances" - return scheduler_info + + self.max_queue_size = prompt( + "Maximum cluster size ({0})".format(self.instance_size_name), + validator=lambda x: x.isdigit() and int(x) >= int(self.min_queue_size), + default_value=get_config_parameter( + self.config, self.cluster_label, self.max_size_name, DEFAULT_VALUES["max_size"] + ), + ) + + def get_scheduler_parameters(self): + """Return a dict containing the value obtained that are dependent on the scheduler.""" + scheduler_parameters = { + "base_os": self.base_os, + "compute_instance_type": self.compute_instance_type, + self.max_size_name: self.max_queue_size, + self.min_size_name: self.min_queue_size, + } + if self.is_aws_batch: + scheduler_parameters["desired_vcpus"] = self.min_queue_size + else: + scheduler_parameters["maintain_initial_size"] = "true" + return scheduler_parameters + + def get_parameters_to_remove(self): + """Return a list of parameter that needs to be removed from the configuration.""" + if self.is_aws_batch: + return "max_queue_size", "initial_queue_size", "maintain_initial_size", "compute_instance_type" + else: + return "max_vcpus", "desired_vcpus", "min_vcpus", "compute_instance_type" diff --git a/cli/tests/awsbatch/conftest.py b/cli/tests/awsbatch/conftest.py index d9db3cff42..18adcad6da 100644 --- a/cli/tests/awsbatch/conftest.py +++ b/cli/tests/awsbatch/conftest.py @@ -1,48 +1,12 @@ -""" -This module loads pytest fixtures and plugins needed by all tests. - -It's very useful for fixtures that need to be shared among all tests. -""" -from __future__ import print_function - -import boto3 import pytest -from botocore.stub import Stubber - -from common import DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG - -@pytest.fixture -def failed_with_message(capsys): - """Assert that the command exited with a specific error message.""" - __tracebackhide__ = True - - def _failed_with_message(func, message, *args, **kwargs): - __tracebackhide__ = True - with pytest.raises(SystemExit) as error: - func(*args, **kwargs) - assert error.type == SystemExit - assert error.value.code == 1 - if message: - assert capsys.readouterr().err == message - - return _failed_with_message - - -@pytest.fixture() -def test_datadir(request, datadir): - """ - Inject the datadir with resources for the specific test function. - - If the test function is declared in a class then datadir is ClassName/FunctionName - otherwise it is only FunctionName. 
- """ - function_name = request.function.__name__ - if not request.cls: - return datadir / function_name - - class_name = request.cls.__name__ - return datadir / "{0}/{1}".format(class_name, function_name) +DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG = { + "region": "region", + "proxy": None, + "aws_access_key_id": "aws_access_key_id", + "aws_secret_access_key": "aws_secret_access_key", + "job_queue": "job_queue", +} @pytest.fixture() @@ -53,73 +17,3 @@ def awsbatchcliconfig_mock(request, mocker): for key, value in DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG.items(): setattr(mock.return_value, key, value) return mock - - -@pytest.fixture() -def convert_to_date_mock(request, mocker): - """Mock convert_to_date function by enforcing the timezone to UTC.""" - module_under_test = request.module.__name__.replace("test_", "") - - def _convert_to_date_utc(*args, **kwargs): - from awsbatch.utils import convert_to_date - from dateutil import tz - - # executes convert_to_date but overrides arguments so that timezone is enforced to utc - if "timezone" in kwargs: - del kwargs["timezone"] - return convert_to_date(timezone=tz.tzutc(), *args, **kwargs) - - return mocker.patch("awsbatch." + module_under_test + ".convert_to_date", wraps=_convert_to_date_utc) - - -@pytest.fixture() -def boto3_stubber(request, mocker): - """ - Create a function to easily mock boto3 clients. - - To mock a boto3 service simply pass the name of the service to mock and - the mocked requests, where mocked_requests is an object containing the method to mock, - the response to return and the expected params for the boto3 method that gets called. - - The function makes use of botocore.Stubber to mock the boto3 API calls. - Multiple boto3 services can be mocked as part of the same test. - """ - __tracebackhide__ = True - created_stubbers = [] - mocked_clients = {} - region = "us-east-1" - # Mock Boto3ClientFactory in the module under test. - # Use a side_effect to allow mocking multiple clients in the same test function. - module_under_test = request.module.__name__.replace("test_", "") - mocked_client_factory = mocker.patch("awsbatch." + module_under_test + ".Boto3ClientFactory", autospec=True) - mocked_client_factory.return_value.get_client.side_effect = lambda x: mocked_clients[x] - mocked_client_factory.return_value.region = region - - def _boto3_stubber(service, mocked_requests): - client = boto3.client(service, region) - stubber = Stubber(client) - # Save a ref to the stubber so that we can deactivate it at the end of the test. - created_stubbers.append(stubber) - - # Attach mocked requests to the Stubber and activate it. - if not isinstance(mocked_requests, list): - mocked_requests = [mocked_requests] - for mocked_request in mocked_requests: - stubber.add_response( - mocked_request.method, mocked_request.response, expected_params=mocked_request.expected_params - ) - stubber.activate() - - # Add stubber to the collection of mocked clients. This allows to mock multiple clients. - # Mocking twice the same client will replace the previous one. - mocked_clients[service] = client - return client - - # yield allows to return the value and then continue the execution when the test is over. - # Used for resources cleanup. - yield _boto3_stubber - - # Assert that all mocked requests were consumed and deactivate all stubbers. 
- for stubber in created_stubbers: - stubber.assert_no_pending_responses() - stubber.deactivate() diff --git a/cli/tests/awsbatch/test_awsbstat.py b/cli/tests/awsbatch/test_awsbstat.py index edf8dc6296..105fb768ee 100644 --- a/cli/tests/awsbatch/test_awsbstat.py +++ b/cli/tests/awsbatch/test_awsbstat.py @@ -3,7 +3,8 @@ import pytest from awsbatch import awsbstat -from common import DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG, MockedBoto3Request, read_text +from tests.awsbatch.conftest import DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG +from tests.common import MockedBoto3Request, read_text ALL_JOB_STATUS = ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING", "SUCCEEDED", "FAILED"] DEFAULT_JOB_STATUS = ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING"] diff --git a/cli/tests/awsbatch/common.py b/cli/tests/common.py similarity index 52% rename from cli/tests/awsbatch/common.py rename to cli/tests/common.py index a0bf69f793..7faac934a3 100644 --- a/cli/tests/awsbatch/common.py +++ b/cli/tests/common.py @@ -2,14 +2,6 @@ MockedBoto3Request = namedtuple("MockedBoto3Request", ["method", "response", "expected_params"]) -DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG = { - "region": "region", - "proxy": None, - "aws_access_key_id": "aws_access_key_id", - "aws_secret_access_key": "aws_secret_access_key", - "job_queue": "job_queue", -} - def read_text(path): """Read the content of a file.""" diff --git a/cli/tests/conftest.py b/cli/tests/conftest.py new file mode 100644 index 0000000000..87a0fff587 --- /dev/null +++ b/cli/tests/conftest.py @@ -0,0 +1,114 @@ +""" +This module loads pytest fixtures and plugins needed by all tests. + +It's very useful for fixtures that need to be shared among all tests. +""" +from __future__ import print_function + +import boto3 +import pytest +from botocore.stub import Stubber + + + +@pytest.fixture +def failed_with_message(capsys): + """Assert that the command exited with a specific error message.""" + __tracebackhide__ = True + + def _failed_with_message(func, message, *args, **kwargs): + __tracebackhide__ = True + with pytest.raises(SystemExit) as error: + func(*args, **kwargs) + assert error.type == SystemExit + assert error.value.code == 1 + if message: + assert capsys.readouterr().err == message + + return _failed_with_message + + +@pytest.fixture() +def test_datadir(request, datadir): + """ + Inject the datadir with resources for the specific test function. + + If the test function is declared in a class then datadir is ClassName/FunctionName + otherwise it is only FunctionName. + """ + function_name = request.function.__name__ + if not request.cls: + return datadir / function_name + + class_name = request.cls.__name__ + return datadir / "{0}/{1}".format(class_name, function_name) + + +@pytest.fixture() +def convert_to_date_mock(request, mocker): + """Mock convert_to_date function by enforcing the timezone to UTC.""" + module_under_test = request.module.__name__.replace("test_", "") + + def _convert_to_date_utc(*args, **kwargs): + from awsbatch.utils import convert_to_date + from dateutil import tz + + # executes convert_to_date but overrides arguments so that timezone is enforced to utc + if "timezone" in kwargs: + del kwargs["timezone"] + return convert_to_date(timezone=tz.tzutc(), *args, **kwargs) + + return mocker.patch("awsbatch." + module_under_test + ".convert_to_date", wraps=_convert_to_date_utc) + + +@pytest.fixture() +def boto3_stubber(request, mocker): + """ + Create a function to easily mock boto3 clients. 
+ + To mock a boto3 service simply pass the name of the service to mock and + the mocked requests, where mocked_requests is an object containing the method to mock, + the response to return and the expected params for the boto3 method that gets called. + + The function makes use of botocore.Stubber to mock the boto3 API calls. + Multiple boto3 services can be mocked as part of the same test. + """ + __tracebackhide__ = True + created_stubbers = [] + mocked_clients = {} + region = "us-east-1" + # Mock Boto3ClientFactory in the module under test. + # Use a side_effect to allow mocking multiple clients in the same test function. + module_under_test = request.module.__name__.replace("test_", "") + mocked_client_factory = mocker.patch("awsbatch." + module_under_test + ".Boto3ClientFactory", autospec=True) + mocked_client_factory.return_value.get_client.side_effect = lambda x: mocked_clients[x] + mocked_client_factory.return_value.region = region + + def _boto3_stubber(service, mocked_requests): + client = boto3.client(service, region) + stubber = Stubber(client) + # Save a ref to the stubber so that we can deactivate it at the end of the test. + created_stubbers.append(stubber) + + # Attach mocked requests to the Stubber and activate it. + if not isinstance(mocked_requests, list): + mocked_requests = [mocked_requests] + for mocked_request in mocked_requests: + stubber.add_response( + mocked_request.method, mocked_request.response, expected_params=mocked_request.expected_params + ) + stubber.activate() + + # Add stubber to the collection of mocked clients. This allows to mock multiple clients. + # Mocking twice the same client will replace the previous one. + mocked_clients[service] = client + return client + + # yield allows to return the value and then continue the execution when the test is over. + # Used for resources cleanup. + yield _boto3_stubber + + # Assert that all mocked requests were consumed and deactivate all stubbers. + for stubber in created_stubbers: + stubber.assert_no_pending_responses() + stubber.deactivate() diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure.py b/cli/tests/pcluster/configure/test_pclusterconfigure.py new file mode 100644 index 0000000000..05db13780c --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure.py @@ -0,0 +1,554 @@ +import os +import tempfile + +import pytest +from configparser import ConfigParser + +from assertpy import assert_that +from pcluster.configure.easyconfig import configure + +EASYCONFIG = "pcluster.configure.easyconfig." +NETWORKING = "pcluster.configure.easyconfig_networking." +UTILS = "pcluster.configure.easyconfig_utils." 
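Before the configure tests begin, one note on the boto3_stubber fixture that patch 017 moves into tests/conftest.py: the hypothetical test below (not taken from the patch) shows how it is meant to be used together with MockedBoto3Request. Because the fixture derives the module to patch from the test module name (test_awsbstat.py maps to awsbatch.awsbstat), the sketch assumes it lives in such a module; the EC2 operation and response payload are invented for the example.

    from tests.common import MockedBoto3Request

    def test_describe_key_pairs_is_stubbed(boto3_stubber):
        mocked_request = MockedBoto3Request(
            method="describe_key_pairs",
            response={"KeyPairs": [{"KeyName": "key1"}]},
            expected_params=None,  # None makes botocore's Stubber skip parameter validation
        )
        ec2_client = boto3_stubber("ec2", mocked_request)
        assert ec2_client.describe_key_pairs()["KeyPairs"][0]["KeyName"] == "key1"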
+ +TEMP_PATH_FOR_CONFIG = os.path.join(tempfile.gettempdir(), "test_pclusterconfigure") + + +def _mock_input(mocker, input_in_order): + mocker.patch(UTILS + "input", side_effect=input_in_order) + + +def _mock_aws_region(mocker): + regions = [ + "eu-north-1", + "ap-south-1", + "eu-west-3", + "eu-west-2", + "eu-west-1", + "ap-northeast-2", + "ap-northeast-1", + "sa-east-1", + "ca-central-1", + "ap-southeast-1", + "ap-southeast-2", + "eu-central-1", + "us-east-1", + "us-east-2", + "us-west-1", + "us-west-2", + ] + mocker.patch(EASYCONFIG + "get_regions", return_value=regions) + + +def _mock_list_keys(mocker): + # If changed look for test_prompt_a_list + keys = ["key1", "key2", "key3", "key4", "key5", "key6"] + mocker.patch(EASYCONFIG + "_list_keys", return_value=keys) + + +def _mock_list_vpcs_and_subnets(mocker, empty_region=False): + # If changed look for test_prompt_a_list_of_tuple + if empty_region: + dict = {"vpc_list": [], "vpc_to_subnets": {}} + else: + dict = { + "vpc_list": [ + ("vpc-1", "ParallelClusterVPC-20190625135738", "2 subnets inside"), + ("vpc-2", "ParallelClusterVPC-20190624105051", "0 subnets inside"), + ("vpc-3", "default", "3 subnets inside"), + ("vpc-4", "ParallelClusterVPC-20190626095403", "1 subnets inside"), + ], + "vpc_to_subnets": { + "vpc-1": [ + ("subnet-11", "ParallelClusterPublicSubnet", "Subnet size: 256"), + ("subnet-12", "ParallelClusterPrivateSubnet", "Subnet size: 4096"), + ], + "vpc-2": [], + "vpc-3": [ + ("subnet-31", "Subnet size: 4096"), + ("subnet-32", "Subnet size: 4096"), + ("subnet-33", "Subnet size: 4096"), + ], + "vpc-4": [("subnet-41", "ParallelClusterPublicSubnet", "Subnet size: 4096")], + }, + } + mocker.patch(EASYCONFIG + "_list_vpcs_and_subnets", return_value=dict) + + +def _mock_get_subnets_in_vpc(mocker): + mocker.patch(NETWORKING + "_get_subnets_in_vpc", return_value=[]) + + +def _mock_vpc_factory(mocker, is_a_valid_vpc): + vpc_factory = NETWORKING + "VpcFactory" + mock = mocker.patch(vpc_factory, autospec=True) + mock.return_value.create.return_value = "vpc-0" + mock.return_value.check.return_value = is_a_valid_vpc + + +def _mock_ec2_conn(mocker): + mocker.patch(NETWORKING + "_extract_vpc_cidr", return_value="10.0.0.0/16") + mocker.patch(NETWORKING + "_extract_ig_id", return_value="ig-123") + + +def _mock_create_network_configuration(mocker, public_subnet_id, private_subnet_id=None): + def _side_effect_function(template_name, configurer, also_private_cidr): + if private_subnet_id: + return [ + {"OutputKey": "PrivateSubnetId", "OutputValue": private_subnet_id}, + {"OutputKey": "PublicSubnetId", "OutputValue": public_subnet_id}, + ] + else: + return [{"OutputKey": "PublicSubnetId", "OutputValue": public_subnet_id}] + + mocker.patch(NETWORKING + "_create_network_configuration", side_effect=_side_effect_function) + + +def _launch_config(mocker, path, remove_path=True): + if remove_path and os.path.isfile(path): + os.remove(path) + args = mocker.Mock + args.config_file = path + configure(args) + + +def _are_configurations_equals(path_verify, path_verified): + if not os.path.isfile(path_verify): + return False + if not os.path.isfile(path_verified): + return False + config_temp = ConfigParser() + config_temp.read(path_verify) + dict1 = {s: dict(config_temp.items(s)) for s in config_temp.sections()} + config_expected = ConfigParser() + config_expected.read(path_verified) + dict2 = {s: dict(config_expected.items(s)) for s in config_expected.sections()} + for section_name, section in dict1.items(): + for key, value in section.items(): + if 
dict2[section_name][key] != value: + return False + return True + + +def _write_output_and_error(capsys, error_path, output_path): + readouterr = capsys.readouterr() + with open(error_path, "w+") as file: + file.write(readouterr.err) + with open(output_path, "w+") as file: + file.write(readouterr.out) + + +def _are_output_error_correct(capsys, output, error): + readouterr = capsys.readouterr() + with open(output) as f: + assert_that(readouterr.out).is_equal_to(f.read()) + with open(error) as f: + assert_that(readouterr.err).is_equal_to(f.read()) + + +class ComposeInput: + def __init__(self, aws_region_name, scheduler): + self.is_not_aws_batch = scheduler != "awsbatch" + self.input_list = [aws_region_name, scheduler] + + def add_first_flow(self, op_sys, min_size, max_size, master_instance, compute_instance, key): + if self.is_not_aws_batch: + self.input_list.append(op_sys) + self.input_list.extend([min_size, max_size, master_instance]) + if self.is_not_aws_batch: + self.input_list.append(compute_instance) + self.input_list.append(key) + + def add_no_automation_no_empty_vpc(self, vpc_id, master_id, compute_id): + self.input_list.extend(["n", vpc_id, "n", master_id, compute_id]) + + def add_sub_automation(self, vpc_id, network_configuration, vpc_has_subnets=True): + self.input_list.extend(["n", vpc_id]) + if vpc_has_subnets: + self.input_list.append("y") + if self.is_not_aws_batch: + self.input_list.append(network_configuration) + + def add_vpc_sub_automation_empty_region(self, network_configuration): + self.input_list.extend(["n", network_configuration]) + + def add_vpc_sub_automation(self, network_configuration): + self.input_list.append("y") + if self.is_not_aws_batch: + self.input_list.append(network_configuration) + + def finalize_config(self, mocker): + _mock_input(mocker, self.input_list) + + +class MockHandler: + def __init__(self, mocker, empty_region=False): + self.mocker = mocker + _mock_aws_region(self.mocker) + _mock_list_keys(self.mocker) + _mock_list_vpcs_and_subnets(self.mocker, empty_region) + + def add_subnet_automation(self, public_subnet_id, is_a_valid_vpc=True, private_subnet_id=None): + _mock_vpc_factory(self.mocker, is_a_valid_vpc) + _mock_get_subnets_in_vpc(self.mocker) + _mock_ec2_conn(self.mocker) + _mock_create_network_configuration(self.mocker, public_subnet_id, private_subnet_id) + + +def get_file_path(test_datadir): + config = os.path.join(test_datadir, "test") + output = os.path.join(test_datadir, "output.txt") + error = os.path.join(test_datadir, "error.txt") + return config, error, output + + +def _verify_test(mocker, capsys, output, error, config, temp_path_for_config): + _launch_config(mocker, temp_path_for_config) + assert_that(_are_configurations_equals(temp_path_for_config, config)).is_true() + _are_output_error_correct(capsys, output, error) + os.remove(temp_path_for_config) + + +# note that user_prompt passed to input will not be shown. +def create_new_test(mocker, capsys): + """ + Create a new test for the pcluster configure. + + You have to be sure that pcluster configure is correct when you use this function. You will also have to check + output manually. 
Note that it does not print user_prompt passed as input, but neither does all the tests + """ + test_name = "test_vpc_automation_no_vpc_in_region" + config_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "test") + error_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "error.txt") + output_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "output.txt") + + mock_handler = MockHandler(mocker, empty_region=True) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="slurm") + input_composer.add_first_flow( + op_sys="centos6", + min_size="13", + max_size="14", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + ) + input_composer.add_vpc_sub_automation_empty_region(network_configuration="1") + input_composer.finalize_config(mocker) + + _launch_config(mocker, config_path) + _write_output_and_error(capsys, error_path, output_path) + assert_that(True).is_true() + + +def test_no_automation_no_awsbatch_no_errors(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + MockHandler(mocker) + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="torque") + input_composer.add_first_flow( + op_sys="alinux", + min_size="13", + max_size="14", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + ) + input_composer.add_no_automation_no_empty_vpc(vpc_id="vpc-1", master_id="subnet-11", compute_id="subnet-12") + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_no_automation_yes_awsbatch_no_errors(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + MockHandler(mocker) + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="awsbatch") + input_composer.add_first_flow( + op_sys=None, min_size="13", max_size="14", master_instance="t2.nano", compute_instance=None, key="key1" + ) + input_composer.add_no_automation_no_empty_vpc(vpc_id="vpc-1", master_id="subnet-11", compute_id="subnet-12") + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_subnet_automation_no_awsbatch_no_errors_empty_vpc(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="sge") + input_composer.add_first_flow( + op_sys="centos6", + min_size="13", + max_size="14", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + ) + input_composer.add_sub_automation(vpc_id="vpc-2", network_configuration="1", vpc_has_subnets=False) + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_subnet_automation_no_awsbatch_no_errors(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="sge") + input_composer.add_first_flow( + op_sys="centos6", + min_size="13", + max_size="14", + master_instance="t2.nano", + 
compute_instance="t2.micro", + key="key1", + ) + input_composer.add_sub_automation(vpc_id="vpc-1", network_configuration="1", vpc_has_subnets=True) + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_subnet_automation_no_awsbatch_no_errors_with_config_file(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + old_config_file = test_datadir / "original_config_file" + + mock_handler = MockHandler(mocker) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="sge") + input_composer.add_first_flow( + op_sys="centos6", + min_size="13", + max_size="14", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + ) + input_composer.add_sub_automation(vpc_id="vpc-1", network_configuration="1", vpc_has_subnets=True) + input_composer.finalize_config(mocker) + + _launch_config(mocker, old_config_file, remove_path=False) + assert_that(_are_configurations_equals(old_config_file, config)).is_true() + _are_output_error_correct(capsys, output, error) + os.remove(old_config_file) + + +def test_vpc_automation_no_awsbatch_no_errors(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="sge") + input_composer.add_first_flow( + op_sys="centos6", + min_size="13", + max_size="14", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + ) + input_composer.add_vpc_sub_automation(network_configuration="1") + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_vpc_automation_yes_awsbatch_no_errors(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="awsbatch") + input_composer.add_first_flow( + op_sys=None, min_size="13", max_size="14", master_instance="t2.nano", compute_instance=None, key="key1" + ) + input_composer.add_vpc_sub_automation(network_configuration="1") + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_vpc_automation_invalid_vpc_block(mocker, capsys, test_datadir): + with pytest.raises(SystemExit): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker) + mock_handler.add_subnet_automation( + public_subnet_id="subnet-pu", private_subnet_id="subnet-pr", is_a_valid_vpc=False + ) + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="awsbatch") + input_composer.add_first_flow( + op_sys=None, min_size="13", max_size="14", master_instance="t2.nano", compute_instance=None, key="key1" + ) + input_composer.add_vpc_sub_automation(network_configuration="1") + input_composer.finalize_config(mocker) + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_subnet_automation_yes_awsbatch_invalid_vpc(mocker, capsys, test_datadir, caplog): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker) + 
mock_handler.add_subnet_automation( + public_subnet_id="subnet-pu", private_subnet_id="subnet-pr", is_a_valid_vpc=False + ) + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="awsbatch") + input_composer.add_first_flow( + op_sys=None, min_size="13", max_size="14", master_instance="t2.nano", compute_instance=None, key="key1" + ) + input_composer.add_sub_automation(vpc_id="vpc-1", network_configuration="1") + input_composer.finalize_config(mocker) + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + assert_that("WARNING: The vpc does not have the correct parameters set." in caplog.text).is_true() + + +def test_vpc_automation_no_vpc_in_region(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker, empty_region=True) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="slurm") + input_composer.add_first_flow( + op_sys="centos6", + min_size="13", + max_size="14", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + ) + input_composer.add_vpc_sub_automation_empty_region(network_configuration="1") + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def general_wrapper_for_prompt_testing( + mocker, + region="eu-west-1", + scheduler="torque", + op_sys="centos6", + min_size="0", + max_size="10", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + vpc_id="vpc-1", + master_id="subnet-11", + compute_id="subnet-12", +): + path = os.path.join(tempfile.gettempdir(), "test_pclusterconfigure") + MockHandler(mocker) + input_composer = ComposeInput(aws_region_name=region, scheduler=scheduler) + input_composer.add_first_flow(op_sys, min_size, max_size, master_instance, compute_instance, key) + input_composer.add_no_automation_no_empty_vpc(vpc_id, master_id, compute_id) + input_composer.finalize_config(mocker) + + _launch_config(mocker, path) + return True + + +def test_min_max(mocker): + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, min_size="17", max_size="16") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, min_size="-17", max_size="16") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, min_size="1", max_size="-16") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, min_size="1", max_size="1.6") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, min_size="1", max_size="1,6") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, min_size="schrodinger", max_size="16") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, min_size="12", max_size="cat") + with pytest.raises(StopIteration): + greater_than_default = "2500" + default = "" + general_wrapper_for_prompt_testing(mocker, min_size=greater_than_default, max_size=default) + + assert_that(general_wrapper_for_prompt_testing(mocker, min_size="", max_size="")).is_true() + assert_that(general_wrapper_for_prompt_testing(mocker, min_size="1", max_size="2")).is_true() + assert_that(general_wrapper_for_prompt_testing(mocker, min_size="", max_size="1")).is_true() + assert_that(general_wrapper_for_prompt_testing(mocker, min_size="4", max_size="")).is_true() + + +def test_prompt_a_list(mocker): + # Remember that 
keys go from key1...key6 + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, key="key0") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, key="key7") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, key="0") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, key="-1") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, key="-17") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, key="8") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, key="sopralapancalacapracampa") + + for i in range(1, 7): + assert_that(general_wrapper_for_prompt_testing(mocker, key="key" + str(i))).is_true() + assert_that(general_wrapper_for_prompt_testing(mocker, key=str(i))).is_true() + + +def test_prompt_a_list_of_tuple(mocker): + # Look at _mock_list_vpcs and subnets + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="2 subnets inside") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="ParallelClusterVPC-20190625135738") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="vpc-0") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="vpc-7") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="0") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="-1") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="-17") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="8") + with pytest.raises(StopIteration): + general_wrapper_for_prompt_testing(mocker, vpc_id="sopralapancalacapracampa") + + for i in range(1, 5): + i_s = str(i) + if i == 2: + with pytest.raises(StopIteration): + assert_that( + general_wrapper_for_prompt_testing( + mocker, + vpc_id="vpc-" + i_s, + master_id="subnet-{0}1".format(i_s), + compute_id="subnet-{0}1".format(i_s), + ) + ).is_true() + assert_that( + general_wrapper_for_prompt_testing( + mocker, vpc_id=i_s, master_id="subnet-{0}1".format(i_s), compute_id="subnet-{0}1".format(i_s) + ) + ).is_true() + else: + assert_that( + general_wrapper_for_prompt_testing( + mocker, + vpc_id="vpc-" + i_s, + master_id="subnet-{0}1".format(i_s), + compute_id="subnet-{0}1".format(i_s), + ) + ).is_true() + assert_that( + general_wrapper_for_prompt_testing( + mocker, vpc_id=i_s, master_id="subnet-{0}1".format(i_s), compute_id="subnet-{0}1".format(i_s) + ) + ).is_true() diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/output.txt new file mode 100644 index 0000000000..30ffabe9a3 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/output.txt @@ -0,0 +1,47 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. 
ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for Operating System: +1. alinux +2. centos6 +3. centos7 +4. ubuntu1404 +5. ubuntu1604 +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Allowed values for VPC ID: +1. vpc-1 | ParallelClusterVPC-20190625135738 | 2 subnets inside +2. vpc-2 | ParallelClusterVPC-20190624105051 | 0 subnets inside +3. vpc-3 | default | 3 subnets inside +4. vpc-4 | ParallelClusterVPC-20190626095403 | 1 subnets inside +Allowed values for Master Subnet ID: +1. subnet-11 | ParallelClusterPublicSubnet | Subnet size: 256 +2. subnet-12 | ParallelClusterPrivateSubnet | Subnet size: 4096 +Allowed values for Compute Subnet ID: +1. subnet-11 | ParallelClusterPublicSubnet | Subnet size: 256 +2. subnet-12 | ParallelClusterPrivateSubnet | Subnet size: 4096 +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/test new file mode 100644 index 0000000000..1c3bfa9211 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_no_awsbatch_no_errors/test @@ -0,0 +1,27 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = torque +base_os = alinux +compute_instance_type = t2.micro +master_instance_type = t2.nano +max_queue_size = 14 +initial_queue_size = 13 +maintain_initial_size = true + +[vpc default] +vpc_id = vpc-1 +master_subnet_id = subnet-11 +compute_subnet_id = subnet-12 + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/output.txt new file mode 100644 index 0000000000..d662742943 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/output.txt @@ -0,0 +1,41 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Allowed values for VPC ID: +1. vpc-1 | ParallelClusterVPC-20190625135738 | 2 subnets inside +2. vpc-2 | ParallelClusterVPC-20190624105051 | 0 subnets inside +3. vpc-3 | default | 3 subnets inside +4. vpc-4 | ParallelClusterVPC-20190626095403 | 1 subnets inside +Allowed values for Master Subnet ID: +1. subnet-11 | ParallelClusterPublicSubnet | Subnet size: 256 +2. subnet-12 | ParallelClusterPrivateSubnet | Subnet size: 4096 +Allowed values for Compute Subnet ID: +1. 
subnet-11 | ParallelClusterPublicSubnet | Subnet size: 256 +2. subnet-12 | ParallelClusterPrivateSubnet | Subnet size: 4096 +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/test new file mode 100644 index 0000000000..c0a21e6d60 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_no_automation_yes_awsbatch_no_errors/test @@ -0,0 +1,27 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = awsbatch +base_os = alinux +compute_instance_type = optimal +master_instance_type = t2.nano +max_vcpus = 14 +min_vcpus = 13 +desired_vcpus = 13 + +[vpc default] +vpc_id = vpc-1 +master_subnet_id = subnet-11 +compute_subnet_id = subnet-12 + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/output.txt new file mode 100644 index 0000000000..26dbccfbed --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/output.txt @@ -0,0 +1,44 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for Operating System: +1. alinux +2. centos6 +3. centos7 +4. ubuntu1404 +5. ubuntu1604 +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Allowed values for VPC ID: +1. vpc-1 | ParallelClusterVPC-20190625135738 | 2 subnets inside +2. vpc-2 | ParallelClusterVPC-20190624105051 | 0 subnets inside +3. vpc-3 | default | 3 subnets inside +4. vpc-4 | ParallelClusterVPC-20190626095403 | 1 subnets inside +Allowed values for Network Configuration: +1. Master in a public subnet and compute fleet in a private subnet +2. 
Master and compute fleet in the same public subnet +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/test new file mode 100644 index 0000000000..da9c1095d6 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors/test @@ -0,0 +1,28 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = sge +base_os = centos6 +compute_instance_type = t2.micro +master_instance_type = t2.nano +max_queue_size = 14 +initial_queue_size = 13 +maintain_initial_size = true + +[vpc default] +vpc_id = vpc-1 +master_subnet_id = subnet-pu +compute_subnet_id = subnet-pr +use_public_ips = false + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/output.txt new file mode 100644 index 0000000000..26dbccfbed --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/output.txt @@ -0,0 +1,44 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for Operating System: +1. alinux +2. centos6 +3. centos7 +4. ubuntu1404 +5. ubuntu1604 +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Allowed values for VPC ID: +1. vpc-1 | ParallelClusterVPC-20190625135738 | 2 subnets inside +2. vpc-2 | ParallelClusterVPC-20190624105051 | 0 subnets inside +3. vpc-3 | default | 3 subnets inside +4. vpc-4 | ParallelClusterVPC-20190626095403 | 1 subnets inside +Allowed values for Network Configuration: +1. Master in a public subnet and compute fleet in a private subnet +2. 
Master and compute fleet in the same public subnet +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/test new file mode 100644 index 0000000000..d6859e5040 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_empty_vpc/test @@ -0,0 +1,28 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = sge +base_os = centos6 +compute_instance_type = t2.micro +master_instance_type = t2.nano +max_queue_size = 14 +initial_queue_size = 13 +maintain_initial_size = true + +[vpc default] +vpc_id = vpc-2 +master_subnet_id = subnet-pu +compute_subnet_id = subnet-pr +use_public_ips = false + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/original_config_file b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/original_config_file new file mode 100644 index 0000000000..c0a21e6d60 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/original_config_file @@ -0,0 +1,27 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = awsbatch +base_os = alinux +compute_instance_type = optimal +master_instance_type = t2.nano +max_vcpus = 14 +min_vcpus = 13 +desired_vcpus = 13 + +[vpc default] +vpc_id = vpc-1 +master_subnet_id = subnet-11 +compute_subnet_id = subnet-12 + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/output.txt new file mode 100644 index 0000000000..26dbccfbed --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/output.txt @@ -0,0 +1,44 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for Operating System: +1. alinux +2. centos6 +3. centos7 +4. ubuntu1404 +5. ubuntu1604 +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Allowed values for VPC ID: +1. vpc-1 | ParallelClusterVPC-20190625135738 | 2 subnets inside +2. 
vpc-2 | ParallelClusterVPC-20190624105051 | 0 subnets inside +3. vpc-3 | default | 3 subnets inside +4. vpc-4 | ParallelClusterVPC-20190626095403 | 1 subnets inside +Allowed values for Network Configuration: +1. Master in a public subnet and compute fleet in a private subnet +2. Master and compute fleet in the same public subnet +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/test new file mode 100644 index 0000000000..da9c1095d6 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_no_awsbatch_no_errors_with_config_file/test @@ -0,0 +1,28 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = sge +base_os = centos6 +compute_instance_type = t2.micro +master_instance_type = t2.nano +max_queue_size = 14 +initial_queue_size = 13 +maintain_initial_size = true + +[vpc default] +vpc_id = vpc-1 +master_subnet_id = subnet-pu +compute_subnet_id = subnet-pr +use_public_ips = false + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/output.txt new file mode 100644 index 0000000000..c7ed0ce40d --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/output.txt @@ -0,0 +1,35 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Allowed values for VPC ID: +1. vpc-1 | ParallelClusterVPC-20190625135738 | 2 subnets inside +2. vpc-2 | ParallelClusterVPC-20190624105051 | 0 subnets inside +3. vpc-3 | default | 3 subnets inside +4. 
vpc-4 | ParallelClusterVPC-20190626095403 | 1 subnets inside +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/test new file mode 100644 index 0000000000..89bf92cd14 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_subnet_automation_yes_awsbatch_invalid_vpc/test @@ -0,0 +1,28 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = awsbatch +base_os = alinux +compute_instance_type = optimal +master_instance_type = t2.nano +max_vcpus = 14 +min_vcpus = 13 +desired_vcpus = 13 + +[vpc default] +vpc_id = vpc-1 +master_subnet_id = subnet-pu +compute_subnet_id = subnet-pr +use_public_ips = false + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt new file mode 100644 index 0000000000..6a4bbc3758 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt @@ -0,0 +1,40 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for Operating System: +1. alinux +2. centos6 +3. centos7 +4. ubuntu1404 +5. ubuntu1604 +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Allowed values for Network Configuration: +1. Master in a public subnet and compute fleet in a private subnet +2. Master and compute fleet in the same public subnet +Beginning creation of vpc. 
Please do not leave the terminal until the process has finish +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/test new file mode 100644 index 0000000000..7f897fe922 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/test @@ -0,0 +1,28 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = sge +base_os = centos6 +compute_instance_type = t2.micro +master_instance_type = t2.nano +max_queue_size = 14 +initial_queue_size = 13 +maintain_initial_size = true + +[vpc default] +vpc_id = vpc-0 +master_subnet_id = subnet-pu +compute_subnet_id = subnet-pr +use_public_ips = false + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt new file mode 100644 index 0000000000..dd1815c711 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt @@ -0,0 +1,41 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for Operating System: +1. alinux +2. centos6 +3. centos7 +4. ubuntu1404 +5. ubuntu1604 +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +There are no VPC for the given region. Starting automatic creation of vpc and subnets... +Allowed values for Network Configuration: +1. Master in a public subnet and compute fleet in a private subnet +2. Master and compute fleet in the same public subnet +Beginning creation of vpc. 
Please do not leave the terminal until the process has finish +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test new file mode 100644 index 0000000000..71a691d445 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test @@ -0,0 +1,28 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = slurm +base_os = centos6 +compute_instance_type = t2.micro +master_instance_type = t2.nano +max_queue_size = 14 +initial_queue_size = 13 +maintain_initial_size = true + +[vpc default] +vpc_id = vpc-0 +master_subnet_id = subnet-pu +compute_subnet_id = subnet-pr +use_public_ips = false + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt new file mode 100644 index 0000000000..287a13428b --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt @@ -0,0 +1,31 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +Beginning creation of vpc. 
Please do not leave the terminal until the process has finish +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/test new file mode 100644 index 0000000000..c11ae6a964 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/test @@ -0,0 +1,28 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = awsbatch +base_os = alinux +compute_instance_type = optimal +master_instance_type = t2.nano +max_vcpus = 14 +min_vcpus = 13 +desired_vcpus = 13 + +[vpc default] +vpc_id = vpc-0 +master_subnet_id = subnet-pu +compute_subnet_id = subnet-pr +use_public_ips = false + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/test_subnet_cidr.py b/cli/tests/pcluster/configure/test_subnet_cidr.py similarity index 96% rename from cli/tests/pcluster/test_subnet_cidr.py rename to cli/tests/pcluster/configure/test_subnet_cidr.py index 65ba97e681..8e1e3e7614 100644 --- a/cli/tests/pcluster/test_subnet_cidr.py +++ b/cli/tests/pcluster/configure/test_subnet_cidr.py @@ -76,6 +76,4 @@ def test_get_subnet_cidr(): max_queue_size=100, ) ).is_equal_to("10.0.56.0/21") - assert_that( - get_subnet_cidr("10.0.0.0/16", ["10.0.0.0/24"], 256) - ).is_equal_to("10.0.16.0/20") + assert_that(get_subnet_cidr("10.0.0.0/16", ["10.0.0.0/24"], 256)).is_equal_to("10.0.16.0/20") diff --git a/cli/tests/requirements.txt b/cli/tests/requirements.txt index bc086877ef..a4e4a37f71 100644 --- a/cli/tests/requirements.txt +++ b/cli/tests/requirements.txt @@ -3,4 +3,5 @@ pytest-cov pytest-datadir pytest-html pytest-mock +assertpy discover # Needed for Python 2.6 compatibility diff --git a/cli/tox.ini b/cli/tox.ini index aca94b14e5..c981b6c774 100644 --- a/cli/tox.ini +++ b/cli/tox.ini @@ -17,7 +17,7 @@ commands = # Running with discover and not unittest discover for Python 2.6 compatibility python -m discover -s tests/pcluster -p "*_test.py" # awsbatch-cli is not currently compatible with Python2.6 - py{27,34,35,36,37}: py.test -l -v --basetemp={envtmpdir} --html=report.html --cov={envsitepackagesdir}/awsbatch tests/awsbatch/ + py{27,34,35,36,37}: py.test -l -v --basetemp={envtmpdir} --html=report.html --cov={envsitepackagesdir}/awsbatch --cov={envsitepackagesdir}/pcluster tests # Section used to define common variables used by multiple testenvs. 
[vars] From 5310c6a84dc8b1cc96486ed060a9af3ab27904d1 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Fri, 28 Jun 2019 15:35:39 +0200 Subject: [PATCH 018/201] Refactoring of the code Signed-off-by: Matteo Fiordarancio --- cli/pcluster/cfnconfig.py | 6 +- cli/pcluster/cli.py | 10 +- cli/pcluster/configure/__init__.py | 0 .../{easyconfig => configure}/easyconfig.py | 181 ++++++------ cli/pcluster/configure/networking.py | 272 ++++++++++++++++++ .../utils.py} | 104 +++---- cli/pcluster/easyconfig/__init__.py | 10 - .../easyconfig/easyconfig_networking.py | 257 ----------------- cli/pcluster/networking/vpc_factory.py | 8 + cli/pcluster/pcluster.py | 47 +-- cli/pcluster/utils.py | 217 +++++++++----- cli/setup.py | 2 +- cli/tests/awsbatch/conftest.py | 19 -- cli/tests/awsbatch/test_awsbstat.py | 2 +- cli/tests/conftest.py | 20 +- .../configure/test_pclusterconfigure.py | 90 ++++-- .../output.txt | 2 +- .../output.txt | 4 +- .../test_vpc_automation_no_vpc_in_region/test | 2 +- .../error.txt | 0 .../output.txt | 41 +++ .../test | 27 ++ .../output.txt | 2 +- .../pcluster/configure/test_subnet_cidr.py | 55 ++-- cli/tox.ini | 2 +- tests/integration-tests/cfn_stacks_factory.py | 6 +- tests/integration-tests/conftest.py | 2 +- .../network_template_builder.py | 8 +- 28 files changed, 777 insertions(+), 619 deletions(-) create mode 100644 cli/pcluster/configure/__init__.py rename cli/pcluster/{easyconfig => configure}/easyconfig.py (73%) create mode 100644 cli/pcluster/configure/networking.py rename cli/pcluster/{easyconfig/easyconfig_utils.py => configure/utils.py} (58%) delete mode 100644 cli/pcluster/easyconfig/__init__.py delete mode 100644 cli/pcluster/easyconfig/easyconfig_networking.py delete mode 100644 cli/tests/awsbatch/conftest.py create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/error.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/output.txt create mode 100644 cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/test diff --git a/cli/pcluster/cfnconfig.py b/cli/pcluster/cfnconfig.py index 720af9f634..fb642ad6bd 100644 --- a/cli/pcluster/cfnconfig.py +++ b/cli/pcluster/cfnconfig.py @@ -30,7 +30,7 @@ from botocore.exceptions import ClientError from pcluster.config_sanity import ResourceValidator -from pcluster.utils import get_instance_vcpus, get_supported_features +from pcluster.utils import get_instance_vcpus, get_supported_features, get_templates_bucket_path class ParallelClusterConfig(object): @@ -316,10 +316,8 @@ def __init_template_url(self): self.__fail("template_url set in [%s] section but not defined." 
% self.__cluster_section) self.__validate_resource("URL", self.template_url) except configparser.NoOptionError: - s3_suffix = ".cn" if self.region.startswith("cn") else "" self.template_url = ( - "https://s3.%s.amazonaws.com%s/%s-aws-parallelcluster/templates/" - "aws-parallelcluster-%s.cfn.json" % (self.region, s3_suffix, self.region, self.version) + get_templates_bucket_path(self.region) + "aws-parallelcluster-%s.cfn.json" % self.version ) except AttributeError: pass diff --git a/cli/pcluster/cli.py b/cli/pcluster/cli.py index c576923e88..4801fff544 100644 --- a/cli/pcluster/cli.py +++ b/cli/pcluster/cli.py @@ -20,7 +20,8 @@ import argparse from botocore.exceptions import NoCredentialsError -from pcluster import easyconfig, pcluster +from pcluster import pcluster +from pcluster.configure import easyconfig LOGGER = logging.getLogger("pcluster.pcluster") @@ -372,6 +373,13 @@ def main(): except NoCredentialsError: logger.error("AWS Credentials not found.") sys.exit(1) + except KeyboardInterrupt: + logger.info("Exiting...") + sys.exit(1) except Exception as e: logger.error("Unexpected error of type %s: %s", type(e).__name__, e) sys.exit(1) + + +if __name__ == "__main__": + main() diff --git a/cli/pcluster/configure/__init__.py b/cli/pcluster/configure/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/pcluster/easyconfig/easyconfig.py b/cli/pcluster/configure/easyconfig.py similarity index 73% rename from cli/pcluster/easyconfig/easyconfig.py rename to cli/pcluster/configure/easyconfig.py index d0c50abb9e..641203b097 100644 --- a/cli/pcluster/easyconfig/easyconfig.py +++ b/cli/pcluster/configure/easyconfig.py @@ -19,29 +19,27 @@ import stat import tempfile -import boto3 import configparser from pcluster import cfnconfig -from pcluster.easyconfig.easyconfig_networking import ( - _choose_network_configuration, - automate_creation_of_subnet, - automate_creation_of_vpc_and_subnet, +from pcluster.configure.networking import ( + NetworkConfiguration, + PublicPrivateNetworkConfig, + automate_subnet_creation, + automate_vpc_with_subnet_creation, ec2_conn, - handle_client_exception, ) -from pcluster.easyconfig.easyconfig_utils import _prompt_a_list, _prompt_a_list_of_tuple, prompt -from pcluster.utils import get_subnet_cidr, get_supported_os, get_supported_schedulers +from pcluster.configure.utils import get_regions, get_resource_tag, handle_client_exception, prompt, prompt_iterable +from pcluster.utils import get_supported_os, get_supported_schedulers from future import standard_library # isort:skip -standard_library.install_aliases() # fmt: on +standard_library.install_aliases() LOGGER = logging.getLogger("pcluster.pcluster") -unsupported_regions = ["ap-northeast-3"] DEFAULT_VALUES = { "aws_region_name": "us-east-1", "cluster_template": "default", @@ -57,26 +55,14 @@ @handle_client_exception -def get_regions(): - ec2 = boto3.client("ec2") - regions = ec2.describe_regions().get("Regions") - return [region.get("RegionName") for region in regions if region.get("RegionName") not in unsupported_regions] - - -def extract_tag_from_resource(resource, tag_name): - tags = resource.get("Tags", []) - return next((item.get("Value") for item in tags if item.get("Key") == tag_name), None) - - -@handle_client_exception -def _list_keys(aws_region_name): +def _get_keys(aws_region_name): """Return a list of keys.""" conn = ec2_conn(aws_region_name) keypairs = conn.describe_key_pairs() key_options = [] - for resource in keypairs.get("KeyPairs"): - keyid = resource.get("KeyName") - 
key_options.append(keyid) + for key in keypairs.get("KeyPairs"): + key_name = key.get("KeyName") + key_options.append(key_name) if not key_options: print( @@ -87,12 +73,12 @@ def _list_keys(aws_region_name): return key_options -def extract_subnet_size(cidr): +def _extract_subnet_size(cidr): return 2 ** (32 - int(cidr.split("/")[1])) @handle_client_exception -def _list_vpcs_and_subnets(aws_region_name): +def _get_vpcs_and_subnets(aws_region_name): """ Return a dictionary containg a list of vpc in the given region and the associated vpcs. @@ -105,25 +91,34 @@ def _list_vpcs_and_subnets(aws_region_name): conn = ec2_conn(aws_region_name) vpcs = conn.describe_vpcs() vpc_options = [] - vpc_to_subnets = {} + vpc_subnets = {} + for vpc in vpcs.get("Vpcs"): vpc_id = vpc.get("VpcId") - subnet_options = [] - subnet_list = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}]).get("Subnets") - for subnet in subnet_list: - subnet_id = subnet.get("SubnetId") - subnet_size_string = "Subnet size: {0}".format(extract_subnet_size(subnet.get("CidrBlock"))) - name = extract_tag_from_resource(subnet, tag_name="Name") - if name: - subnet_options.append((subnet_id, name, subnet_size_string)) - else: - subnet_options.append((subnet_id, subnet_size_string)) - name = extract_tag_from_resource(vpc, tag_name="Name") - vpc_to_subnets[vpc_id] = subnet_options - subnets_number = "{0} subnets inside".format(len(subnet_list)) - vpc_options.append((vpc_id, name, subnets_number)) if name else vpc_options.append((vpc_id, subnets_number)) + subnets = _get_subnets(conn, vpc_id) + vpc_name = get_resource_tag(vpc, tag_name="Name") + vpc_subnets[vpc_id] = subnets + subnets_count = "{0} subnets inside".format(len(subnets)) + if vpc_name: + vpc_options.append((vpc_id, vpc_name, subnets_count)) + else: + vpc_options.append((vpc_id, subnets_count)) + + return {"vpc_list": vpc_options, "vpc_subnets": vpc_subnets} - return {"vpc_list": vpc_options, "vpc_to_subnets": vpc_to_subnets} + +def _get_subnets(conn, vpc_id): + subnet_options = [] + subnet_list = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}]).get("Subnets") + for subnet in subnet_list: + subnet_id = subnet.get("SubnetId") + subnet_size_description = "Subnet size: {0}".format(_extract_subnet_size(subnet.get("CidrBlock"))) + name = get_resource_tag(subnet, tag_name="Name") + if name: + subnet_options.append((subnet_id, name, subnet_size_description)) + else: + subnet_options.append((subnet_id, subnet_size_description)) + return subnet_options @handle_client_exception @@ -149,18 +144,18 @@ def configure(args): # noqa: C901 FIXME!!! vpc_label = "vpc " + cluster_template # Use built in boto regions as an available option - aws_region_name = _prompt_a_list( + aws_region_name = prompt_iterable( "AWS Region ID", get_regions(), - default_value=get_config_parameter( + default_value=_get_config_parameter( config, section="aws", parameter_name="aws_region_name", default_value=DEFAULT_VALUES["aws_region_name"] ), ) - scheduler = _prompt_a_list( + scheduler = prompt_iterable( "Scheduler", get_supported_schedulers(), - default_value=get_config_parameter( + default_value=_get_config_parameter( config, section=cluster_label, parameter_name="scheduler", default_value=DEFAULT_VALUES["scheduler"] ), ) @@ -168,12 +163,12 @@ def configure(args): # noqa: C901 FIXME!!! 
scheduler_handler = SchedulerHandler(config, cluster_label, scheduler) scheduler_handler.prompt_os() - scheduler_handler.prompt_min_max() + scheduler_handler.prompt_cluster_size() master_instance_type = prompt( "Master instance type", lambda x: x in _list_instances(), - default_value=get_config_parameter( + default_value=_get_config_parameter( config, section=cluster_label, parameter_name="master_instance_type", @@ -181,13 +176,13 @@ def configure(args): # noqa: C901 FIXME!!! ), ) - scheduler_handler.prompt_compute_sizes() + scheduler_handler.prompt_compute_instance_type() - key_name = _prompt_a_list("EC2 Key Pair Name", _list_keys(aws_region_name)) + key_name = prompt_iterable("EC2 Key Pair Name", _get_keys(aws_region_name)) automate_vpc = prompt("Automate VPC creation? (y/n)", lambda x: x == "y" or x == "n", default_value="n") == "y" vpc_parameters = _create_vpc_parameters( - vpc_label, aws_region_name, scheduler, scheduler_handler.max_queue_size, automatized_vpc=automate_vpc + vpc_label, aws_region_name, scheduler, scheduler_handler.max_cluster_size, automate_vpc_creation=automate_vpc ) global_parameters = { "__name__": "global", @@ -247,37 +242,35 @@ def _remove_parameter_from_past_configuration(section, config, parameters_to_rem config.remove_option(section, par) -def _create_vpc_parameters(vpc_label, aws_region_name, scheduler, max_queue_size, automatized_vpc=True): +def _create_vpc_parameters(vpc_label, aws_region_name, scheduler, min_subnet_size, automate_vpc_creation=True): vpc_parameters = {"__name__": vpc_label} - max_queue_size = int(max_queue_size) - if automatized_vpc: + min_subnet_size = int(min_subnet_size) + if automate_vpc_creation: vpc_parameters.update( - automate_creation_of_vpc_and_subnet( - aws_region_name, - _choose_network_configuration(scheduler), - max_queue_size, + automate_vpc_with_subnet_creation( + aws_region_name, _choose_network_configuration(scheduler), min_subnet_size ) ) else: - vpc_and_subnets = _list_vpcs_and_subnets(aws_region_name) + vpc_and_subnets = _get_vpcs_and_subnets(aws_region_name) vpc_list = vpc_and_subnets["vpc_list"] if not vpc_list: - print("There are no VPC for the given region. Starting automatic creation of vpc and subnets...") + print("There are no VPC for the given region. Starting automatic creation of VPC and subnets...") vpc_parameters.update( - automate_creation_of_vpc_and_subnet( - aws_region_name, _choose_network_configuration(scheduler), max_queue_size + automate_vpc_with_subnet_creation( + aws_region_name, _choose_network_configuration(scheduler), min_subnet_size ) ) else: - vpc_id = _prompt_a_list_of_tuple("VPC ID", vpc_list) + vpc_id = prompt_iterable("VPC ID", vpc_list) vpc_parameters["vpc_id"] = vpc_id - subnet_list = vpc_and_subnets["vpc_to_subnets"][vpc_id] + subnet_list = vpc_and_subnets["vpc_subnets"][vpc_id] if not subnet_list or ( prompt("Automate Subnet creation? 
(y/n)", lambda x: x == "y" or x == "n", default_value="y") == "y" ): vpc_parameters.update( - automate_creation_of_subnet( - aws_region_name, vpc_id, _choose_network_configuration(scheduler), max_queue_size + automate_subnet_creation( + aws_region_name, vpc_id, _choose_network_configuration(scheduler), min_subnet_size ) ) else: @@ -286,8 +279,8 @@ def _create_vpc_parameters(vpc_label, aws_region_name, scheduler, max_queue_size def _ask_for_subnets(subnet_list): - master_subnet_id = _prompt_a_list_of_tuple("Master Subnet ID", subnet_list) - compute_subnet_id = _prompt_a_list_of_tuple("Compute Subnet ID", subnet_list, default_value=master_subnet_id) + master_subnet_id = prompt_iterable("Master Subnet ID", subnet_list) + compute_subnet_id = prompt_iterable("Compute Subnet ID", subnet_list, default_value=master_subnet_id) vpc_parameters = {"master_subnet_id": master_subnet_id} if master_subnet_id != compute_subnet_id: @@ -322,7 +315,7 @@ def _is_config_valid(args, config): return is_valid -def get_config_parameter(config, section, parameter_name, default_value): +def _get_config_parameter(config, section, parameter_name, default_value): """ Get the parameter if present in the configuration otherwise returns default value. @@ -335,6 +328,20 @@ def get_config_parameter(config, section, parameter_name, default_value): return config.get(section, parameter_name) if config.has_option(section, parameter_name) else default_value +def _choose_network_configuration(scheduler): + if scheduler == "awsbatch": + return PublicPrivateNetworkConfig() + target_type = prompt_iterable( + "Network Configuration", + options=[configuration.value.config_type for configuration in NetworkConfiguration], + default_value=PublicPrivateNetworkConfig().config_type, + ) + + return next( + configuration.value for configuration in NetworkConfiguration if configuration.value.config_type == target_type + ) + + class SchedulerHandler: """Handle question scheduler related.""" @@ -343,7 +350,7 @@ def __init__(self, config, cluster_label, scheduler): self.config = config self.cluster_label = cluster_label - self.is_aws_batch = True if scheduler == "awsbatch" else False + self.is_aws_batch = self.scheduler == "awsbatch" self.instance_size_name = "vcpus" if self.is_aws_batch else "instances" self.max_size_name = "max_vcpus" if self.is_aws_batch else "max_queue_size" @@ -351,16 +358,16 @@ def __init__(self, config, cluster_label, scheduler): self.base_os = "alinux" self.compute_instance_type = "optimal" - self.max_queue_size = DEFAULT_VALUES["max_size"] - self.min_queue_size = DEFAULT_VALUES["min_size"] + self.max_cluster_size = DEFAULT_VALUES["max_size"] + self.min_cluster_size = DEFAULT_VALUES["min_size"] def prompt_os(self): """Ask for os, if necessary.""" if not self.is_aws_batch: - self.base_os = _prompt_a_list( + self.base_os = prompt_iterable( "Operating System", get_supported_os(self.scheduler), - default_value=get_config_parameter( + default_value=_get_config_parameter( self.config, section=self.cluster_label, parameter_name="base_os", @@ -368,7 +375,7 @@ def prompt_os(self): ), ) - def prompt_compute_sizes(self): + def prompt_compute_instance_type(self): """Ask for compute_instance_type, if necessary.""" if not self.is_aws_batch: self.compute_instance_type = prompt( @@ -377,34 +384,34 @@ def prompt_compute_sizes(self): default_value=DEFAULT_VALUES["compute_instance_type"], ) - def prompt_min_max(self): + def prompt_cluster_size(self): """Ask for max and min instances / vcpus.""" - self.min_queue_size = prompt( + 
self.min_cluster_size = prompt( "Minimum cluster size ({0})".format(self.instance_size_name), validator=lambda x: x.isdigit(), - default_value=get_config_parameter( + default_value=_get_config_parameter( self.config, self.cluster_label, self.min_size_name, DEFAULT_VALUES["min_size"] ), ) - self.max_queue_size = prompt( + self.max_cluster_size = prompt( "Maximum cluster size ({0})".format(self.instance_size_name), - validator=lambda x: x.isdigit() and int(x) >= int(self.min_queue_size), - default_value=get_config_parameter( + validator=lambda x: x.isdigit() and int(x) >= int(self.min_cluster_size), + default_value=_get_config_parameter( self.config, self.cluster_label, self.max_size_name, DEFAULT_VALUES["max_size"] ), ) def get_scheduler_parameters(self): - """Return a dict containing the value obtained that are dependent on the scheduler.""" + """Return a dict containing the scheduler dependent parameters.""" scheduler_parameters = { "base_os": self.base_os, "compute_instance_type": self.compute_instance_type, - self.max_size_name: self.max_queue_size, - self.min_size_name: self.min_queue_size, + self.max_size_name: self.max_cluster_size, + self.min_size_name: self.min_cluster_size, } if self.is_aws_batch: - scheduler_parameters["desired_vcpus"] = self.min_queue_size + scheduler_parameters["desired_vcpus"] = self.min_cluster_size else: scheduler_parameters["maintain_initial_size"] = "true" return scheduler_parameters diff --git a/cli/pcluster/configure/networking.py b/cli/pcluster/configure/networking.py new file mode 100644 index 0000000000..b5da06f612 --- /dev/null +++ b/cli/pcluster/configure/networking.py @@ -0,0 +1,272 @@ +# Copyright 2013-2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the 'License'). You may not use this file except in compliance +# with the License. A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and +# limitations under the License. +# fmt: off +from future.backports import datetime + +import abc +import logging +import os +import sys +from enum import Enum + +import boto3 +import pkg_resources + +from pcluster.configure.utils import handle_client_exception +from pcluster.networking.vpc_factory import VpcFactory +from pcluster.utils import ( + evaluate_cidr, + get_stack_output_value, + get_subnet_cidr, + get_templates_bucket_path, + verify_stack_creation, +) + +DEFAULT_AWS_REGION_NAME = "us-east-1" +LOGGER = logging.getLogger("pcluster.pcluster") +TIMESTAMP = "-{:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow()) +MASTER_SUBNET_IPS = 250 + +if sys.version_info >= (3, 4): + ABC = abc.ABC +else: + ABC = abc.ABCMeta('ABC', (), {}) + + +class BaseNetworkConfig(ABC): + """The abstract base configuration from which all configurations shall inherit.""" + + def __init__(self, config_type, template_name, stack_name_prefix): + self.config_type = config_type + self.template_name = template_name + self.stack_name_prefix = stack_name_prefix + + def create(self, aws_region_name, vpc_id, compute_subnet_size): + """ + Create the configuration. 
+ + :param aws_region_name: the region in which create the configuration + :param vpc_id: the id of the vpc in which create the configuration + :param compute_subnet_size: the minimum size of the compute subnet + :return: the parameters to write in the config file + """ + subnets_cidrs = get_vpc_subnets(aws_region_name, vpc_id) + vpc_cidr = _get_vpc_cidr(aws_region_name, vpc_id) + internet_gateway_id = _get_internet_gateway_id(aws_region_name, vpc_id) + return self._create(aws_region_name, vpc_id, vpc_cidr, subnets_cidrs, internet_gateway_id, compute_subnet_size) + + @abc.abstractmethod + def _create(self, aws_region_name, vpc_id, vpc_cidr, subnet_cidrs, internet_gateway_id, compute_subnet_size): + pass + + @staticmethod + def _build_cfn_param(key, value): + return {"ParameterKey": key, "ParameterValue": value} + + @staticmethod + def _get_cfn_parameters(aws_region_name, vpc_id, internet_gateway_id): + availability_zone = _get_availability_zone(aws_region_name) + return [ + BaseNetworkConfig._build_cfn_param("AvailabilityZone", availability_zone), + BaseNetworkConfig._build_cfn_param("InternetGatewayId", internet_gateway_id), + BaseNetworkConfig._build_cfn_param("VpcId", vpc_id), + ] + + +class PublicNetworkConfig(BaseNetworkConfig): + """The public configuration that creates one public subnet with master and compute fleet.""" + + def __init__(self): + super(PublicNetworkConfig, self).__init__( + config_type="Master and compute fleet in the same public subnet", + template_name="public", + stack_name_prefix="pub" + ) + + def get_cfn_parameters(self, aws_region_name, vpc_id, internet_gateway_id, public_cidr): + """Create cloudformation-compatible stack parameter given the variables.""" + parameters = super(PublicNetworkConfig, self)._get_cfn_parameters(aws_region_name, vpc_id, internet_gateway_id) + parameters.append(super(PublicNetworkConfig, self)._build_cfn_param("PublicCIDR", public_cidr)) + return parameters + + def _create(self, aws_region_name, vpc_id, vpc_cidr, subnet_cidrs, internet_gateway_id, compute_subnet_size): # noqa D102 + public_cidr = get_subnet_cidr( + vpc_cidr=vpc_cidr, occupied_cidr=subnet_cidrs, min_subnet_size=compute_subnet_size + MASTER_SUBNET_IPS + ) + _validate_cidr(public_cidr) + parameters = self.get_cfn_parameters(aws_region_name, vpc_id, internet_gateway_id, public_cidr) + stack_output = _create_network_stack(aws_region_name, self, parameters) + return {"master_subnet_id": get_stack_output_value(stack_output, "PublicSubnetId"), "use_public_ips": "true"} + + +class PublicPrivateNetworkConfig(BaseNetworkConfig): + """The publicprivate configuration that creates one public subnet for master and one private subnet for compute.""" + + def __init__(self): + super(PublicPrivateNetworkConfig, self).__init__( + config_type="Master in a public subnet and compute fleet in a private subnet", + template_name="public-private", + stack_name_prefix="pubpriv" + ) + + def get_cfn_parameters(self, aws_region_name, vpc_id, internet_gateway_id, public_cidr, private_cidr): + """Create cloudformation-compatible stack parameter given the variables.""" + parameters = super(PublicPrivateNetworkConfig, self)._get_cfn_parameters( + aws_region_name, vpc_id, internet_gateway_id + ) + parameters.append(super(PublicPrivateNetworkConfig, self)._build_cfn_param("PublicCIDR", public_cidr)) + parameters.append(super(PublicPrivateNetworkConfig, self)._build_cfn_param("PrivateCIDR", private_cidr)) + return parameters + + def _create(self, aws_region_name, vpc_id, vpc_cidr, subnet_cidrs, 
internet_gateway_id, compute_subnet_size): # noqa D102 + public_cidr = evaluate_cidr( + vpc_cidr=vpc_cidr, occupied_cidrs=subnet_cidrs, target_size=MASTER_SUBNET_IPS + ) + _validate_cidr(public_cidr) + subnet_cidrs.append(public_cidr) + private_cidr = get_subnet_cidr( + vpc_cidr=vpc_cidr, occupied_cidr=subnet_cidrs, min_subnet_size=compute_subnet_size + ) + _validate_cidr(private_cidr) + parameters = self.get_cfn_parameters(aws_region_name, vpc_id, internet_gateway_id, public_cidr, private_cidr) + stack_output = _create_network_stack(aws_region_name, self, parameters) + return { + "master_subnet_id": get_stack_output_value(stack_output, "PublicSubnetId"), + "compute_subnet_id": get_stack_output_value(stack_output, "PrivateSubnetId"), + "use_public_ips": "false", + } + + +class NetworkConfiguration(Enum): + """Contain all possible network configuration.""" + + # py2.7 compatibility, need to specify the order + __order__ = "PUBLIC_PRIVATE PUBLIC" + + PUBLIC_PRIVATE = PublicPrivateNetworkConfig() + PUBLIC = PublicNetworkConfig() + + +def _create_network_stack(aws_region_name, configuration, parameters): + LOGGER.info("Creating CloudFormation stack...") + LOGGER.info("Do not leave the terminal until the process has finished") + stack_name = "parallelclusternetworking-{0}{1}".format(configuration.stack_name_prefix, TIMESTAMP) + version = pkg_resources.get_distribution("aws-parallelcluster").version + try: + cfn = boto3.client("cloudformation", region_name=aws_region_name) + stack = cfn.create_stack( + StackName=stack_name, + TemplateURL=get_templates_bucket_path(aws_region_name) + "networking/%s-%s.cfn.json" % ( + configuration.template_name, version + ), + Parameters=parameters, + Capabilities=["CAPABILITY_IAM"], + ) + LOGGER.debug("StackId: {0}".format(stack.get("StackId"))) + LOGGER.info("Stack Name: {0}".format(stack_name)) + if not verify_stack_creation(cfn, stack_name): + LOGGER.error("Could not create the network configuration") + sys.exit(0) + print() + LOGGER.info("The stack has been created") + return cfn.describe_stacks(StackName=stack_name).get("Stacks")[0]["Outputs"] + except KeyboardInterrupt: + print() + LOGGER.info("Unable to update the configuration file with the selected network configuration. " + "Please manually check the status of the CloudFormation stack: {0}".format(stack_name)) + except Exception as e: # Any exception is a problem + print() + LOGGER.error( + "An exception as occured. Please restart the configuration and check manually the created resource" + ) + LOGGER.critical(e) + sys.exit(1) + + +def _validate_cidr(cidr): + if not cidr: + LOGGER.error("Unable to create subnet. 
Please check the number of available IPs in the VPC") + sys.exit(1) + + +@handle_client_exception +def get_vpc_subnets(aws_region_name, vpc_id): + """Return a list of the subnets cidr contained in the vpc.""" + conn = ec2_conn(aws_region_name) + subnets = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}])["Subnets"] + return [subnet["CidrBlock"] for subnet in subnets] + + +@handle_client_exception +def _get_vpc_cidr(aws_region_name, vpc_id): + return ec2_conn(aws_region_name).describe_vpcs(VpcIds=[vpc_id])["Vpcs"][0]["CidrBlock"] + + +@handle_client_exception +def _get_internet_gateway_id(aws_region_name, vpc_id): + response = ec2_conn(aws_region_name).describe_internet_gateways( + Filters=[{"Name": "attachment.vpc-id", "Values": [vpc_id]}] + ) + return ( + response["InternetGateways"][0]["InternetGatewayId"] + if response["InternetGateways"] + else "" + ) + + +@handle_client_exception +def _get_availability_zone(aws_region_name): + # FIXME placeholder for a function that should decide the best availability zone for the given aws_region + return "" + + +def _evaluate_aws_region(aws_region_name): + if aws_region_name: + region = aws_region_name + elif os.environ.get("AWS_DEFAULT_REGION"): + region = os.environ.get("AWS_DEFAULT_REGION") + else: + region = DEFAULT_AWS_REGION_NAME + return region + + +@handle_client_exception +def ec2_conn(aws_region_name): + region = _evaluate_aws_region(aws_region_name) + ec2 = boto3.client("ec2", region_name=region) + return ec2 + + +def automate_vpc_with_subnet_creation(aws_region_name, network_configuration, compute_subnet_size): + print("Beginning VPC creation. Please do not leave the terminal until the creation is finalized") + vpc_creator = VpcFactory(aws_region_name) + vpc_id = vpc_creator.create() + vpc_creator.setup(vpc_id, name="ParallelClusterVPC" + TIMESTAMP) + if not vpc_creator.check(vpc_id): + logging.critical("Something went wrong in VPC creation. Please delete it and start the process again") + sys.exit(1) + + vpc_parameters = {"vpc_id": vpc_id} + vpc_parameters.update(automate_subnet_creation(aws_region_name, vpc_id, network_configuration, compute_subnet_size)) + return vpc_parameters + + +@handle_client_exception +def automate_subnet_creation(aws_region_name, vpc_id, network_configuration, compute_subnet_size): + _validate_vpc(aws_region_name, vpc_id) + return network_configuration.create(aws_region_name, vpc_id, compute_subnet_size) + + +def _validate_vpc(aws_region_name, vpc_id): + # This function should be further expandend once we decide to allow the user to use his vpcs. For example, we should + # also check for the presence of a NAT gateway + if not VpcFactory(aws_region_name).check(vpc_id): + logging.error("WARNING: The VPC does not have the correct parameters set.") diff --git a/cli/pcluster/easyconfig/easyconfig_utils.py b/cli/pcluster/configure/utils.py similarity index 58% rename from cli/pcluster/easyconfig/easyconfig_utils.py rename to cli/pcluster/configure/utils.py index 073053465b..13944e75b6 100644 --- a/cli/pcluster/easyconfig/easyconfig_utils.py +++ b/cli/pcluster/configure/utils.py @@ -9,11 +9,30 @@ # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. 
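A hedged sketch of how the automation entry points above are meant to be driven. The region and subnet size are made-up values, and the call would launch real CloudFormation resources, so this is illustrative only:

from pcluster.configure.networking import (
    NetworkConfiguration,
    automate_vpc_with_subnet_creation,
)

# Create a brand-new VPC, then delegate subnet creation to the chosen
# network configuration; the returned dict feeds the [vpc ...] config section.
vpc_parameters = automate_vpc_with_subnet_creation(
    aws_region_name="us-east-1",
    network_configuration=NetworkConfiguration.PUBLIC_PRIVATE.value,
    compute_subnet_size=512,
)
# Expected keys: vpc_id, master_subnet_id, compute_subnet_id, use_public_ips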
# fmt: off +import functools import logging import sys from builtins import input +import boto3 +from botocore.exceptions import BotoCoreError, ClientError + LOGGER = logging.getLogger("pcluster.pcluster") +unsupported_regions = ["ap-northeast-3"] + + +def handle_client_exception(func): + @functools.wraps(func) + def wrapper(*args, **kwargs): + try: + return func(*args, **kwargs) + except (BotoCoreError, ClientError) as e: + LOGGER.error("Failed with error: %s" % e) + LOGGER.error("Hint: please check your AWS credentials.") + LOGGER.error("Run `aws configure` or set the credentials as environment variables.") + sys.exit(1) + + return wrapper def prompt(message, validator=lambda x: True, input_to_option=lambda x: x, default_value=None, options_to_print=None): @@ -35,12 +54,9 @@ def prompt(message, validator=lambda x: True, input_to_option=lambda x: x, defau valid_user_input = False result = default_value - # We give the user the possibility to try again if wrong + # Give the user the possibility to try again if wrong while not valid_user_input: - sys.stdin.flush() - user_input = input(user_prompt).strip() - if user_input == "": - user_input = default_value + user_input = input(user_prompt).strip() or default_value result = input_to_option(user_input) if validator(result): valid_user_input = True @@ -49,78 +65,50 @@ def prompt(message, validator=lambda x: True, input_to_option=lambda x: x, defau return result -def _prompt_a_list(message, options, default_value=None): - """ - Wrap prompt to use it for list. - - :param message: the message to show the user - :param options: the list of item to show the user - :param default_value: the default value - :return: the validate value +def prompt_iterable(message, options, default_value=None): """ - if not options: - LOGGER.error("ERROR: No options found for {0}".format(message)) - sys.exit(1) - if not default_value: - default_value = options[0] - - def input_to_parameter(to_transform): - try: - if to_transform.isdigit() and to_transform != "0": - item = options[int(to_transform) - 1] - else: - item = to_transform - except (ValueError, IndexError): - item = to_transform - return item + Wrap prompt to use it over a list or a list of tuple. - return prompt( - message, - validator=lambda x: x in options, - input_to_option=lambda x: input_to_parameter(x), - default_value=default_value, - options_to_print=_to_printable_list(options), - ) - - -def _prompt_a_list_of_tuple(message, options, default_value=None): - """ - Wrap prompt to use it over a list of tuple. - - The correct item will be the first element of each tuple. + The selected option will be the first element of the selected tuple. 
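Illustrative only: how the generic prompt() helper above composes a validator with a default value (the message and default below are made up):

from pcluster.configure.utils import prompt

max_size = prompt(
    "Maximum cluster size",
    validator=lambda x: x.isdigit() and int(x) > 0,
    default_value="10",
)
# An empty answer falls back to "10"; a non-numeric answer re-prompts.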
:param message: the message to show to the user :param options: the list of tuple :param default_value: the default value :return: the validated value """ + is_tuple = isinstance(options[0], (list, tuple)) + if not options: LOGGER.error("ERROR: No options found for {0}".format(message)) sys.exit(1) + if not default_value: - default_value = options[0][0] + default_value = options[0][0] if is_tuple else options[0] - def input_to_parameter(to_transform): + def input_to_parameter(user_input): try: - if to_transform.isdigit() and to_transform != "0": - item = options[int(to_transform) - 1][0] + if user_input.isdigit() and user_input != "0": + option_value = options[int(user_input) - 1][0] if is_tuple else options[int(user_input) - 1] else: - item = to_transform + option_value = user_input except (ValueError, IndexError): - item = to_transform - return item + option_value = user_input + return option_value - valid_options = [item[0] for item in options] + if is_tuple: + valid_options = [item[0] for item in options] + else: + valid_options = options return prompt( message, validator=lambda x: x in valid_options, input_to_option=lambda x: input_to_parameter(x), default_value=default_value, - options_to_print=_to_printable_list(options), + options_to_print=generate_printable_list(options), ) -def _to_printable_list(items): +def generate_printable_list(items): output = [] for iterator, item in enumerate(items, start=1): if isinstance(item, (list, tuple)): @@ -128,3 +116,15 @@ def _to_printable_list(items): else: output.append("{0}. {1}".format(iterator, item)) return output + + +@handle_client_exception +def get_regions(): + ec2 = boto3.client("ec2") + regions = ec2.describe_regions().get("Regions") + return [region.get("RegionName") for region in regions if region.get("RegionName") not in unsupported_regions] + + +def get_resource_tag(resource, tag_name): + tags = resource.get("Tags", []) + return next((item.get("Value") for item in tags if item.get("Key") == tag_name), None) diff --git a/cli/pcluster/easyconfig/__init__.py b/cli/pcluster/easyconfig/__init__.py deleted file mode 100644 index 1d44f1e10c..0000000000 --- a/cli/pcluster/easyconfig/__init__.py +++ /dev/null @@ -1,10 +0,0 @@ -# Copyright 2013-2014 Amazon.com, Inc. or its affiliates. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance -# with the License. A copy of the License is located at -# -# http://aws.amazon.com/apache2.0/ -# -# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES -# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and -# limitations under the License. 
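The tuple handling above is easiest to see with sample data (not taken from the patch): the options are printed as a numbered menu and the selection resolves to the first element of the chosen tuple.

from pcluster.configure.utils import prompt_iterable

# Sample data in the same (id, name, description) shape used for VPC choices:
vpcs = [
    ("vpc-1", "ParallelClusterVPC-20190625135738", "2 subnets inside"),
    ("vpc-2", "default", "3 subnets inside"),
]
# Entering "2" (or "vpc-2") returns "vpc-2"; an empty answer returns the
# default, which falls back to the first tuple's id, "vpc-1".
vpc_id = prompt_iterable("VPC ID", vpcs)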
diff --git a/cli/pcluster/easyconfig/easyconfig_networking.py b/cli/pcluster/easyconfig/easyconfig_networking.py deleted file mode 100644 index d0baab04a9..0000000000 --- a/cli/pcluster/easyconfig/easyconfig_networking.py +++ /dev/null @@ -1,257 +0,0 @@ -from future.backports import datetime - -import functools -import logging -import os -import sys -import time - -import boto3 -from botocore.exceptions import BotoCoreError, ClientError - -from pcluster.easyconfig.easyconfig_utils import _prompt_a_list -from pcluster.networking.vpc_factory import VpcFactory -from pcluster.utils import decide_cidr, get_subnet_cidr - -DEFAULT_AWS_REGION_NAME = "us-east-1" -LOGGER = logging.getLogger("pcluster.pcluster") -TIMESTAMP = "-{:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow()) -PUBLIC_PRIVATE_CONFIG_NAME = "public-private-with-nat" -PUBLIC_CONFIG_NAME = "public-only" -NUMBER_OF_IP_MASTER_SUBNET = 250 - - -def handle_client_exception(func): - @functools.wraps(func) - def wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except (BotoCoreError, ClientError) as e: - LOGGER.error("Failed with error: %s" % e) - LOGGER.error("Hint: please check your AWS credentials.") - LOGGER.error("Run `aws configure` or set the credentials as environment variables.") - sys.exit(1) - - return wrapper - - -class NetworkConfigurer: - """Create a NetworkConfigurer item that will be used by _create_network_configuration.""" - - def __init__( - self, aws_region_name, availability_zone, vpc_id, public_cidr="", internet_gateway_id="", private_cidr="" - ): - self.aws_region_name = aws_region_name - self.availability_zone = availability_zone - self.vpc_id = vpc_id - self.public_cidr = public_cidr - self.internet_gateway_id = internet_gateway_id - self.private_cidr = private_cidr - - def create_stack_parameters(self, also_private_cidr=False): - """Create cloudformation-compatible stack parameter given the variables.""" - parameters = [ - {"ParameterKey": "AvailabilityZone", "ParameterValue": self.availability_zone}, - {"ParameterKey": "InternetGatewayId", "ParameterValue": self.internet_gateway_id}, - {"ParameterKey": "PublicCIDR", "ParameterValue": self.public_cidr}, - {"ParameterKey": "VpcId", "ParameterValue": self.vpc_id}, - ] - if also_private_cidr: - parameters.append({"ParameterKey": "PrivateCIDR", "ParameterValue": self.private_cidr}) - return parameters - - -def _evaluate_aws_region(aws_region_name): - if aws_region_name: - region = aws_region_name - elif os.environ.get("AWS_DEFAULT_REGION"): - region = os.environ.get("AWS_DEFAULT_REGION") - else: - region = DEFAULT_AWS_REGION_NAME - return region - - -@handle_client_exception -def ec2_conn(aws_region_name): - region = _evaluate_aws_region(aws_region_name) - ec2 = boto3.client("ec2", region_name=region) - return ec2 - - -def automate_creation_of_vpc_and_subnet(aws_region_name, network_configuration, number_of_ip_for_compute): - print("Beginning creation of vpc. Please do not leave the terminal until the process has finish") - vpc_creator = VpcFactory(aws_region_name) - vpc_id = vpc_creator.create() - vpc_creator.setup(vpc_id, name="ParallelClusterVPC" + TIMESTAMP) - if not vpc_creator.check(vpc_id): - logging.critical("ERROR:Something went wrong in vpc creation. 
Please delete it and start the process again") - sys.exit(1) - - vpc_parameters = {"vpc_id": vpc_id} - vpc_parameters.update( - automate_creation_of_subnet(aws_region_name, vpc_id, network_configuration, number_of_ip_for_compute) - ) - return vpc_parameters - - -@handle_client_exception -def automate_creation_of_subnet(aws_region_name, vpc_id, network_configuration, number_of_ip_for_compute): - _check_the_vpc(aws_region_name, vpc_id) - ec2_client = ec2_conn(aws_region_name) - vpc_cidr = ec2_client.describe_vpcs(VpcIds=[vpc_id])["Vpcs"][0]["CidrBlock"] - internet_gateway_response = ec2_client.describe_internet_gateways( - Filters=[{"Name": "attachment.vpc-id", "Values": [vpc_id]}] - ) - internet_gateway_id = ( - internet_gateway_response["InternetGateways"][0]["InternetGatewayId"] - if internet_gateway_response["InternetGateways"] - else "" - ) - - configurer = NetworkConfigurer( - aws_region_name, _get_availability_zone(aws_region_name), vpc_id, internet_gateway_id=internet_gateway_id - ) - - possible_network_configuration = { - PUBLIC_PRIVATE_CONFIG_NAME: _create_public_private_with_nat_configuration, - PUBLIC_CONFIG_NAME: _create_public_configuration, - } - return possible_network_configuration[network_configuration]( - configurer, vpc_cidr, _get_subnets_in_vpc(aws_region_name, vpc_id), number_of_ip_for_compute - ) - - -def _create_public_configuration(configurer, vpc_cidr, subnet_cidrs, number_of_ip_for_compute): - configurer.public_cidr = get_subnet_cidr( - vpc_cidr=vpc_cidr, - occupied_cidr=subnet_cidrs, - max_queue_size=number_of_ip_for_compute + NUMBER_OF_IP_MASTER_SUBNET, - ) - _check_cidr(configurer.public_cidr) - template_name = "public.cfn.json" - stack_output = _create_network_configuration(template_name, configurer, also_private_cidr=False) - return {"master_subnet_id": stack_output[0]["OutputValue"], "use_public_ips": "true"} - - -def _create_public_private_with_nat_configuration(configurer, vpc_cidr, subnet_cidrs, number_of_ip_for_compute): - configurer.public_cidr = decide_cidr( - vpc_cidr=vpc_cidr, occupied_cidr=subnet_cidrs, target_size=NUMBER_OF_IP_MASTER_SUBNET - ) - _check_cidr(configurer.public_cidr) - subnet_cidrs.append(configurer.public_cidr) - configurer.private_cidr = get_subnet_cidr( - vpc_cidr=vpc_cidr, occupied_cidr=subnet_cidrs, max_queue_size=number_of_ip_for_compute - ) - _check_cidr(configurer.private_cidr) - template_name = "public-private.cfn.json" - stack_output = _create_network_configuration(template_name, configurer, also_private_cidr=True) - # stack output size is 2 - public_index = 0 if (stack_output[0]["OutputKey"] == "PublicSubnetId") else 1 - private_index = (public_index + 1) % 2 - return { - "master_subnet_id": stack_output[public_index]["OutputValue"], - "compute_subnet_id": stack_output[private_index]["OutputValue"], - "use_public_ips": "false", - } - - -# very similar to pcluster.py line 104 and after -def _create_network_configuration(template_name, configurer, also_private_cidr): - LOGGER.info("Creating stack for the network configuration...") - LOGGER.info("Do not leave the terminal until the process has finished") - cfn = boto3.client("cloudformation", region_name=configurer.aws_region_name) - capabilities = ["CAPABILITY_IAM"] - try: - stack_name = "parallelclusternetworking-" + ("pubpriv" if also_private_cidr else "pub") + TIMESTAMP - stack = cfn.create_stack( - StackName=stack_name, - TemplateURL="https://network-configuration-bucket.s3-eu-west-1.amazonaws.com/{0}".format(template_name), - 
Parameters=configurer.create_stack_parameters(also_private_cidr=also_private_cidr), - Capabilities=capabilities, - ) - LOGGER.debug("StackId: %s", stack.get("StackId")) - LOGGER.info("Stack Name: {0}".format(stack_name)) - status = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") - resource_status = "" - while status == "CREATE_IN_PROGRESS": - status = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") - events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents")[0] - resource_status = ( - "Status: %s - %s" % (events.get("LogicalResourceId"), events.get("ResourceStatus")) - ).ljust(80) - sys.stdout.write("\r%s" % resource_status) - sys.stdout.flush() - time.sleep(5) - # print the last status update in the logs - if resource_status != "": - LOGGER.debug(resource_status) - - if status != "CREATE_COMPLETE": - LOGGER.critical("\nCluster creation failed. Failed events:") - events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents") - for event in events: - if event.get("ResourceStatus") == "CREATE_FAILED": - LOGGER.info( - " - %s %s %s", - event.get("ResourceType"), - event.get("LogicalResourceId"), - event.get("ResourceStatusReason"), - ) - LOGGER.error("Could not create the network configuration") - sys.exit(0) - print() - LOGGER.info("The stack has been created") - return cfn.describe_stacks(StackName=stack_name).get("Stacks")[0]["Outputs"] - except KeyboardInterrupt as e: - print() - LOGGER.info("Could not write up the configuration.") - LOGGER.info("Please check manually the created resources and stacks") - except Exception as e: # Any exception is a problem - print() - LOGGER.error( - "An exception as occured. Please restart the configuration and check manually the created resource" - ) - LOGGER.critical(e) - sys.exit(1) - - -@handle_client_exception -def _get_availability_zone(aws_region_name): - # FIXME to update - return "" - - -def _choose_network_configuration(scheduler): - if scheduler == "awsbatch": - return PUBLIC_PRIVATE_CONFIG_NAME - options = ( - "Master in a public subnet and compute fleet in a private subnet", - "Master and compute fleet in the same public subnet", - ) - to_network_identifiers = {options[0]: PUBLIC_PRIVATE_CONFIG_NAME, options[1]: PUBLIC_CONFIG_NAME} - - return to_network_identifiers[_prompt_a_list("Network Configuration", options, default_value=options[0])] - - -@handle_client_exception -def _get_subnets_in_vpc(aws_region_name, vpc_id): - """Return a list of the subnets cidr contained in the vpc.""" - conn = ec2_conn(aws_region_name) - subnets = conn.describe_subnets(Filters=[{"Name": "vpcId", "Values": [vpc_id]}])["Subnets"] - return [subnet["CidrBlock"] for subnet in subnets] - - -def _check_the_vpc(aws_region_name, vpc_id): - # This function should be further expandend once we decide to allow the user to use his vpcs. For example, we should - # also check for the presence of a NAT gateway - if not VpcFactory(aws_region_name).check(vpc_id): - logging.error("WARNING: The vpc does not have the correct parameters set.") - - -def _check_cidr(cidr): - if not cidr: - LOGGER.error( - "Could not create the subnet needed for the network configuration. 
Check that the vpc has enough" - "space for the new subnet" - ) - sys.exit(1) diff --git a/cli/pcluster/networking/vpc_factory.py b/cli/pcluster/networking/vpc_factory.py index 4d141a17d6..50823f1fb2 100644 --- a/cli/pcluster/networking/vpc_factory.py +++ b/cli/pcluster/networking/vpc_factory.py @@ -80,5 +80,13 @@ def check(self, vpc_id): vpc = self.ec2.Vpc(vpc_id) dns_resolution = vpc.describe_attribute(Attribute="enableDnsSupport")["EnableDnsSupport"]["Value"] dns_hostnames = vpc.describe_attribute(Attribute="enableDnsHostnames")["EnableDnsHostnames"]["Value"] + + if not dns_hostnames: + print("DNS Hostnames of the VPC {0} must be set to True".format(vpc_id)) + if not dns_resolution: + print("DNS Resolution of the VPC {0} must be set to True".format(vpc_id)) + if vpc.dhcp_options_id == "default": + print("DHCP options of the VPC {0} must be set.".format(vpc_id)) + # default is equal to NO dhcp options set return dns_resolution and dns_hostnames and vpc.dhcp_options_id != "default" diff --git a/cli/pcluster/pcluster.py b/cli/pcluster/pcluster.py index 64080e8cc7..ad66241edc 100644 --- a/cli/pcluster/pcluster.py +++ b/cli/pcluster/pcluster.py @@ -36,6 +36,8 @@ from botocore.exceptions import ClientError from tabulate import tabulate +from pcluster.utils import get_stack_output_value, verify_stack_creation + from . import cfnconfig, utils if sys.version_info[0] >= 3: @@ -129,34 +131,8 @@ def create(args): # noqa: C901 FIXME!!! ) LOGGER.debug("StackId: %s", stack.get("StackId")) - status = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") - if not args.nowait: - resource_status = "" - while status == "CREATE_IN_PROGRESS": - status = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") - events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents")[0] - resource_status = ( - "Status: %s - %s" % (events.get("LogicalResourceId"), events.get("ResourceStatus")) - ).ljust(80) - sys.stdout.write("\r%s" % resource_status) - sys.stdout.flush() - time.sleep(5) - # print the last status update in the logs - if resource_status != "": - LOGGER.debug(resource_status) - - if status != "CREATE_COMPLETE": - LOGGER.critical("\nCluster creation failed. Failed events:") - events = cfn.describe_stack_events(StackName=stack_name).get("StackEvents") - for event in events: - if event.get("ResourceStatus") == "CREATE_FAILED": - LOGGER.info( - " - %s %s %s", - event.get("ResourceType"), - event.get("LogicalResourceId"), - event.get("ResourceStatusReason"), - ) + verify_stack_creation(cfn, stack_name) LOGGER.info("") result_stack = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0] _print_stack_outputs(result_stack) @@ -391,7 +367,7 @@ def get_batch_ce(stack_name, config): try: outputs = cfn.describe_stacks(StackName=stack_name).get("Stacks")[0].get("Outputs") - return _get_output_value(outputs, "BatchComputeEnvironmentArn") + return get_stack_output_value(outputs, "BatchComputeEnvironmentArn") except ClientError as e: LOGGER.critical(e.response.get("Error").get("Message")) sys.exit(1) @@ -669,17 +645,6 @@ def _get_master_server_ip(stack_name, config): return ip_address -def _get_output_value(outputs, key_name): - """ - Get output value from Cloudformation Stack Output. 
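For reference, a small self-contained example of the shared pcluster.utils helper that replaces the private _get_output_value being removed in this hunk (the outputs list is sample data):

from pcluster.utils import get_stack_output_value

outputs = [
    {"OutputKey": "ClusterUser", "OutputValue": "ec2-user"},
    {"OutputKey": "MasterPublicIP", "OutputValue": "203.0.113.10"},
]
assert get_stack_output_value(outputs, "ClusterUser") == "ec2-user"
# Missing keys return None instead of raising:
assert get_stack_output_value(outputs, "BatchComputeEnvironmentArn") is None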
- - :param outputs: Cloudformation Stack Outputs - :param key_name: Output Key - :return: OutputValue if that output exists, otherwise None - """ - return next((o.get("OutputValue") for o in outputs if o.get("OutputKey") == key_name), None) - - def _get_param_value(params, key_name): """ Get parameter value from Cloudformation Stack Parameters. @@ -716,8 +681,8 @@ def command(args, extra_args): # noqa: C901 FIXME!!! sys.exit(1) elif status in valid_status: outputs = stack_result.get("Outputs") - username = _get_output_value(outputs, "ClusterUser") - ip = _get_output_value(outputs, "MasterPublicIP") or _get_master_server_ip(stack, config) + username = get_stack_output_value(outputs, "ClusterUser") + ip = get_stack_output_value(outputs, "MasterPublicIP") or _get_master_server_ip(stack, config) if not username: LOGGER.info("Failed to get cluster %s username.", args.cluster_name) diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index 7dd62891db..185356fdbd 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -8,12 +8,13 @@ # or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import, print_function +from __future__ import absolute_import, print_function, unicode_literals import json +import logging import os -import socket -import struct +import sys +import time import zipfile from io import BytesIO from ipaddress import ip_address, ip_network, summarize_address_range @@ -21,6 +22,8 @@ import boto3 from botocore.exceptions import ClientError +LOGGER = logging.getLogger("pcluster.pcluster") + def boto3_client(service, aws_client_config): return boto3.client( @@ -209,119 +212,179 @@ def next_power_of_2(x): return 1 if x == 0 else 2 ** (x - 1).bit_length() -def get_subnet_cidr(vpc_cidr, occupied_cidr, max_queue_size): +def get_subnet_cidr(vpc_cidr, occupied_cidr, min_subnet_size): """ Decide the parallelcluster subnet size of the compute fleet. + :param vpc_cidr: the vpc_cidr in which the suitable subnet should be :param occupied_cidr: a list of cidr of the already occupied subnets in the vpc - :param max_queue_size: the max nodes / vcpus that the user has set + :param min_subnet_size: the minimum size of the subnet :return: """ - target_size = max(4000, 2 * max_queue_size) - cidr = decide_cidr(vpc_cidr, occupied_cidr, target_size) + default_target_size = 4000 + target_size = max(default_target_size, 2 * min_subnet_size) + cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size) while cidr is None: - if target_size < max_queue_size: + if target_size < min_subnet_size: return None target_size = target_size // 2 - cidr = decide_cidr(vpc_cidr, occupied_cidr, target_size) + cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size) return cidr -# This code is complex, get ready -def decide_cidr(vpc_cidr, occupied_cidr, target_size): +def evaluate_cidr(vpc_cidr, occupied_cidrs, target_size): """ - Decide the smallest suitable CIDR for a subnet with size >= target_size. + Decide the first smallest suitable CIDR for a subnet with size >= target_size. 
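A short worked example of the two helpers above, consistent with the unit tests added further down in this patch (the /20 result assumes an otherwise empty VPC):

from pcluster.utils import evaluate_cidr, get_subnet_cidr

# An empty /16 VPC and a target of 250 hosts fit in the first /24:
assert evaluate_cidr(
    vpc_cidr="10.0.0.0/16", occupied_cidrs=[], target_size=250
) == "10.0.0.0/24"

# get_subnet_cidr starts from max(4000, 2 * min_subnet_size) addresses and
# halves that target until a free block fits, so here it picks a /20 (4096 IPs):
assert get_subnet_cidr(
    vpc_cidr="10.0.0.0/16", occupied_cidr=[], min_subnet_size=250
) == "10.0.0.0/20"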
:param vpc_cidr: the vpc_cidr in which the suitable subnet should be - :param occupied_cidr: a list of cidr of the already occupied subnets in the vpc + :param occupied_cidrs: a list of cidr of the already occupied subnets in the vpc :param target_size: the minimum target size of the subnet :return: the suitable CIDR if found, else None """ - # How the algorithm works: If we want to find a suitable CIDR inside a vpc with already some subnet inside, we first - # have to check wheter the size of the subnet we want to create is greater than the minimum Cidr (/16, /24, ecc...). - # If it is, we have to transform all the occupied_cidr into subnets that have at least the cidr of the instance we - # want to allocate. To do that, we use _promote_cidrs(). - # - # Why doing that? - # - # Well, the function summarize_address_range() returns a iterator of all the cidr needed to encapsulate the given - # begin ip and the end ip strictly. So for example, from 10.0.0.0 to 10.0.1.1, the function will return[10.0.0.0/24, - # 10.0.1.0/31]. We therefore need to give to that function an ip range that can be compressed in just one cidr. In - # order to do that, we basically expand all the cidr and then eliminate all the duplicates. - - # Once we have the target cidr (which is 32 - the power of 2 that is equal to subnet_size ) to be the minimum - # of all the occupied_cidr, we create a list of tuple (beginip, endip) that are sorted by endip. We then compare - # each beginip with the endip of the previous one looking for a space greater than the one of subnet_size. - # If we found it, we convert it to a cidr using the summarize_address_range() function. - # Function cost: O(nlogn), where n is the size of occupied cidr - # Understanding cost: O(over9000) - aws_reserved_ip = 6 - min_bitmask_length = 28 - target_bitmask_length = min( - 32 - ((next_power_of_2(target_size + aws_reserved_ip) - 1).bit_length()), min_bitmask_length - ) - subnet_size = 2 ** (32 - target_bitmask_length) + subnet_size, subnet_bitmask = _evaluate_subnet_size(target_size) vpc_begin_address_decimal, vpc_end_address_decimal = _get_cidr_limits_as_decimal(vpc_cidr) - if vpc_end_address_decimal - vpc_begin_address_decimal + 1 < subnet_size: # if we do not have enough space + # if we do not have enough space + if vpc_end_address_decimal - vpc_begin_address_decimal + 1 < subnet_size: return None - if not occupied_cidr: # if we have space and no occupied cidr + # if we have space and no occupied cidr + if not occupied_cidrs: return _decimal_ip_limits_to_cidr(vpc_begin_address_decimal, vpc_begin_address_decimal + subnet_size) - occupied_cidr_max_bitmask = max([int(subnet_cidr.split("/")[1]) for subnet_cidr in occupied_cidr]) - if occupied_cidr_max_bitmask > target_bitmask_length: - # This means that it's smaller, so we need to make it bigger - occupied_cidr = _expand_cidrs(occupied_cidr, min_size=target_bitmask_length) - - # subnets_number is a list of pair(begin ip, end ip) obtained from the cidr. 
So for example - # 10.0.0.0/17 = 10.0.0.0, 10.0.127.255 - begin_ip_index = 0 - end_ip_index = 1 - subnets_limits = [_get_cidr_limits_as_decimal(subnet) for subnet in occupied_cidr] - subnets_limits.sort(key=lambda x: x[1]) # sort by ending numbers, sorting by beginning is the same - # to check for space between the last occupied and the end of the vpc - subnets_limits.append((vpc_end_address_decimal, vpc_end_address_decimal)) + lower_limit_index = 0 + upper_limit_index = 1 - if (subnets_limits[0][begin_ip_index] - vpc_begin_address_decimal) >= subnet_size: - return _decimal_ip_limits_to_cidr(vpc_begin_address_decimal, vpc_begin_address_decimal + subnet_size) + # Get subnets limits + occupied_cidrs = _align_subnet_cidrs(occupied_cidrs, subnet_bitmask) + subnets_limits = [_get_cidr_limits_as_decimal(subnet) for subnet in occupied_cidrs] + subnets_limits.sort(key=lambda x: x[upper_limit_index]) # Looking at space between occupied cidrs - for index in range(1, len(subnets_limits)): - begin_number = subnets_limits[index][begin_ip_index] - end_previous_number = subnets_limits[index - 1][end_ip_index] - if begin_number - end_previous_number > subnet_size: - return _decimal_ip_limits_to_cidr(end_previous_number + 1, end_previous_number + subnet_size) - return None + resulting_cidr = None + + subnets_limits.append((vpc_end_address_decimal, vpc_end_address_decimal)) + for index in range(0, len(subnets_limits)): + current_lower_limit = subnets_limits[index][lower_limit_index] + # In the first case, vpc_begin_address is free, whereas upper_limit_index is not + previous_upper_limit = ( + subnets_limits[index - 1][upper_limit_index] if index > 0 else vpc_begin_address_decimal - 1 + ) + if current_lower_limit - previous_upper_limit > subnet_size: + resulting_cidr = _decimal_ip_limits_to_cidr(previous_upper_limit + 1, previous_upper_limit + subnet_size) + break + + return resulting_cidr + + +def _align_subnet_cidrs(occupied_cidr, target_bitmask): + """Transform the subnet cidr that are smaller than the minimum bitmask to bigger ones.""" + correct_cidrs = set() + for subnet_cidr in occupied_cidr: + if _get_bitmask(subnet_cidr) > target_bitmask: + correct_cidrs.add(expand_cidr(subnet_cidr, target_bitmask)) + else: + correct_cidrs.add(subnet_cidr) + return list(correct_cidrs) + + +def _get_bitmask(cidr): + return int(cidr.split("/")[1]) + + +def _evaluate_subnet_size(target_size): + aws_reserved_ip = 6 + min_bitmask = 28 + subnet_bitmask = min(32 - ((next_power_of_2(target_size + aws_reserved_ip) - 1).bit_length()), min_bitmask) + subnet_size = 2 ** (32 - subnet_bitmask) + return subnet_size, subnet_bitmask def _decimal_ip_limits_to_cidr(begin, end): """Given begin and end ip (as decimals number), return the CIDR that begins with begin ip and ends with end ip.""" - return str( - summarize_address_range( - ip_address(socket.inet_ntoa(struct.pack("!L", begin))), ip_address(socket.inet_ntoa(struct.pack("!L", end))) - ).__next__() - ) + return str(next(summarize_address_range(ip_address(begin), ip_address(end)))) def _get_cidr_limits_as_decimal(cidr): """Given a cidr, return the begin ip and the end ip as decimal.""" - address = ip_network(cidr) + address = ip_network(unicode(cidr)) return _ip_to_decimal(str(address[0])), _ip_to_decimal(str(address[-1])) def _ip_to_decimal(ip): """Transform an ip into its decimal representantion.""" - return int(bin(struct.unpack("!I", socket.inet_aton(ip))[0]), 2) + return int(ip_address(unicode(ip))) -def _expand_cidrs(occupied_cidrs, min_size): - """Given a list of cidrs, it 
upgrade the netmask of each one to min_size and returns the updated cidrs.""" - new_cidrs = set() - for cidr in occupied_cidrs: - if int(cidr.split("/")[1]) > min_size: - ip_addr = ip_network(u"{0}".format(cidr)) - new_cidrs.add(str(ip_addr.supernet(new_prefix=min_size))) - else: - new_cidrs.add(cidr) - return list(new_cidrs) +def expand_cidr(cidr, new_size): + """ + Given a list of cidrs, it upgrade the netmask of each one to min_size and returns the updated cidrs. + + For example, given the list of cidrs ["10.0.0.0/24", "10.0.4.0/23"] and min_size = 23, the resulting updated cidrs + will be ["10.0.0.0/23", "10.0.4.0/23]. Notice that any duplicate of the updated list will be removed. + :param cidr: the list of cidr to promote + :param new_size: the minimum bitmask required + """ + ip_addr = ip_network(unicode(cidr)) + return str(ip_addr.supernet(new_prefix=new_size)) + + +# py2.7 compatibility +def unicode(ip): + return "{0}".format(ip) + + +def get_stack_output_value(stack_outputs, output_key): + """ + Get output value from Cloudformation Stack Output. + + :param stack_outputs: Cloudformation Stack Outputs + :param output_key: Output Key + :return: OutputValue if that output exists, otherwise None + """ + return next((o.get("OutputValue") for o in stack_outputs if o.get("OutputKey") == output_key), None) + + +def verify_stack_creation(cfn_client, stack_name): + """ + Wait for the stack creation to be completed and notify if the stack creation fails. + + :param cfn_client: the CloudFormation client to use to verify stack status + :param stack_name: the stack name that we should verify + :return: True if the creation was successful, false otherwise. + """ + status = cfn_client.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") + resource_status = "" + while status == "CREATE_IN_PROGRESS": + status = cfn_client.describe_stacks(StackName=stack_name).get("Stacks")[0].get("StackStatus") + events = cfn_client.describe_stack_events(StackName=stack_name).get("StackEvents")[0] + resource_status = ("Status: %s - %s" % (events.get("LogicalResourceId"), events.get("ResourceStatus"))).ljust( + 80 + ) + sys.stdout.write("\r%s" % resource_status) + sys.stdout.flush() + time.sleep(5) + # print the last status update in the logs + if resource_status != "": + LOGGER.debug(resource_status) + if status != "CREATE_COMPLETE": + LOGGER.critical("\nCluster creation failed. 
Failed events:") + events = cfn_client.describe_stack_events(StackName=stack_name).get("StackEvents") + for event in events: + if event.get("ResourceStatus") == "CREATE_FAILED": + LOGGER.info( + " - %s %s %s", + event.get("ResourceType"), + event.get("LogicalResourceId"), + event.get("ResourceStatusReason"), + ) + return False + return True + + +def get_templates_bucket_path(aws_region_name): + """Return a string containing the path of bucket.""" + s3_suffix = ".cn" if aws_region_name.startswith("cn") else "" + return "https://s3.{REGION}.amazonaws.com{S3_SUFFIX}/{REGION}-aws-parallelcluster/templates/".format( + REGION=aws_region_name, S3_SUFFIX=s3_suffix + ) diff --git a/cli/setup.py b/cli/setup.py index 6bf12f0017..8f0ff1bd92 100644 --- a/cli/setup.py +++ b/cli/setup.py @@ -22,7 +22,7 @@ def readme(): VERSION = "2.4.0" -REQUIRES = ["boto3>=1.9.54", "future>=0.16.0,<=0.17.1", "tabulate>=0.8.2,<=0.8.3", "ipaddress"] +REQUIRES = ["boto3>=1.9.54", "future>=0.16.0,<=0.17.1", "tabulate>=0.8.2,<=0.8.3", "ipaddress>=1.0.22", "enum34>=1.1.6"] if sys.version_info[:2] == (2, 6): # For python2.6 we have to require argparse since it diff --git a/cli/tests/awsbatch/conftest.py b/cli/tests/awsbatch/conftest.py deleted file mode 100644 index 18adcad6da..0000000000 --- a/cli/tests/awsbatch/conftest.py +++ /dev/null @@ -1,19 +0,0 @@ -import pytest - -DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG = { - "region": "region", - "proxy": None, - "aws_access_key_id": "aws_access_key_id", - "aws_secret_access_key": "aws_secret_access_key", - "job_queue": "job_queue", -} - - -@pytest.fixture() -def awsbatchcliconfig_mock(request, mocker): - """Mock AWSBatchCliConfig object with a default mock.""" - module_under_test = request.module.__name__.replace("test_", "") - mock = mocker.patch("awsbatch." + module_under_test + ".AWSBatchCliConfig", autospec=True) - for key, value in DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG.items(): - setattr(mock.return_value, key, value) - return mock diff --git a/cli/tests/awsbatch/test_awsbstat.py b/cli/tests/awsbatch/test_awsbstat.py index 105fb768ee..c3966feb83 100644 --- a/cli/tests/awsbatch/test_awsbstat.py +++ b/cli/tests/awsbatch/test_awsbstat.py @@ -3,8 +3,8 @@ import pytest from awsbatch import awsbstat -from tests.awsbatch.conftest import DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG from tests.common import MockedBoto3Request, read_text +from tests.conftest import DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG ALL_JOB_STATUS = ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING", "SUCCEEDED", "FAILED"] DEFAULT_JOB_STATUS = ["SUBMITTED", "PENDING", "RUNNABLE", "STARTING", "RUNNING"] diff --git a/cli/tests/conftest.py b/cli/tests/conftest.py index 87a0fff587..5bd1a39057 100644 --- a/cli/tests/conftest.py +++ b/cli/tests/conftest.py @@ -10,7 +10,6 @@ from botocore.stub import Stubber - @pytest.fixture def failed_with_message(capsys): """Assert that the command exited with a specific error message.""" @@ -112,3 +111,22 @@ def _boto3_stubber(service, mocked_requests): for stubber in created_stubbers: stubber.assert_no_pending_responses() stubber.deactivate() + + +DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG = { + "region": "region", + "proxy": None, + "aws_access_key_id": "aws_access_key_id", + "aws_secret_access_key": "aws_secret_access_key", + "job_queue": "job_queue", +} + + +@pytest.fixture() +def awsbatchcliconfig_mock(request, mocker): + """Mock AWSBatchCliConfig object with a default mock.""" + module_under_test = request.module.__name__.replace("test_", "") + mock = mocker.patch("awsbatch." 
+ module_under_test + ".AWSBatchCliConfig", autospec=True) + for key, value in DEFAULT_AWSBATCHCLICONFIG_MOCK_CONFIG.items(): + setattr(mock.return_value, key, value) + return mock diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure.py b/cli/tests/pcluster/configure/test_pclusterconfigure.py index 05db13780c..6423e285ab 100644 --- a/cli/tests/pcluster/configure/test_pclusterconfigure.py +++ b/cli/tests/pcluster/configure/test_pclusterconfigure.py @@ -6,12 +6,15 @@ from assertpy import assert_that from pcluster.configure.easyconfig import configure +from pcluster.configure.networking import NetworkConfiguration EASYCONFIG = "pcluster.configure.easyconfig." -NETWORKING = "pcluster.configure.easyconfig_networking." -UTILS = "pcluster.configure.easyconfig_utils." +NETWORKING = "pcluster.configure.networking." +UTILS = "pcluster.configure.utils." TEMP_PATH_FOR_CONFIG = os.path.join(tempfile.gettempdir(), "test_pclusterconfigure") +PUBLIC_PRIVATE_CONFIGURATION = NetworkConfiguration.PUBLIC_PRIVATE.value.config_type +PUBLIC_CONFIGURATION = NetworkConfiguration.PUBLIC.value.config_type def _mock_input(mocker, input_in_order): @@ -43,22 +46,22 @@ def _mock_aws_region(mocker): def _mock_list_keys(mocker): # If changed look for test_prompt_a_list keys = ["key1", "key2", "key3", "key4", "key5", "key6"] - mocker.patch(EASYCONFIG + "_list_keys", return_value=keys) + mocker.patch(EASYCONFIG + "_get_keys", return_value=keys) def _mock_list_vpcs_and_subnets(mocker, empty_region=False): # If changed look for test_prompt_a_list_of_tuple if empty_region: - dict = {"vpc_list": [], "vpc_to_subnets": {}} + mocked_response = {"vpc_list": [], "vpc_to_subnets": {}} else: - dict = { + mocked_response = { "vpc_list": [ ("vpc-1", "ParallelClusterVPC-20190625135738", "2 subnets inside"), ("vpc-2", "ParallelClusterVPC-20190624105051", "0 subnets inside"), ("vpc-3", "default", "3 subnets inside"), ("vpc-4", "ParallelClusterVPC-20190626095403", "1 subnets inside"), ], - "vpc_to_subnets": { + "vpc_subnets": { "vpc-1": [ ("subnet-11", "ParallelClusterPublicSubnet", "Subnet size: 256"), ("subnet-12", "ParallelClusterPrivateSubnet", "Subnet size: 4096"), @@ -72,11 +75,11 @@ def _mock_list_vpcs_and_subnets(mocker, empty_region=False): "vpc-4": [("subnet-41", "ParallelClusterPublicSubnet", "Subnet size: 4096")], }, } - mocker.patch(EASYCONFIG + "_list_vpcs_and_subnets", return_value=dict) + mocker.patch(EASYCONFIG + "_get_vpcs_and_subnets", return_value=mocked_response) def _mock_get_subnets_in_vpc(mocker): - mocker.patch(NETWORKING + "_get_subnets_in_vpc", return_value=[]) + mocker.patch(NETWORKING + "get_vpc_subnets", return_value=[]) def _mock_vpc_factory(mocker, is_a_valid_vpc): @@ -87,12 +90,12 @@ def _mock_vpc_factory(mocker, is_a_valid_vpc): def _mock_ec2_conn(mocker): - mocker.patch(NETWORKING + "_extract_vpc_cidr", return_value="10.0.0.0/16") - mocker.patch(NETWORKING + "_extract_ig_id", return_value="ig-123") + mocker.patch(NETWORKING + "_get_vpc_cidr", return_value="10.0.0.0/16") + mocker.patch(NETWORKING + "_get_internet_gateway_id", return_value="ig-123") def _mock_create_network_configuration(mocker, public_subnet_id, private_subnet_id=None): - def _side_effect_function(template_name, configurer, also_private_cidr): + def _side_effect_function(aws_region_name, config, parameters): if private_subnet_id: return [ {"OutputKey": "PrivateSubnetId", "OutputValue": private_subnet_id}, @@ -101,7 +104,7 @@ def _side_effect_function(template_name, configurer, also_private_cidr): else: return [{"OutputKey": 
"PublicSubnetId", "OutputValue": public_subnet_id}] - mocker.patch(NETWORKING + "_create_network_configuration", side_effect=_side_effect_function) + mocker.patch(NETWORKING + "_create_network_stack", side_effect=_side_effect_function) def _launch_config(mocker, path, remove_path=True): @@ -126,6 +129,8 @@ def _are_configurations_equals(path_verify, path_verified): for section_name, section in dict1.items(): for key, value in section.items(): if dict2[section_name][key] != value: + print("In section {0}, parameter {1} is different.".format(section_name, key)) + print("It is {0} but it should be {1}".format(value, dict2[section_name][key])) return False return True @@ -196,10 +201,11 @@ def add_subnet_automation(self, public_subnet_id, is_a_valid_vpc=True, private_s def get_file_path(test_datadir): - config = os.path.join(test_datadir, "test") - output = os.path.join(test_datadir, "output.txt") - error = os.path.join(test_datadir, "error.txt") - return config, error, output + config = test_datadir / "test" + output = test_datadir / "output.txt" + error = test_datadir / "error.txt" + # str for python 2.7 compatibility + return str(config), str(error), str(output) def _verify_test(mocker, capsys, output, error, config, temp_path_for_config): @@ -217,13 +223,13 @@ def create_new_test(mocker, capsys): You have to be sure that pcluster configure is correct when you use this function. You will also have to check output manually. Note that it does not print user_prompt passed as input, but neither does all the tests """ - test_name = "test_vpc_automation_no_vpc_in_region" + test_name = "test_vpc_automation_no_vpc_in_region_public" config_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "test") error_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "error.txt") output_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "output.txt") mock_handler = MockHandler(mocker, empty_region=True) - mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu") input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="slurm") input_composer.add_first_flow( op_sys="centos6", @@ -233,7 +239,7 @@ def create_new_test(mocker, capsys): compute_instance="t2.micro", key="key1", ) - input_composer.add_vpc_sub_automation_empty_region(network_configuration="1") + input_composer.add_vpc_sub_automation_empty_region(network_configuration=PUBLIC_CONFIGURATION) input_composer.finalize_config(mocker) _launch_config(mocker, config_path) @@ -288,7 +294,9 @@ def test_subnet_automation_no_awsbatch_no_errors_empty_vpc(mocker, capsys, test_ compute_instance="t2.micro", key="key1", ) - input_composer.add_sub_automation(vpc_id="vpc-2", network_configuration="1", vpc_has_subnets=False) + input_composer.add_sub_automation( + vpc_id="vpc-2", network_configuration=PUBLIC_PRIVATE_CONFIGURATION, vpc_has_subnets=False + ) input_composer.finalize_config(mocker) _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) @@ -308,7 +316,9 @@ def test_subnet_automation_no_awsbatch_no_errors(mocker, capsys, test_datadir): compute_instance="t2.micro", key="key1", ) - input_composer.add_sub_automation(vpc_id="vpc-1", network_configuration="1", vpc_has_subnets=True) + input_composer.add_sub_automation( + vpc_id="vpc-1", network_configuration=PUBLIC_PRIVATE_CONFIGURATION, vpc_has_subnets=True + ) input_composer.finalize_config(mocker) _verify_test(mocker, capsys, 
output, error, config, TEMP_PATH_FOR_CONFIG) @@ -316,7 +326,7 @@ def test_subnet_automation_no_awsbatch_no_errors(mocker, capsys, test_datadir): def test_subnet_automation_no_awsbatch_no_errors_with_config_file(mocker, capsys, test_datadir): config, error, output = get_file_path(test_datadir) - old_config_file = test_datadir / "original_config_file" + old_config_file = str(test_datadir / "original_config_file") mock_handler = MockHandler(mocker) mock_handler.add_subnet_automation(public_subnet_id="subnet-pu", private_subnet_id="subnet-pr") @@ -329,7 +339,9 @@ def test_subnet_automation_no_awsbatch_no_errors_with_config_file(mocker, capsys compute_instance="t2.micro", key="key1", ) - input_composer.add_sub_automation(vpc_id="vpc-1", network_configuration="1", vpc_has_subnets=True) + input_composer.add_sub_automation( + vpc_id="vpc-1", network_configuration=PUBLIC_PRIVATE_CONFIGURATION, vpc_has_subnets=True + ) input_composer.finalize_config(mocker) _launch_config(mocker, old_config_file, remove_path=False) @@ -352,7 +364,7 @@ def test_vpc_automation_no_awsbatch_no_errors(mocker, capsys, test_datadir): compute_instance="t2.micro", key="key1", ) - input_composer.add_vpc_sub_automation(network_configuration="1") + input_composer.add_vpc_sub_automation(network_configuration=PUBLIC_PRIVATE_CONFIGURATION) input_composer.finalize_config(mocker) _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) @@ -367,7 +379,7 @@ def test_vpc_automation_yes_awsbatch_no_errors(mocker, capsys, test_datadir): input_composer.add_first_flow( op_sys=None, min_size="13", max_size="14", master_instance="t2.nano", compute_instance=None, key="key1" ) - input_composer.add_vpc_sub_automation(network_configuration="1") + input_composer.add_vpc_sub_automation(network_configuration=PUBLIC_PRIVATE_CONFIGURATION) input_composer.finalize_config(mocker) _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) @@ -385,7 +397,7 @@ def test_vpc_automation_invalid_vpc_block(mocker, capsys, test_datadir): input_composer.add_first_flow( op_sys=None, min_size="13", max_size="14", master_instance="t2.nano", compute_instance=None, key="key1" ) - input_composer.add_vpc_sub_automation(network_configuration="1") + input_composer.add_vpc_sub_automation(network_configuration=PUBLIC_PRIVATE_CONFIGURATION) input_composer.finalize_config(mocker) _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) @@ -401,10 +413,10 @@ def test_subnet_automation_yes_awsbatch_invalid_vpc(mocker, capsys, test_datadir input_composer.add_first_flow( op_sys=None, min_size="13", max_size="14", master_instance="t2.nano", compute_instance=None, key="key1" ) - input_composer.add_sub_automation(vpc_id="vpc-1", network_configuration="1") + input_composer.add_sub_automation(vpc_id="vpc-1", network_configuration=PUBLIC_PRIVATE_CONFIGURATION) input_composer.finalize_config(mocker) _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) - assert_that("WARNING: The vpc does not have the correct parameters set." in caplog.text).is_true() + assert_that("WARNING: The VPC does not have the correct parameters set." 
in caplog.text).is_true() def test_vpc_automation_no_vpc_in_region(mocker, capsys, test_datadir): @@ -421,7 +433,27 @@ def test_vpc_automation_no_vpc_in_region(mocker, capsys, test_datadir): compute_instance="t2.micro", key="key1", ) - input_composer.add_vpc_sub_automation_empty_region(network_configuration="1") + input_composer.add_vpc_sub_automation_empty_region(network_configuration=PUBLIC_PRIVATE_CONFIGURATION) + input_composer.finalize_config(mocker) + + _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) + + +def test_vpc_automation_no_vpc_in_region_public(mocker, capsys, test_datadir): + config, error, output = get_file_path(test_datadir) + + mock_handler = MockHandler(mocker, empty_region=True) + mock_handler.add_subnet_automation(public_subnet_id="subnet-pu") + input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="slurm") + input_composer.add_first_flow( + op_sys="centos6", + min_size="13", + max_size="14", + master_instance="t2.nano", + compute_instance="t2.micro", + key="key1", + ) + input_composer.add_vpc_sub_automation_empty_region(network_configuration="2") input_composer.finalize_config(mocker) _verify_test(mocker, capsys, output, error, config, TEMP_PATH_FOR_CONFIG) diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt index 6a4bbc3758..adaecd8f7b 100644 --- a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_awsbatch_no_errors/output.txt @@ -36,5 +36,5 @@ Allowed values for EC2 Key Pair Name: Allowed values for Network Configuration: 1. Master in a public subnet and compute fleet in a private subnet 2. Master and compute fleet in the same public subnet -Beginning creation of vpc. Please do not leave the terminal until the process has finish +Beginning VPC creation. Please do not leave the terminal until the creation is finalized The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt index dd1815c711..68cf85f9d7 100644 --- a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/output.txt @@ -33,9 +33,9 @@ Allowed values for EC2 Key Pair Name: 4. key4 5. key5 6. key6 -There are no VPC for the given region. Starting automatic creation of vpc and subnets... +There are no VPC for the given region. Starting automatic creation of VPC and subnets... Allowed values for Network Configuration: 1. Master in a public subnet and compute fleet in a private subnet 2. Master and compute fleet in the same public subnet -Beginning creation of vpc. Please do not leave the terminal until the process has finish +Beginning VPC creation. 
Please do not leave the terminal until the creation is finalized The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test index 71a691d445..2e08821c27 100644 --- a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region/test @@ -5,9 +5,9 @@ aws_region_name = eu-west-1 key_name = key1 vpc_settings = default scheduler = slurm +master_instance_type = t2.nano base_os = centos6 compute_instance_type = t2.micro -master_instance_type = t2.nano max_queue_size = 14 initial_queue_size = 13 maintain_initial_size = true diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/error.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/error.txt new file mode 100644 index 0000000000..e69de29bb2 diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/output.txt new file mode 100644 index 0000000000..68cf85f9d7 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/output.txt @@ -0,0 +1,41 @@ +Allowed values for AWS Region ID: +1. eu-north-1 +2. ap-south-1 +3. eu-west-3 +4. eu-west-2 +5. eu-west-1 +6. ap-northeast-2 +7. ap-northeast-1 +8. sa-east-1 +9. ca-central-1 +10. ap-southeast-1 +11. ap-southeast-2 +12. eu-central-1 +13. us-east-1 +14. us-east-2 +15. us-west-1 +16. us-west-2 +Allowed values for Scheduler: +1. sge +2. torque +3. slurm +4. awsbatch +Allowed values for Operating System: +1. alinux +2. centos6 +3. centos7 +4. ubuntu1404 +5. ubuntu1604 +Allowed values for EC2 Key Pair Name: +1. key1 +2. key2 +3. key3 +4. key4 +5. key5 +6. key6 +There are no VPC for the given region. Starting automatic creation of VPC and subnets... +Allowed values for Network Configuration: +1. Master in a public subnet and compute fleet in a private subnet +2. Master and compute fleet in the same public subnet +Beginning VPC creation. 
Please do not leave the terminal until the creation is finalized +The configuration is valid diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/test b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/test new file mode 100644 index 0000000000..c29bc588f2 --- /dev/null +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_no_vpc_in_region_public/test @@ -0,0 +1,27 @@ +[aws] +aws_region_name = eu-west-1 + +[cluster default] +key_name = key1 +vpc_settings = default +scheduler = slurm +master_instance_type = t2.nano +base_os = centos6 +compute_instance_type = t2.micro +max_queue_size = 14 +initial_queue_size = 13 +maintain_initial_size = true + +[vpc default] +vpc_id = vpc-0 +master_subnet_id = subnet-pu +use_public_ips = true + +[global] +cluster_template = default +update_check = true +sanity_check = true + +[aliases] +ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} + diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt index 287a13428b..3139939465 100644 --- a/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt +++ b/cli/tests/pcluster/configure/test_pclusterconfigure/test_vpc_automation_yes_awsbatch_no_errors/output.txt @@ -27,5 +27,5 @@ Allowed values for EC2 Key Pair Name: 4. key4 5. key5 6. key6 -Beginning creation of vpc. Please do not leave the terminal until the process has finish +Beginning VPC creation. Please do not leave the terminal until the creation is finalized The configuration is valid diff --git a/cli/tests/pcluster/configure/test_subnet_cidr.py b/cli/tests/pcluster/configure/test_subnet_cidr.py index 8e1e3e7614..9c1973fdea 100644 --- a/cli/tests/pcluster/configure/test_subnet_cidr.py +++ b/cli/tests/pcluster/configure/test_subnet_cidr.py @@ -1,79 +1,82 @@ from assertpy import assert_that -from pcluster.utils import decide_cidr, get_subnet_cidr +from pcluster.utils import evaluate_cidr, get_subnet_cidr def test_empty_vpc(): - assert_that(decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=[], target_size=250)).is_equal_to("10.0.0.0/24") - assert_that(decide_cidr(vpc_cidr="10.0.0.0/8", occupied_cidr=[], target_size=250)).is_equal_to("10.0.0.0/24") - assert_that(decide_cidr(vpc_cidr="10.2.0.0/16", occupied_cidr=[], target_size=250)).is_equal_to("10.2.0.0/24") - assert_that(decide_cidr(vpc_cidr="10.2.0.0/25", occupied_cidr=[], target_size=500)).is_none() - assert_that(decide_cidr(vpc_cidr="10.2.0.0/25", occupied_cidr=[], target_size=100)).is_equal_to("10.2.0.0/25") + assert_that(evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=[], target_size=250)).is_equal_to("10.0.0.0/24") + assert_that(evaluate_cidr(vpc_cidr="10.0.0.0/8", occupied_cidrs=[], target_size=250)).is_equal_to("10.0.0.0/24") + assert_that(evaluate_cidr(vpc_cidr="10.2.0.0/16", occupied_cidrs=[], target_size=250)).is_equal_to("10.2.0.0/24") + assert_that(evaluate_cidr(vpc_cidr="10.2.0.0/25", occupied_cidrs=[], target_size=500)).is_none() + assert_that(evaluate_cidr(vpc_cidr="10.2.0.0/25", occupied_cidrs=[], target_size=100)).is_equal_to("10.2.0.0/25") -def test_no_space_vpc(): - assert_that(decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/16"], target_size=1)).is_none() +def test_fully_booked_vpc(): + assert_that(evaluate_cidr(vpc_cidr="10.0.0.0/16", 
occupied_cidrs=["10.0.0.0/16"], target_size=1)).is_none() assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/17", "10.0.128.0/17"], target_size=1) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.0.0/17", "10.0.128.0/17"], target_size=1) ).is_none() assert_that( - decide_cidr( - vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], target_size=16385 + evaluate_cidr( + vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], target_size=16385 ) ).is_none() assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.128.0/17"], target_size=16385) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.0.0/18", "10.0.128.0/17"], target_size=16385) ).is_none() -def test_space_needed_bigger_than_allocated_subnets(): +# testing _expand_cidrs function +def test_target_size_bigger_than_allocated_subnets(): assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.3.0/24"], target_size=500) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.1.0/24", "10.0.3.0/24"], target_size=500) ).is_equal_to("10.0.4.0/23") assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.4.0/24"], target_size=500) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.1.0/24", "10.0.4.0/24"], target_size=500) ).is_equal_to("10.0.2.0/23") assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.4.0/24"], target_size=1000) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.1.0/24", "10.0.4.0/24"], target_size=1000) ).is_equal_to("10.0.8.0/22") -def test_space_needed_smaller_than_allocated_subnets(): +def test_target_size_smaller_than_allocated_subnets(): assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.3.0/24"], target_size=250) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.1.0/24", "10.0.3.0/24"], target_size=250) ).is_equal_to("10.0.0.0/24") assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.2.0/24"], target_size=250) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.1.0/24", "10.0.2.0/24"], target_size=250) ).is_equal_to("10.0.0.0/24") assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.3.0/24", "10.0.2.0/24"], target_size=250) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.3.0/24", "10.0.2.0/24"], target_size=250) ).is_equal_to("10.0.0.0/24") assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.4.0/24", "10.0.2.0/24"], target_size=250) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.4.0/24", "10.0.2.0/24"], target_size=250) ).is_equal_to("10.0.0.0/24") assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.4.0/24", "10.0.2.0/24"], target_size=120) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.4.0/24", "10.0.2.0/24"], target_size=120) ).is_equal_to("10.0.0.0/25") assert_that( - decide_cidr(vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.1.0/24", "10.0.0.0/24"], target_size=120) + evaluate_cidr(vpc_cidr="10.0.0.0/16", occupied_cidrs=["10.0.1.0/24", "10.0.0.0/24"], target_size=120) ).is_equal_to("10.0.2.0/25") def test_get_subnet_cidr(): assert_that( get_subnet_cidr( - vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], max_queue_size=17000 + vpc_cidr="10.0.0.0/16", + occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], + 
min_subnet_size=17000, ) ).is_none() assert_that( get_subnet_cidr( - vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], max_queue_size=100 + vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/18", "10.0.64.0/18", "10.0.128.0/18"], min_subnet_size=100 ) ).is_equal_to("10.0.192.0/20") assert_that( get_subnet_cidr( vpc_cidr="10.0.0.0/16", occupied_cidr=["10.0.0.0/19", "10.0.32.0/20", "10.0.48.0/21", "10.0.64.0/18", "10.0.128.0/17"], - max_queue_size=100, + min_subnet_size=100, ) ).is_equal_to("10.0.56.0/21") assert_that(get_subnet_cidr("10.0.0.0/16", ["10.0.0.0/24"], 256)).is_equal_to("10.0.16.0/20") diff --git a/cli/tox.ini b/cli/tox.ini index c981b6c774..fab7965601 100644 --- a/cli/tox.ini +++ b/cli/tox.ini @@ -17,7 +17,7 @@ commands = # Running with discover and not unittest discover for Python 2.6 compatibility python -m discover -s tests/pcluster -p "*_test.py" # awsbatch-cli is not currently compatible with Python2.6 - py{27,34,35,36,37}: py.test -l -v --basetemp={envtmpdir} --html=report.html --cov={envsitepackagesdir}/awsbatch --cov={envsitepackagesdir}/pcluster tests + py{27,34,35,36,37}: py.test -l -v --basetemp={envtmpdir} --html=report.html --cov={envsitepackagesdir}/awsbatch # Section used to define common variables used by multiple testenvs. [vars] diff --git a/tests/integration-tests/cfn_stacks_factory.py b/tests/integration-tests/cfn_stacks_factory.py index 25c39898ba..19378998bf 100644 --- a/tests/integration-tests/cfn_stacks_factory.py +++ b/tests/integration-tests/cfn_stacks_factory.py @@ -14,7 +14,7 @@ from botocore.exceptions import ClientError from retrying import retry -from utils import retrieve_cfn_outputs, set_credentials, unset_credentials, retrieve_cfn_resources +from utils import retrieve_cfn_outputs, retrieve_cfn_resources, set_credentials, unset_credentials class CfnStack: @@ -76,7 +76,9 @@ def create_stack(self, stack): self.__created_stacks[id] = stack try: cfn_client = boto3.client("cloudformation", region_name=region) - result = cfn_client.create_stack(StackName=name, TemplateBody=stack.template, Parameters=stack.parameters) + result = cfn_client.create_stack( + StackName=name, TemplateBody=stack.template, Parameters=stack.parameters + ) stack.cfn_stack_id = result["StackId"] final_status = self.__wait_for_stack_creation(stack.cfn_stack_id, cfn_client) self.__assert_stack_status(final_status, "CREATE_COMPLETE") diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index adcec239f2..79c685ecc5 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -35,6 +35,7 @@ check_marker_skip_list, ) from jinja2 import Environment, FileSystemLoader +from network_template_builder import Gateways, NetworkTemplateBuilder, SubnetConfig, VPCConfig from utils import ( create_s3_bucket, delete_s3_bucket, @@ -43,7 +44,6 @@ to_snake_case, unset_credentials, ) -from network_template_builder import Gateways, NetworkTemplateBuilder, SubnetConfig, VPCConfig def pytest_addoption(parser): diff --git a/tests/integration-tests/network_template_builder.py b/tests/integration-tests/network_template_builder.py index 33ea1c5110..ca5deed0ec 100644 --- a/tests/integration-tests/network_template_builder.py +++ b/tests/integration-tests/network_template_builder.py @@ -25,7 +25,7 @@ VPCGatewayAttachment, ) -PREPENDNAME = "ParallelCluster" +TAGS_PREFIX = "ParallelCluster" class Gateways(Enum): @@ -47,7 +47,7 @@ class SubnetConfig(NamedTuple): def tags(self): """Get the tags for the subnet""" - 
return Tags(Name=PREPENDNAME + self.name + "Subnet", Stack=Ref("AWS::StackId")) + return Tags(Name=TAGS_PREFIX + self.name + "Subnet", Stack=Ref("AWS::StackId")) class VPCConfig(NamedTuple): @@ -141,7 +141,7 @@ def __build_internet_gateway(self, vpc: VPC): internet_gateway = self.__template.add_resource( InternetGateway( "InternetGateway", - Tags=Tags(Name=PREPENDNAME + "IG", Stack=Ref("AWS::StackId")), + Tags=Tags(Name=TAGS_PREFIX + "IG", Stack=Ref("AWS::StackId")), Condition=self.__create_ig, ) ) @@ -212,7 +212,7 @@ def __build_route_table( RouteTable( "RouteTable" + subnet_config.name, VpcId=Ref(vpc), - Tags=Tags(Name=PREPENDNAME + "RouteTable" + subnet_config.name, Stack=Ref("AWS::StackId")), + Tags=Tags(Name=TAGS_PREFIX + "RouteTable" + subnet_config.name, Stack=Ref("AWS::StackId")), ) ) self.__template.add_resource( From 76daea29cc9f550375fce520f15432f76b9e6c33 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Tue, 30 Jul 2019 17:28:50 +0200 Subject: [PATCH 019/201] Disable Docs linters in Travis Signed-off-by: Francesco De Martino --- .travis.yml | 7 ------- 1 file changed, 7 deletions(-) diff --git a/.travis.yml b/.travis.yml index 4e0faefb66..dba8f1d2fe 100644 --- a/.travis.yml +++ b/.travis.yml @@ -19,13 +19,6 @@ matrix: python: 3.6 stage: linters env: TOXENV=cfn-format-check,cfn-lint - - name: Docs Checks - python: 3.6 - stage: linters - env: TOXENV=docs-linters - before_install: - # Needed to run docs-linters target in tox. - - sudo apt-get update && sudo apt-get install -y enchant install: - pip install tox-travis From f62c7acb13839e718a9aca8b03b8e4f16cd5f532 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Mon, 15 Jul 2019 17:11:03 +0200 Subject: [PATCH 020/201] integ tests: move reusable benchmarks functions to common module Signed-off-by: Francesco De Martino --- .../benchmarks/common/__init__.py | 11 ++ .../benchmarks/common/metrics_reporter.py | 177 +++++++++++++++++ .../benchmarks/common/util.py | 22 +++ .../benchmarks/test_scaling_performance.py | 186 ++---------------- 4 files changed, 221 insertions(+), 175 deletions(-) create mode 100644 tests/integration-tests/benchmarks/common/__init__.py create mode 100644 tests/integration-tests/benchmarks/common/metrics_reporter.py create mode 100644 tests/integration-tests/benchmarks/common/util.py diff --git a/tests/integration-tests/benchmarks/common/__init__.py b/tests/integration-tests/benchmarks/common/__init__.py new file mode 100644 index 0000000000..2251b11f46 --- /dev/null +++ b/tests/integration-tests/benchmarks/common/__init__.py @@ -0,0 +1,11 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. diff --git a/tests/integration-tests/benchmarks/common/metrics_reporter.py b/tests/integration-tests/benchmarks/common/metrics_reporter.py new file mode 100644 index 0000000000..f7d01a2599 --- /dev/null +++ b/tests/integration-tests/benchmarks/common/metrics_reporter.py @@ -0,0 +1,177 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. +import datetime +import logging +import os +from time import sleep + +import boto3 +from retrying import RetryError, retry + +from time_utils import seconds + +METRIC_WIDGET_TEMPLATE = """ + {{ + "metrics": [ + [ "ParallelCluster/benchmarking/{cluster_name}", "ComputeNodesCount", {{ "stat": "Maximum", "label": \ +"ComputeNodesCount Max" }} ], + [ "...", {{ "stat": "Minimum", "label": "ComputeNodesCount Min" }} ], + [ "AWS/AutoScaling", "GroupDesiredCapacity", "AutoScalingGroupName", "{asg_name}", {{ "stat": "Maximum", \ +"label": "GroupDesiredCapacity" }} ], + [ ".", "GroupInServiceInstances", ".", ".", {{ "stat": "Maximum", "label": "GroupInServiceInstances" }} ] + ], + "view": "timeSeries", + "stacked": false, + "stat": "Maximum", + "period": 1, + "title": "{title}", + "width": 1400, + "height": 700, + "start": "{graph_start_time}", + "end": "{graph_end_time}", + "annotations": {{ + "horizontal": [ + {{ + "label": "Scaling Target", + "value": {scaling_target} + }} + ], + "vertical": [ + {{ + "label": "Start Time", + "value": "{start_time}" + }}, + {{ + "label": "End Time", + "value": "{end_time}" + }} + ] + }}, + "yAxis": {{ + "left": {{ + "showUnits": false, + "label": "Count" + }}, + "right": {{ + "showUnits": true + }} + }} + }}""" + + +def publish_compute_nodes_metric(scheduler_commands, max_monitoring_time, region, cluster_name): + logging.info("Monitoring scheduler status and publishing metrics") + cw_client = boto3.client("cloudwatch", region_name=region) + compute_nodes_time_series = [0] + timestamps = [datetime.datetime.utcnow()] + + @retry( + retry_on_result=lambda _: len(compute_nodes_time_series) == 1 or compute_nodes_time_series[-1] != 0, + wait_fixed=seconds(20), + stop_max_delay=max_monitoring_time, + ) + def _watch_compute_nodes_allocation(): + try: + compute_nodes = scheduler_commands.compute_nodes_count() + logging.info("Publishing metric: count={0}".format(compute_nodes)) + cw_client.put_metric_data( + Namespace="ParallelCluster/benchmarking/{cluster_name}".format(cluster_name=cluster_name), + MetricData=[{"MetricName": "ComputeNodesCount", "Value": compute_nodes, "Unit": "Count"}], + ) + # add values only if there is a transition. + if compute_nodes_time_series[-1] != compute_nodes: + compute_nodes_time_series.append(compute_nodes) + timestamps.append(datetime.datetime.utcnow()) + except Exception as e: + logging.warning("Failed while watching nodes allocation with exception: %s", e) + raise + + try: + _watch_compute_nodes_allocation() + except RetryError: + # ignoring this error in order to perform assertions on the collected data. 
+ pass + + end_time = datetime.datetime.utcnow() + logging.info( + "Monitoring completed: compute_nodes_time_series [ %s ], timestamps [ %s ]", + " ".join(map(str, compute_nodes_time_series)), + " ".join(map(str, timestamps)), + ) + logging.info("Sleeping for 3 minutes to wait for the metrics to propagate...") + sleep(180) + + return compute_nodes_time_series, timestamps, end_time + + +def enable_asg_metrics(region, cluster): + logging.info("Enabling ASG metrics for %s", cluster.asg) + boto3.client("autoscaling", region_name=region).enable_metrics_collection( + AutoScalingGroupName=cluster.asg, + Metrics=["GroupDesiredCapacity", "GroupInServiceInstances", "GroupTerminatingInstances"], + Granularity="1Minute", + ) + + +def _publish_metric(region, instance, os, scheduler, state, count): + cw_client = boto3.client("cloudwatch", region_name=region) + logging.info("Publishing metric: state={0} count={1}".format(state, count)) + cw_client.put_metric_data( + Namespace="parallelcluster/benchmarking/test_scaling_speed/{region}/{instance}/{os}/{scheduler}".format( + region=region, instance=instance, os=os, scheduler=scheduler + ), + MetricData=[ + { + "MetricName": "ComputeNodesCount", + "Dimensions": [{"Name": "state", "Value": state}], + "Value": count, + "Unit": "Count", + } + ], + ) + + +def produce_benchmark_metrics_report( + benchmark_params, region, cluster_name, asg_name, start_time, end_time, scaling_target, request +): + title = ", ".join("{0}={1}".format(key, val) for (key, val) in benchmark_params.items()) + graph_start_time = _to_datetime(start_time) - datetime.timedelta(minutes=2) + graph_end_time = _to_datetime(end_time) + datetime.timedelta(minutes=2) + scaling_target = scaling_target + widget_metric = METRIC_WIDGET_TEMPLATE.format( + cluster_name=cluster_name, + asg_name=asg_name, + start_time=start_time, + end_time=end_time, + title=title, + graph_start_time=graph_start_time, + graph_end_time=graph_end_time, + scaling_target=scaling_target, + ) + logging.info(widget_metric) + cw_client = boto3.client("cloudwatch", region_name=region) + response = cw_client.get_metric_widget_image(MetricWidget=widget_metric) + _write_results_to_outdir(request, response["MetricWidgetImage"]) + + +def _to_datetime(timestamp): + return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z") + + +def _write_results_to_outdir(request, image_bytes): + out_dir = request.config.getoption("output_dir") + os.makedirs("{out_dir}/benchmarks".format(out_dir=out_dir), exist_ok=True) + graph_dst = "{out_dir}/benchmarks/{test_name}.png".format( + out_dir=out_dir, test_name=request.node.nodeid.replace("::", "-") + ) + with open(graph_dst, "wb") as image: + image.write(image_bytes) diff --git a/tests/integration-tests/benchmarks/common/util.py b/tests/integration-tests/benchmarks/common/util.py new file mode 100644 index 0000000000..2117b9c62b --- /dev/null +++ b/tests/integration-tests/benchmarks/common/util.py @@ -0,0 +1,22 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. 
+import json + +import boto3 + + +def get_instance_vcpus(region, instance): + bucket_name = "{0}-aws-parallelcluster".format(region) + s3 = boto3.resource("s3", region_name=region) + instances_file_content = s3.Object(bucket_name, "instances/instances.json").get()["Body"].read() + instances = json.loads(instances_file_content) + return int(instances[instance]["vcpus"]) diff --git a/tests/integration-tests/benchmarks/test_scaling_performance.py b/tests/integration-tests/benchmarks/test_scaling_performance.py index d0ae0a8aa5..b3f18c7919 100644 --- a/tests/integration-tests/benchmarks/test_scaling_performance.py +++ b/tests/integration-tests/benchmarks/test_scaling_performance.py @@ -10,68 +10,21 @@ # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. # See the License for the specific language governing permissions and limitations under the License. import datetime -import json import logging -import os -from time import sleep -import boto3 import pytest -from retrying import RetryError, retry from assertpy import assert_that +from benchmarks.common.metrics_reporter import ( + enable_asg_metrics, + produce_benchmark_metrics_report, + publish_compute_nodes_metric, +) +from benchmarks.common.util import get_instance_vcpus from remote_command_executor import RemoteCommandExecutor from tests.common.assertions import assert_no_errors_in_logs from tests.common.schedulers_common import get_scheduler_commands -from time_utils import minutes, seconds - -METRIC_WIDGET_TEMPLATE = """ - {{ - "metrics": [ - [ "ParallelCluster/benchmarking/{cluster_name}", "ComputeNodesCount", {{ "stat": "Maximum", "label": \ -"ComputeNodesCount Max" }} ], - [ "...", {{ "stat": "Minimum", "label": "ComputeNodesCount Min" }} ], - [ "AWS/AutoScaling", "GroupDesiredCapacity", "AutoScalingGroupName", "{asg_name}", {{ "stat": "Maximum", \ -"label": "GroupDesiredCapacity" }} ], - [ ".", "GroupInServiceInstances", ".", ".", {{ "stat": "Maximum", "label": "GroupInServiceInstances" }} ] - ], - "view": "timeSeries", - "stacked": false, - "stat": "Maximum", - "period": 1, - "title": "{title}", - "width": 1400, - "height": 700, - "start": "{graph_start_time}", - "end": "{graph_end_time}", - "annotations": {{ - "horizontal": [ - {{ - "label": "Scaling Target", - "value": {scaling_target} - }} - ], - "vertical": [ - {{ - "label": "Start Time", - "value": "{start_time}" - }}, - {{ - "label": "End Time", - "value": "{end_time}" - }} - ] - }}, - "yAxis": {{ - "left": {{ - "showUnits": false, - "label": "Count" - }}, - "right": {{ - "showUnits": true - }} - }} - }}""" +from time_utils import minutes @pytest.mark.schedulers(["slurm", "sge", "torque"]) @@ -97,16 +50,16 @@ def test_scaling_performance(region, scheduler, os, instance, pcluster_config_re remote_command_executor = RemoteCommandExecutor(cluster) scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor) - _enable_asg_metrics(region, cluster) + enable_asg_metrics(region, cluster) logging.info("Starting benchmark with following parameters: %s", benchmark_params) start_time = datetime.datetime.utcnow() if scheduler == "sge": - kwargs = {"slots": _get_instance_vcpus(region, instance) * benchmark_params["scaling_target"]} + kwargs = {"slots": get_instance_vcpus(region, instance) * benchmark_params["scaling_target"]} else: kwargs = {"nodes": benchmark_params["scaling_target"]} result = scheduler_commands.submit_command("sleep {0}".format(benchmark_params["job_duration"]), **kwargs) 
scheduler_commands.assert_job_submitted(result.stdout) - compute_nodes_time_series, timestamps, end_time = _publish_compute_nodes_metric( + compute_nodes_time_series, timestamps, end_time = publish_compute_nodes_metric( scheduler_commands, max_monitoring_time=minutes(benchmarks_max_time), region=region, @@ -115,7 +68,7 @@ def test_scaling_performance(region, scheduler, os, instance, pcluster_config_re logging.info("Benchmark completed. Producing outputs and performing assertions.") benchmark_params["total_time"] = "{0}seconds".format(int((end_time - start_time).total_seconds())) - _produce_benchmark_metrics_report( + produce_benchmark_metrics_report( benchmark_params, region, cluster.cfn_name, @@ -128,120 +81,3 @@ def test_scaling_performance(region, scheduler, os, instance, pcluster_config_re assert_that(max(compute_nodes_time_series)).is_equal_to(benchmark_params["scaling_target"]) assert_that(compute_nodes_time_series[-1]).is_equal_to(0) assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"]) - - -def _publish_compute_nodes_metric(scheduler_commands, max_monitoring_time, region, cluster_name): - logging.info("Monitoring scheduler status and publishing metrics") - cw_client = boto3.client("cloudwatch", region_name=region) - compute_nodes_time_series = [0] - timestamps = [datetime.datetime.utcnow()] - - @retry( - retry_on_result=lambda _: len(compute_nodes_time_series) == 1 or compute_nodes_time_series[-1] != 0, - wait_fixed=seconds(20), - stop_max_delay=max_monitoring_time, - ) - def _watch_compute_nodes_allocation(): - try: - compute_nodes = scheduler_commands.compute_nodes_count() - logging.info("Publishing metric: count={0}".format(compute_nodes)) - cw_client.put_metric_data( - Namespace="ParallelCluster/benchmarking/{cluster_name}".format(cluster_name=cluster_name), - MetricData=[{"MetricName": "ComputeNodesCount", "Value": compute_nodes, "Unit": "Count"}], - ) - # add values only if there is a transition. - if compute_nodes_time_series[-1] != compute_nodes: - compute_nodes_time_series.append(compute_nodes) - timestamps.append(datetime.datetime.utcnow()) - except Exception as e: - logging.warning("Failed while watching nodes allocation with exception: %s", e) - raise - - try: - _watch_compute_nodes_allocation() - except RetryError: - # ignoring this error in order to perform assertions on the collected data. 
- pass - - end_time = datetime.datetime.utcnow() - logging.info( - "Monitoring completed: compute_nodes_time_series [ %s ], timestamps [ %s ]", - " ".join(map(str, compute_nodes_time_series)), - " ".join(map(str, timestamps)), - ) - logging.info("Sleeping for 3 minutes to wait for the metrics to propagate...") - sleep(180) - - return compute_nodes_time_series, timestamps, end_time - - -def _enable_asg_metrics(region, cluster): - logging.info("Enabling ASG metrics for %s", cluster.asg) - boto3.client("autoscaling", region_name=region).enable_metrics_collection( - AutoScalingGroupName=cluster.asg, - Metrics=["GroupDesiredCapacity", "GroupInServiceInstances", "GroupTerminatingInstances"], - Granularity="1Minute", - ) - - -def _publish_metric(region, instance, os, scheduler, state, count): - cw_client = boto3.client("cloudwatch", region_name=region) - logging.info("Publishing metric: state={0} count={1}".format(state, count)) - cw_client.put_metric_data( - Namespace="parallelcluster/benchmarking/test_scaling_speed/{region}/{instance}/{os}/{scheduler}".format( - region=region, instance=instance, os=os, scheduler=scheduler - ), - MetricData=[ - { - "MetricName": "ComputeNodesCount", - "Dimensions": [{"Name": "state", "Value": state}], - "Value": count, - "Unit": "Count", - } - ], - ) - - -def _produce_benchmark_metrics_report( - benchmark_params, region, cluster_name, asg_name, start_time, end_time, scaling_target, request -): - title = ", ".join("{0}={1}".format(key, val) for (key, val) in benchmark_params.items()) - graph_start_time = _to_datetime(start_time) - datetime.timedelta(minutes=2) - graph_end_time = _to_datetime(end_time) + datetime.timedelta(minutes=2) - scaling_target = scaling_target - widget_metric = METRIC_WIDGET_TEMPLATE.format( - cluster_name=cluster_name, - asg_name=asg_name, - start_time=start_time, - end_time=end_time, - title=title, - graph_start_time=graph_start_time, - graph_end_time=graph_end_time, - scaling_target=scaling_target, - ) - logging.info(widget_metric) - cw_client = boto3.client("cloudwatch", region_name=region) - response = cw_client.get_metric_widget_image(MetricWidget=widget_metric) - _write_results_to_outdir(request, response["MetricWidgetImage"]) - - -def _to_datetime(timestamp): - return datetime.datetime.strptime(timestamp, "%Y-%m-%dT%H:%M:%S.%f%z") - - -def _write_results_to_outdir(request, image_bytes): - out_dir = request.config.getoption("output_dir") - os.makedirs("{out_dir}/benchmarks".format(out_dir=out_dir), exist_ok=True) - graph_dst = "{out_dir}/benchmarks/{test_name}.png".format( - out_dir=out_dir, test_name=request.node.nodeid.replace("::", "-") - ) - with open(graph_dst, "wb") as image: - image.write(image_bytes) - - -def _get_instance_vcpus(region, instance): - bucket_name = "{0}-aws-parallelcluster".format(region) - s3 = boto3.resource("s3", region_name=region) - instances_file_content = s3.Object(bucket_name, "instances/instances.json").get()["Body"].read() - instances = json.loads(instances_file_content) - return int(instances[instance]["vcpus"]) From c82770bf5fae017e72b7237a98bc9be93edc68a5 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Fri, 19 Jul 2019 16:30:59 +0200 Subject: [PATCH 021/201] integ tests: implement scheduler stress tests Signed-off-by: Francesco De Martino --- .../benchmarks/test_scheduler_performance.py | 122 ++++++++++++++++++ .../pcluster.config.ini | 25 ++++ .../tests/common/schedulers_common.py | 12 +- 3 files changed, 156 insertions(+), 3 deletions(-) create mode 100644 
tests/integration-tests/benchmarks/test_scheduler_performance.py create mode 100644 tests/integration-tests/benchmarks/test_scheduler_performance/test_scheduler_performance/pcluster.config.ini diff --git a/tests/integration-tests/benchmarks/test_scheduler_performance.py b/tests/integration-tests/benchmarks/test_scheduler_performance.py new file mode 100644 index 0000000000..12c45b19b7 --- /dev/null +++ b/tests/integration-tests/benchmarks/test_scheduler_performance.py @@ -0,0 +1,122 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. +import datetime +import logging +import threading +from concurrent.futures.thread import ThreadPoolExecutor + +import pytest + +from assertpy import assert_that +from benchmarks.common.metrics_reporter import ( + enable_asg_metrics, + produce_benchmark_metrics_report, + publish_compute_nodes_metric, +) +from benchmarks.common.util import get_instance_vcpus +from remote_command_executor import RemoteCommandExecutor +from tests.common.assertions import assert_no_errors_in_logs +from tests.common.schedulers_common import get_scheduler_commands +from time_utils import minutes + + +@pytest.mark.schedulers(["slurm", "sge", "torque"]) +@pytest.mark.benchmarks +def test_scheduler_performance(region, scheduler, os, instance, pcluster_config_reader, clusters_factory, request): + """The test runs a stress test to verify scheduler behaviour with many submitted jobs.""" + benchmarks_max_time = request.config.getoption("benchmarks_max_time") + instance_slots = get_instance_vcpus(region, instance) + + benchmark_params = { + "region": region, + "scheduler": scheduler, + "os": os, + "instance": instance, + "scaling_target": request.config.getoption("benchmarks_target_capacity"), + "scaledown_idletime": 2, + "job_duration": 60, + "jobs_to_submit": 2 * instance_slots * request.config.getoption("benchmarks_target_capacity"), + } + + cluster_config = pcluster_config_reader( + scaledown_idletime=benchmark_params["scaledown_idletime"], scaling_target=benchmark_params["scaling_target"] + ) + cluster = clusters_factory(cluster_config) + remote_command_executor = RemoteCommandExecutor(cluster) + scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor) + enable_asg_metrics(region, cluster) + + logging.info("Starting benchmark with following parameters: %s", benchmark_params) + start_time = datetime.datetime.utcnow() + _submit_jobs(benchmark_params, scheduler_commands, instance_slots, cluster) + compute_nodes_time_series, timestamps, end_time = publish_compute_nodes_metric( + scheduler_commands, + max_monitoring_time=minutes(benchmarks_max_time), + region=region, + cluster_name=cluster.cfn_name, + ) + + logging.info("Benchmark completed. 
Producing outputs and performing assertions.") + benchmark_params["total_time"] = "{0}seconds".format(int((end_time - start_time).total_seconds())) + produce_benchmark_metrics_report( + benchmark_params, + region, + cluster.cfn_name, + cluster.asg, + start_time.replace(tzinfo=datetime.timezone.utc).isoformat(), + end_time.replace(tzinfo=datetime.timezone.utc).isoformat(), + benchmark_params["scaling_target"], + request, + ) + assert_that(max(compute_nodes_time_series)).is_equal_to(benchmark_params["scaling_target"]) + assert_that(compute_nodes_time_series[-1]).is_equal_to(0) + _assert_jobs_completed(remote_command_executor, benchmark_params["jobs_to_submit"]) + assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"]) + + +def _submit_jobs(benchmark_params, scheduler_commands, instance_slots, cluster): + """ + Submit 1 job to make the cluster scale to scaling_target and then a series of very small jobs + to test scheduler performance. + """ + if benchmark_params["scheduler"] == "sge": + kwargs = {"slots": instance_slots * benchmark_params["scaling_target"]} + else: + kwargs = {"nodes": benchmark_params["scaling_target"]} + result = scheduler_commands.submit_command("sleep 1", **kwargs) + job_id = scheduler_commands.assert_job_submitted(result.stdout) + + with ThreadPoolExecutor(max_workers=10) as executor: + # allows to keep thread local data that gets reused for all tasks executed by the thread + local_data = threading.local() + + def _submit_one_slot_job(): + if not hasattr(local_data, "scheduler_commands"): + local_data.scheduler_commands = get_scheduler_commands( + benchmark_params["scheduler"], RemoteCommandExecutor(cluster) + ) + local_data.scheduler_commands.submit_command( + "sleep {0}; mkdir -p /shared/job-results; mktemp /shared/job-results/job.XXXXXXXX".format( + benchmark_params["job_duration"] + ), + slots=1, + after_ok=job_id, + ) + + for _ in range(0, benchmark_params["jobs_to_submit"]): + executor.submit(_submit_one_slot_job) + + +def _assert_jobs_completed(remote_command_executor, expected_completed_jobs_count): + result = remote_command_executor.run_remote_command("ls /shared/job-results | wc -l") + completed_jobs_count = int(result.stdout.strip()) + assert_that(completed_jobs_count).is_equal_to(expected_completed_jobs_count) diff --git a/tests/integration-tests/benchmarks/test_scheduler_performance/test_scheduler_performance/pcluster.config.ini b/tests/integration-tests/benchmarks/test_scheduler_performance/test_scheduler_performance/pcluster.config.ini new file mode 100644 index 0000000000..afa550a104 --- /dev/null +++ b/tests/integration-tests/benchmarks/test_scheduler_performance/test_scheduler_performance/pcluster.config.ini @@ -0,0 +1,25 @@ +[global] +cluster_template = default + +[aws] +aws_region_name = {{ region }} + +[cluster default] +base_os = {{ os }} +key_name = {{ key_name }} +vpc_settings = parallelcluster-vpc +scheduler = {{ scheduler }} +master_instance_type = {{ instance }} +compute_instance_type = {{ instance }} +initial_queue_size = 0 +max_queue_size = {{ scaling_target }} +scaling_settings = custom + +[scaling custom] +scaledown_idletime = {{ scaledown_idletime }} + +[vpc parallelcluster-vpc] +vpc_id = {{ vpc_id }} +master_subnet_id = {{ public_subnet_id }} +compute_subnet_id = {{ private_subnet_id }} +use_public_ips = false diff --git a/tests/integration-tests/tests/common/schedulers_common.py b/tests/integration-tests/tests/common/schedulers_common.py index e0e8d811b6..b8f49de496 100644 --- 
a/tests/integration-tests/tests/common/schedulers_common.py +++ b/tests/integration-tests/tests/common/schedulers_common.py @@ -166,7 +166,7 @@ def assert_job_submitted(self, qsub_output, is_array=False): # noqa: D102 assert_that(match).is_not_none() return match.group(1) - def submit_command(self, command, nodes=1, slots=None, hold=False): # noqa: D102 + def submit_command(self, command, nodes=1, slots=None, hold=False, after_ok=None): # noqa: D102 flags = "" if nodes != 1: raise Exception("SGE does not support nodes option") @@ -174,6 +174,8 @@ def submit_command(self, command, nodes=1, slots=None, hold=False): # noqa: D10 flags += "-pe mpi {0} ".format(slots) if hold: flags += "-h " + if after_ok: + flags += "-hold_jid {0} ".format(after_ok) return self._remote_command_executor.run_remote_command( "echo '{0}' | qsub {1}".format(command, flags), raise_on_error=False ) @@ -234,12 +236,14 @@ def assert_job_submitted(self, sbatch_output): # noqa: D102 assert_that(match).is_not_none() return match.group(1) - def submit_command(self, command, nodes=1, slots=None, host=None): # noqa: D102 + def submit_command(self, command, nodes=1, slots=None, host=None, after_ok=None): # noqa: D102 submission_command = "sbatch -N {0} --wrap='{1}'".format(nodes, command) if host: submission_command += " --nodelist={0}".format(host) if slots: submission_command += " -n {0}".format(slots) + if after_ok: + submission_command += " -d afterok:{0}".format(after_ok) return self._remote_command_executor.run_remote_command(submission_command) def submit_script( @@ -301,8 +305,10 @@ def assert_job_submitted(self, qsub_output): # noqa: D102 self._remote_command_executor.run_remote_command("qstat -f {0}".format(id)) return id - def submit_command(self, command, nodes=1, slots=None): # noqa: D102 + def submit_command(self, command, nodes=1, slots=None, after_ok=None): # noqa: D102 flags = "-l nodes={0}:ppn={1}".format(nodes or 1, slots or 1) + if after_ok: + flags += " -W depend=afterok:{0}".format(after_ok) return self._remote_command_executor.run_remote_command( "echo '{0}' | qsub {1}".format(command, flags), raise_on_error=False ) From 9ac74bbff306072fb0b8ceb67c416fe748cc8832 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Tue, 30 Jul 2019 17:07:27 +0200 Subject: [PATCH 022/201] integ tests: fix Torque script args in submit_script Signed-off-by: Francesco De Martino --- .../tests/common/schedulers_common.py | 10 +++++++--- 1 file changed, 7 insertions(+), 3 deletions(-) diff --git a/tests/integration-tests/tests/common/schedulers_common.py b/tests/integration-tests/tests/common/schedulers_common.py index b8f49de496..9b581693e4 100644 --- a/tests/integration-tests/tests/common/schedulers_common.py +++ b/tests/integration-tests/tests/common/schedulers_common.py @@ -183,13 +183,15 @@ def submit_command(self, command, nodes=1, slots=None, hold=False, after_ok=None def submit_script(self, script, script_args=None, nodes=1, slots=None, additional_files=None): # noqa: D102 if not additional_files: additional_files = [] + if not script_args: + script_args = [] additional_files.append(script) flags = "" if slots: flags += "-pe mpi {0} ".format(slots) script_name = os.path.basename(script) return self._remote_command_executor.run_remote_command( - "qsub {0} {1} {2}".format(flags, script_name, script_args), additional_files=additional_files + "qsub {0} {1} {2}".format(flags, script_name, " ".join(script_args)), additional_files=additional_files ) def assert_job_succeeded(self, job_id, children_number=0): # noqa: D102 @@ 
-251,6 +253,8 @@ def submit_script( ): # noqa: D102 if not additional_files: additional_files = [] + if not script_args: + script_args = [] additional_files.append(script) script_name = os.path.basename(script) submission_command = "sbatch" @@ -260,7 +264,7 @@ def submit_script( submission_command += " -n {0}".format(slots) if nodes > 1: submission_command += " -N {0}".format(nodes) - submission_command += " {1} {2}".format(nodes, script_name, script_args) + submission_command += " {1} {2}".format(nodes, script_name, " ".join(script_args)) return self._remote_command_executor.run_remote_command(submission_command, additional_files=additional_files) def assert_job_succeeded(self, job_id, children_number=0): # noqa: D102 @@ -320,7 +324,7 @@ def submit_script(self, script, script_args=None, nodes=1, slots=None, additiona additional_files.append(script) flags = "-l nodes={0}:ppn={1}".format(nodes or 1, slots or 1) if script_args: - flags += " -F {0}".format(script_args) + flags += ' -F "{0}"'.format(" ".join(script_args)) return self._remote_command_executor.run_remote_command( "qsub {0} {1}".format(flags, script_name), additional_files=additional_files ) From ece36e8b95f747abdd2e7141d646b4c5e76b3615 Mon Sep 17 00:00:00 2001 From: Luca Carrogu Date: Tue, 30 Jul 2019 15:51:41 +0200 Subject: [PATCH 023/201] Do not fail if there aren't AMIs in a given region Signed-off-by: Luca Carrogu --- util/generate-ami-list.py | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/util/generate-ami-list.py b/util/generate-ami-list.py index 5b5841d887..4161004882 100644 --- a/util/generate-ami-list.py +++ b/util/generate-ami-list.py @@ -48,7 +48,10 @@ def get_ami_list_from_file(regions, cfn_template_file): current_amis = cfn_data.get("Mappings").get("AWSRegionOS2AMI") for region_name in regions: - amis_json[region_name] = OrderedDict(sorted(current_amis.get(region_name).items())) + if region_name in current_amis: + amis_json[region_name] = OrderedDict(sorted(current_amis.get(region_name).items())) + else: + print("Warning: there are no AMIs in the region (%s)" % region_name) return amis_json From 9fd63578424d76cb89bec2aae666f6a07380f17f Mon Sep 17 00:00:00 2001 From: Luca Carrogu Date: Wed, 31 Jul 2019 17:29:25 +0200 Subject: [PATCH 024/201] Increase master_root_volume_size and compute_root_volume_size to 20 Signed-off-by: Luca Carrogu --- CHANGELOG.rst | 9 ++++++++- cloudformation/aws-parallelcluster.cfn.json | 8 ++++---- 2 files changed, 12 insertions(+), 5 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index f6a3ba3aea..3f967702dc 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -2,6 +2,13 @@ CHANGELOG ========= +x.x.x +===== + +**CHANGES** + +* Increase default EBS volume size from 17GB to 20GB + 2.4.1 ===== @@ -44,7 +51,7 @@ CHANGELOG * Make FSx Substack depend on ComputeSecurityGroupIngress to keep FSx from trying to create prior to the SG allowing traffic within itself * Restore correct value for ``filehandle_limit`` that was getting reset when setting ``memory_limit`` for EFA -* Torque: fix compute nodes locking mechanism to prevent job scheduling on nodes being terminated +* Torque: fix compute nodes locking mechanism to prevent job scheduling on nodes being terminated * Restore logic that was automatically adding compute nodes identity to SSH ``known_hosts`` file * Slurm: fix issue that was causing the ParallelCluster daemons to fail when the cluster is stopped and an empty compute nodes file is imported in Slurm config diff --git 
a/cloudformation/aws-parallelcluster.cfn.json b/cloudformation/aws-parallelcluster.cfn.json index 1bc2fc3598..7c7a1cc286 100644 --- a/cloudformation/aws-parallelcluster.cfn.json +++ b/cloudformation/aws-parallelcluster.cfn.json @@ -506,14 +506,14 @@ "MasterRootVolumeSize": { "Description": "Size of MasterServer EBS root volume in GB", "Type": "Number", - "Default": "17", - "MinValue": "17" + "Default": "20", + "MinValue": "20" }, "ComputeRootVolumeSize": { "Description": "Size of ComputeFleet EBS root volume in GB", "Type": "Number", - "Default": "17", - "MinValue": "17" + "Default": "20", + "MinValue": "20" }, "EC2IAMRoleName": { "Description": "Existing EC2 IAM role name", From a391d8936ec23390f83cf738f2d0af37b0a43159 Mon Sep 17 00:00:00 2001 From: Luca Carrogu Date: Wed, 31 Jul 2019 21:56:08 +0200 Subject: [PATCH 025/201] Increase master_root_volume_size and compute_root_volume_size to 20 Signed-off-by: Luca Carrogu --- cli/pcluster/examples/config | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/cli/pcluster/examples/config b/cli/pcluster/examples/config index 6da6acbdba..1e1cfeedf3 100644 --- a/cli/pcluster/examples/config +++ b/cli/pcluster/examples/config @@ -96,11 +96,11 @@ key_name = mykey # (defaults to false) #encrypted_ephemeral = false # MasterServer root volume size in GB. (AMI must support growroot) -# (defaults to 17) -#master_root_volume_size = 17 +# (defaults to 20) +#master_root_volume_size = 20 # ComputeFleet root volume size in GB. (AMI must support growroot) -# (defaults to 17) -#compute_root_volume_size = 17 +# (defaults to 20) +#compute_root_volume_size = 20 # OS type used in the cluster # (defaults to alinux) #base_os = alinux From f294fb541a2e2ba3f5fcbd93dbe2dfd57bb99967 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 1 Aug 2019 16:50:16 +0200 Subject: [PATCH 026/201] Use Placement Group in Dryrun Signed-off-by: Sean Smith --- cli/pcluster/cfnconfig.py | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/cli/pcluster/cfnconfig.py b/cli/pcluster/cfnconfig.py index 87d83b87ae..4a585dc1e1 100644 --- a/cli/pcluster/cfnconfig.py +++ b/cli/pcluster/cfnconfig.py @@ -360,7 +360,7 @@ def __init_vpc_parameters(self): "VPC section [%s] used in [%s] section is not defined" % (vpc_section, self.__cluster_section) ) - def __check_account_capacity(self): + def __check_account_capacity(self): # noqa: C901 """Try to launch the requested number of instances to verify Account limits.""" if self.parameters.get("Scheduler") == "awsbatch" or self.parameters.get("ClusterType", "ondemand") == "spot": return @@ -387,6 +387,9 @@ def __check_account_capacity(self): MaxCount=max_size, ImageId=test_ami_id, SubnetId=subnet_id, + Placement={"GroupName": self.parameters.get("PlacementGroup")} + if self.parameters.get("PlacementGroup") + else {}, DryRun=True, ) except ClientError as e: @@ -410,6 +413,8 @@ def __check_account_capacity(self): "The configured max size parameter {0} exceeds the number of free private IP addresses " "available in the Compute subnet.\n{1}".format(max_size, message) ) + elif code == "InvalidParameterCombination": + self.__fail(message) else: self.__fail( "Unable to check AWS Account limits. 
Please double check your cluster configuration.\n%s" % message From 415f05026fd3f1c5d5d303d83da78df937a331c5 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Thu, 1 Aug 2019 10:33:43 +0200 Subject: [PATCH 027/201] Fix broken sanity check for custom EC2 role https://github.com/aws/aws-parallelcluster/issues/1241 Signed-off-by: Francesco De Martino --- cli/pcluster/config_sanity.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/cli/pcluster/config_sanity.py b/cli/pcluster/config_sanity.py index cdd67a2532..0e67d8d88a 100644 --- a/cli/pcluster/config_sanity.py +++ b/cli/pcluster/config_sanity.py @@ -343,8 +343,7 @@ def validate(self, resource_type, resource_value): # noqa: C901 FIXME "arn:%s:dynamodb:%s:%s:table/parallelcluster-*" % (partition, self.region, account_id), ), ( - ["cloudformation:DescribeStacks"], - ["cloudformation:DescribeStackResource"], + ["cloudformation:DescribeStacks", "cloudformation:DescribeStackResource"], "arn:%s:cloudformation:%s:%s:stack/parallelcluster-*/*" % (partition, self.region, account_id), ), (["s3:GetObject"], "arn:%s:s3:::%s-aws-parallelcluster/*" % (partition, self.region)), From 9834b2eac8db98699b448f372182f7a5cf82a90d Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Thu, 1 Aug 2019 17:35:26 +0200 Subject: [PATCH 028/201] Master Server Launch Template Signed-off-by: Sean Smith --- cloudformation/aws-parallelcluster.cfn.json | 893 ++++++++++---------- 1 file changed, 460 insertions(+), 433 deletions(-) diff --git a/cloudformation/aws-parallelcluster.cfn.json b/cloudformation/aws-parallelcluster.cfn.json index 7c7a1cc286..558adfbb49 100644 --- a/cloudformation/aws-parallelcluster.cfn.json +++ b/cloudformation/aws-parallelcluster.cfn.json @@ -1857,465 +1857,492 @@ "MasterServer": { "Type": "AWS::EC2::Instance", "Properties": { - "InstanceType": { - "Ref": "MasterInstanceType" - }, - "BlockDeviceMappings": [ - { - "DeviceName": "/dev/xvdba", - "VirtualName": "ephemeral0" - }, - { - "DeviceName": "/dev/xvdbb", - "VirtualName": "ephemeral1" - }, - { - "DeviceName": "/dev/xvdbc", - "VirtualName": "ephemeral2" - }, - { - "DeviceName": "/dev/xvdbd", - "VirtualName": "ephemeral3" - }, - { - "DeviceName": "/dev/xvdbe", - "VirtualName": "ephemeral4" - }, - { - "DeviceName": "/dev/xvdbf", - "VirtualName": "ephemeral5" - }, - { - "DeviceName": "/dev/xvdbg", - "VirtualName": "ephemeral6" - }, - { - "DeviceName": "/dev/xvdbh", - "VirtualName": "ephemeral7" - }, - { - "DeviceName": "/dev/xvdbi", - "VirtualName": "ephemeral8" - }, - { - "DeviceName": "/dev/xvdbj", - "VirtualName": "ephemeral9" - }, - { - "DeviceName": "/dev/xvdbk", - "VirtualName": "ephemeral10" - }, - { - "DeviceName": "/dev/xvdbl", - "VirtualName": "ephemeral11" - }, - { - "DeviceName": "/dev/xvdbm", - "VirtualName": "ephemeral12" - }, - { - "DeviceName": "/dev/xvdbn", - "VirtualName": "ephemeral13" - }, - { - "DeviceName": "/dev/xvdbo", - "VirtualName": "ephemeral14" - }, - { - "DeviceName": "/dev/xvdbp", - "VirtualName": "ephemeral15" - }, - { - "DeviceName": "/dev/xvdbq", - "VirtualName": "ephemeral16" - }, - { - "DeviceName": "/dev/xvdbr", - "VirtualName": "ephemeral17" - }, - { - "DeviceName": "/dev/xvdbs", - "VirtualName": "ephemeral18" - }, - { - "DeviceName": "/dev/xvdbt", - "VirtualName": "ephemeral19" - }, - { - "DeviceName": "/dev/xvdbu", - "VirtualName": "ephemeral20" - }, - { - "DeviceName": "/dev/xvdbv", - "VirtualName": "ephemeral21" - }, - { - "DeviceName": "/dev/xvdbw", - "VirtualName": "ephemeral22" + "LaunchTemplate": { + "LaunchTemplateId": { + "Ref": 
"MasterServerLaunchTemplate" }, - { - "DeviceName": "/dev/xvdbx", - "VirtualName": "ephemeral23" + "Version": { + "Fn::GetAtt": [ + "MasterServerLaunchTemplate", + "LatestVersionNumber" + ] + } + } + } + }, + "MasterServerLaunchTemplate": { + "Type": "AWS::EC2::LaunchTemplate", + "Properties": { + "LaunchTemplateData": { + "InstanceType": { + "Ref": "MasterInstanceType" }, - { - "DeviceName": { - "Fn::FindInMap": [ - "OSFeatures", - { - "Ref": "BaseOS" - }, - "RootDevice" - ] + "BlockDeviceMappings": [ + { + "DeviceName": "/dev/xvdba", + "VirtualName": "ephemeral0" + }, + { + "DeviceName": "/dev/xvdbb", + "VirtualName": "ephemeral1" + }, + { + "DeviceName": "/dev/xvdbc", + "VirtualName": "ephemeral2" + }, + { + "DeviceName": "/dev/xvdbd", + "VirtualName": "ephemeral3" + }, + { + "DeviceName": "/dev/xvdbe", + "VirtualName": "ephemeral4" + }, + { + "DeviceName": "/dev/xvdbf", + "VirtualName": "ephemeral5" + }, + { + "DeviceName": "/dev/xvdbg", + "VirtualName": "ephemeral6" + }, + { + "DeviceName": "/dev/xvdbh", + "VirtualName": "ephemeral7" + }, + { + "DeviceName": "/dev/xvdbi", + "VirtualName": "ephemeral8" + }, + { + "DeviceName": "/dev/xvdbj", + "VirtualName": "ephemeral9" + }, + { + "DeviceName": "/dev/xvdbk", + "VirtualName": "ephemeral10" + }, + { + "DeviceName": "/dev/xvdbl", + "VirtualName": "ephemeral11" + }, + { + "DeviceName": "/dev/xvdbm", + "VirtualName": "ephemeral12" + }, + { + "DeviceName": "/dev/xvdbn", + "VirtualName": "ephemeral13" + }, + { + "DeviceName": "/dev/xvdbo", + "VirtualName": "ephemeral14" + }, + { + "DeviceName": "/dev/xvdbp", + "VirtualName": "ephemeral15" + }, + { + "DeviceName": "/dev/xvdbq", + "VirtualName": "ephemeral16" + }, + { + "DeviceName": "/dev/xvdbr", + "VirtualName": "ephemeral17" + }, + { + "DeviceName": "/dev/xvdbs", + "VirtualName": "ephemeral18" + }, + { + "DeviceName": "/dev/xvdbt", + "VirtualName": "ephemeral19" + }, + { + "DeviceName": "/dev/xvdbu", + "VirtualName": "ephemeral20" + }, + { + "DeviceName": "/dev/xvdbv", + "VirtualName": "ephemeral21" + }, + { + "DeviceName": "/dev/xvdbw", + "VirtualName": "ephemeral22" + }, + { + "DeviceName": "/dev/xvdbx", + "VirtualName": "ephemeral23" }, - "Ebs": { - "VolumeSize": { - "Ref": "MasterRootVolumeSize" + { + "DeviceName": { + "Fn::FindInMap": [ + "OSFeatures", + { + "Ref": "BaseOS" + }, + "RootDevice" + ] }, - "VolumeType": "gp2" - } - } - ], - "KeyName": { - "Ref": "KeyName" - }, - "Tags": [ - { - "Key": "Application", - "Value": { - "Ref": "AWS::StackName" + "Ebs": { + "VolumeSize": { + "Ref": "MasterRootVolumeSize" + }, + "VolumeType": "gp2" + } } + ], + "KeyName": { + "Ref": "KeyName" }, - { - "Key": "Name", - "Value": "Master" - }, - { - "Key": "aws-parallelcluster-attributes", - "Value": { - "Fn::Sub": [ - "${BaseOS}, ${Scheduler}, ${version}", + "TagSpecifications": [ + { + "ResourceType": "instance", + "Tags": [ { - "version": { - "Fn::FindInMap": [ - "PackagesVersions", - "default", - "parallelcluster" + "Key": "Application", + "Value": { + "Ref": "AWS::StackName" + } + }, + { + "Key": "Name", + "Value": "Master" + }, + { + "Key": "aws-parallelcluster-attributes", + "Value": { + "Fn::Sub": [ + "${BaseOS}, ${Scheduler}, ${version}", + { + "version": { + "Fn::FindInMap": [ + "PackagesVersions", + "default", + "parallelcluster" + ] + } + } + ] + } + }, + { + "Key": "aws-parallelcluster-networking", + "Value": { + "Fn::Sub": "EFA=${EFA}" + } + }, + { + "Key": "aws-parallelcluster-filesystem", + "Value": { + "Fn::Sub": [ + "efs=${efs}, multiebs=${NumberOfEBSVol}, raid=${raid}, fsx=${fsx}", 
+ { + "efs": { + "Fn::If": [ + "CreateEFSSubstack", + "1", + "0" + ] + }, + "raid": { + "Fn::If": [ + "CreateRAIDSubstack", + "1", + "0" + ] + }, + "fsx": { + "Fn::If": [ + "CreateFSXSubstack", + "1", + "0" + ] + } + } ] } } ] } - }, - { - "Key": "aws-parallelcluster-networking", - "Value": { - "Fn::Sub": "EFA=${EFA}" + ], + "NetworkInterfaces": [ + { + "NetworkInterfaceId": { + "Ref": "MasterENI" + }, + "DeviceIndex": 0 } - }, - { - "Key": "aws-parallelcluster-filesystem", - "Value": { - "Fn::Sub": [ - "efs=${efs}, multiebs=${NumberOfEBSVol}, raid=${raid}, fsx=${fsx}", - { - "efs": { - "Fn::If": [ - "CreateEFSSubstack", - "1", - "0" - ] - }, - "raid": { - "Fn::If": [ - "CreateRAIDSubstack", - "1", - "0" - ] + ], + "ImageId": { + "Fn::If": [ + "UseCustomAMI", + { + "Ref": "CustomAMI" + }, + { + "Fn::FindInMap": [ + "AWSRegionOS2AMI", + { + "Ref": "AWS::Region" }, - "fsx": { - "Fn::If": [ - "CreateFSXSubstack", - "1", - "0" - ] + { + "Ref": "BaseOS" } - } - ] - } - } - ], - "NetworkInterfaces": [ - { - "NetworkInterfaceId": { - "Ref": "MasterENI" - }, - "DeviceIndex": "0" - } - ], - "ImageId": { - "Fn::If": [ - "UseCustomAMI", - { - "Ref": "CustomAMI" - }, - { - "Fn::FindInMap": [ - "AWSRegionOS2AMI", - { - "Ref": "AWS::Region" - }, - { - "Ref": "BaseOS" - } - ] + ] + } + ] + }, + "EbsOptimized": { + "Fn::If": [ + "IsMasterInstanceEbsOpt", + true, + false + ] + }, + "IamInstanceProfile": { + "Name": { + "Ref": "RootInstanceProfile" } - ] - }, - "EbsOptimized": { - "Fn::If": [ - "IsMasterInstanceEbsOpt", - true, - false - ] - }, - "IamInstanceProfile": { - "Ref": "RootInstanceProfile" - }, - "PlacementGroupName": { - "Fn::If": [ - "UseClusterPlacement", - { + }, + "Placement": { + "GroupName": { "Fn::If": [ - "CreatePlacementGroup", - { - "Ref": "DynamicPlacementGroup" - }, - { - "Ref": "PlacementGroup" - } - ] - }, - { - "Ref": "AWS::NoValue" - } - ] - }, - "UserData": { - "Fn::Base64": { - "Fn::Join": [ - "", - [ - "Content-Type: multipart/mixed; boundary=\"==BOUNDARY==\"\n", - "MIME-Version: 1.0\n\n", - "--==BOUNDARY==\n", - "Content-Type: text/cloud-config; charset=\"us-ascii\"\n", - "MIME-Version: 1.0\n\n", - "#cloud-config:\n", - "runcmd:\n", - " - [ sh, -c, 'which yum && echo \"proxy=", + "UseClusterPlacement", { "Fn::If": [ - "UseProxy", + "CreatePlacementGroup", { - "Ref": "ProxyServer" + "Ref": "DynamicPlacementGroup" }, - "_none_" - ] - }, - "\" >> /etc/yum.conf || echo \"Not yum system\"' ]\n", - " - [ sh, -c, 'which apt-get && echo \"Acquire::http::Proxy \\\"", - { - "Fn::If": [ - "UseProxy", { - "Ref": "ProxyServer" - }, - "false" - ] - }, - "\\\";\" >> /etc/apt/apt.conf || echo \"Not apt system\"' ]\n", - "--==BOUNDARY==\n", - "Content-Type: text/x-shellscript; charset=\"us-ascii\"\n", - "MIME-Version: 1.0\n\n", - "#!/bin/bash -x\n\n", - "function error_exit\n", - "{\n", - " cfn-signal ${proxy_args} --exit-code=1 --reason=\"$1\" --stack=", - { - "Ref": "AWS::StackName" - }, - " --resource=MasterServer --region=", - { - "Ref": "AWS::Region" - }, - "\n", - " exit 1\n", - "}\n", - "function vendor_cookbook\n", - "{\n", - " mkdir /tmp/cookbooks\n", - " cd /tmp/cookbooks\n", - " tar -xzf /etc/chef/aws-parallelcluster-cookbook.tgz\n", - " HOME_BAK=\"${HOME}\"\n", - " export HOME=\"/tmp\"\n", - " . 
/tmp/proxy.sh; for d in `ls /tmp/cookbooks`; do cd /tmp/cookbooks/$d;LANG=en_US.UTF-8 /opt/chef/embedded/bin/berks vendor /etc/chef/cookbooks --delete; done;\n", - " export HOME=\"${HOME_BAK}\"\n", - "}\n", - "function bootstrap_instance\n", - "{\n", - " which yum 2>/dev/null; yum=$?\n", - " which apt-get 2>/dev/null; apt=$?\n", - " if [ \"${yum}\" == \"0\" ]; then\n", - " yum -y groupinstall development && yum -y install curl wget jq awscli\n", - " fi\n", - " if [ \"${apt}\" == \"0\" ]; then\n", - " apt-cache search build-essential; apt-get clean; apt-get update; apt-get -y install build-essential curl wget jq python-setuptools awscli\n", - " fi\n", - " [[ ${_region} =~ ^cn- ]] && s3_url=\"cn-north-1.amazonaws.com.cn/cn-north-1-aws-parallelcluster\"", - " which cfn-init 2>/dev/null || ( curl -s -L -o /tmp/aws-cfn-bootstrap-latest.tar.gz https://s3.${s3_url}/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz; easy_install -U /tmp/aws-cfn-bootstrap-latest.tar.gz)\n", - " mkdir -p /etc/chef && chown -R root:root /etc/chef\n", - " curl --retry 3 -L https://www.chef.io/chef/install.sh | bash -s -- -v ${chef_version}\n", - " /opt/chef/embedded/bin/gem install --no-rdoc --no-ri ridley:${ridley_version} berkshelf:${berkshelf_version}\n", - " curl --retry 3 -s -L -o /etc/chef/aws-parallelcluster-cookbook.tgz ${cookbook_url}\n", - " curl --retry 3 -s -L -o /etc/chef/aws-parallelcluster-cookbook.tgz.date ${cookbook_url}.date\n", - " curl --retry 3 -s -L -o /etc/chef/aws-parallelcluster-cookbook.tgz.md5 ${cookbook_url}.md5\n", - " vendor_cookbook\n", - " mkdir /opt/parallelcluster && echo ${parallelcluster_version} | tee /opt/parallelcluster/.bootstrapped\n", - "}\n", - "proxy=", - { - "Ref": "ProxyServer" - }, - "\n", - "custom_cookbook=", - { - "Ref": "CustomChefCookbook" - }, - "\n", - "if [ \"${proxy}\" != \"NONE\" ]; then\n", - " proxy_args=\"--http-proxy=${proxy} --https-proxy=${proxy}\"\n", - " proxy_host=$(echo \"${proxy}\" | awk -F/ '{print $3}' | cut -d: -f1)\n", - " proxy_port=$(echo \"${proxy}\" | awk -F/ '{print $3}' | cut -d: -f2)\n", - " export http_proxy=${proxy}; export https_proxy=${http_proxy}\n", - " export HTTP_PROXY=${proxy}; export HTTPS_PROXY=${http_proxy}\n", - " export no_proxy=169.254.169.254; export NO_PROXY=169.254.169.254\n", - " echo -e \"export http_proxy=${proxy}; export https_proxy=${http_proxy}\nexport HTTP_PROXY=${proxy}; export HTTPS_PROXY=${http_proxy}\nexport no_proxy=169.254.169.254; export NO_PROXY=169.254.169.254\n\" >/tmp/proxy.sh\n", - " echo -e \"[Boto]\nproxy = ${proxy_host}\nproxy_port = ${proxy_port}\n\" >/etc/boto.cfg\n", - "else\n", - " proxy_args=\"\"\n", - " touch /tmp/proxy.sh\n", - "fi\n", - " export _region=", - { - "Ref": "AWS::Region" - }, - "\n", - "s3_url=", - { - "Fn::FindInMap": [ - "Partition2Url", - { - "Ref": "AWS::Partition" - }, - "url" - ] - }, - "\n", - "if [ \"${custom_cookbook}\" != \"NONE\" ]; then\n", - " cookbook_url=${custom_cookbook}\n", - "else\n", - " cookbook_url=https://s3.${_region}.${s3_url}/${_region}-aws-parallelcluster/cookbooks/", - { - "Fn::FindInMap": [ - "PackagesVersions", - "default", - "cookbook" - ] - }, - ".tgz\n", - "fi\n", - "export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/opt/aws/bin\n", - "export parallelcluster_version=aws-parallelcluster-", - { - "Fn::FindInMap": [ - "PackagesVersions", - "default", - "parallelcluster" - ] - }, - "\n", - "export cookbook_version=", - { - "Fn::FindInMap": [ - "PackagesVersions", - "default", - "cookbook" - ] - }, - "\n", - "export 
chef_version=", - { - "Fn::FindInMap": [ - "PackagesVersions", - "default", - "chef" - ] - }, - "\n", - "export ridley_version=", - { - "Fn::FindInMap": [ - "PackagesVersions", - "default", - "ridley" - ] - }, - "\n", - "export berkshelf_version=", - { - "Fn::FindInMap": [ - "PackagesVersions", - "default", - "berkshelf" + "Ref": "PlacementGroup" + } ] }, - "\n", - "if [ -f /opt/parallelcluster/.bootstrapped ]; then\n", - " installed_version=$(cat /opt/parallelcluster/.bootstrapped)\n", - " if [ \"${parallelcluster_version}\" != \"${installed_version}\" ]; then\n", - " bootstrap_instance\n", - " fi\n", - "else\n", - " bootstrap_instance\n", - "fi\n", - "if [ \"${custom_cookbook}\" != \"NONE\" ]; then\n", - " curl --retry 3 -v -L -o /etc/chef/aws-parallelcluster-cookbook.tgz -z \"$(cat /etc/chef/aws-parallelcluster-cookbook.tgz.date)\" ${cookbook_url}\n", - " vendor_cookbook\n", - "fi\n", - "cd /tmp\n", - "# Call CloudFormation\n", - "cfn-init ${proxy_args} -s ", { - "Ref": "AWS::StackName" - }, - " -v -c default -r MasterServer --region ", - { - "Ref": "AWS::Region" - }, - " || error_exit 'Failed to run cfn-init. If --norollback was specified, check /var/log/cfn-init.log and /var/log/cloud-init-output.log.'\n", - "cfn-signal ${proxy_args} --exit-code=0 --reason=\"MasterServer setup complete\" --stack=", - { - "Ref": "AWS::StackName" - }, - " --resource=MasterServerWaitCondition --region=", - { - "Ref": "AWS::Region" - }, - "\n", - "# End of file\n", - "--==BOUNDARY==\n" + "Ref": "AWS::NoValue" + } ] - ] + } + }, + "UserData": { + "Fn::Base64": { + "Fn::Join": [ + "", + [ + "Content-Type: multipart/mixed; boundary=\"==BOUNDARY==\"\n", + "MIME-Version: 1.0\n\n", + "--==BOUNDARY==\n", + "Content-Type: text/cloud-config; charset=\"us-ascii\"\n", + "MIME-Version: 1.0\n\n", + "#cloud-config:\n", + "runcmd:\n", + " - [ sh, -c, 'which yum && echo \"proxy=", + { + "Fn::If": [ + "UseProxy", + { + "Ref": "ProxyServer" + }, + "_none_" + ] + }, + "\" >> /etc/yum.conf || echo \"Not yum system\"' ]\n", + " - [ sh, -c, 'which apt-get && echo \"Acquire::http::Proxy \\\"", + { + "Fn::If": [ + "UseProxy", + { + "Ref": "ProxyServer" + }, + "false" + ] + }, + "\\\";\" >> /etc/apt/apt.conf || echo \"Not apt system\"' ]\n", + "--==BOUNDARY==\n", + "Content-Type: text/x-shellscript; charset=\"us-ascii\"\n", + "MIME-Version: 1.0\n\n", + "#!/bin/bash -x\n\n", + "function error_exit\n", + "{\n", + " cfn-signal ${proxy_args} --exit-code=1 --reason=\"$1\" --stack=", + { + "Ref": "AWS::StackName" + }, + " --resource=MasterServer --region=", + { + "Ref": "AWS::Region" + }, + "\n", + " exit 1\n", + "}\n", + "function vendor_cookbook\n", + "{\n", + " mkdir /tmp/cookbooks\n", + " cd /tmp/cookbooks\n", + " tar -xzf /etc/chef/aws-parallelcluster-cookbook.tgz\n", + " HOME_BAK=\"${HOME}\"\n", + " export HOME=\"/tmp\"\n", + " . 
/tmp/proxy.sh; for d in `ls /tmp/cookbooks`; do cd /tmp/cookbooks/$d;LANG=en_US.UTF-8 /opt/chef/embedded/bin/berks vendor /etc/chef/cookbooks --delete; done;\n", + " export HOME=\"${HOME_BAK}\"\n", + "}\n", + "function bootstrap_instance\n", + "{\n", + " which yum 2>/dev/null; yum=$?\n", + " which apt-get 2>/dev/null; apt=$?\n", + " if [ \"${yum}\" == \"0\" ]; then\n", + " yum -y groupinstall development && yum -y install curl wget jq awscli\n", + " fi\n", + " if [ \"${apt}\" == \"0\" ]; then\n", + " apt-cache search build-essential; apt-get clean; apt-get update; apt-get -y install build-essential curl wget jq python-setuptools awscli\n", + " fi\n", + " [[ ${_region} =~ ^cn- ]] && s3_url=\"cn-north-1.amazonaws.com.cn/cn-north-1-aws-parallelcluster\"", + " which cfn-init 2>/dev/null || ( curl -s -L -o /tmp/aws-cfn-bootstrap-latest.tar.gz https://s3.${s3_url}/cloudformation-examples/aws-cfn-bootstrap-latest.tar.gz; easy_install -U /tmp/aws-cfn-bootstrap-latest.tar.gz)\n", + " mkdir -p /etc/chef && chown -R root:root /etc/chef\n", + " curl --retry 3 -L https://www.chef.io/chef/install.sh | bash -s -- -v ${chef_version}\n", + " /opt/chef/embedded/bin/gem install --no-rdoc --no-ri ridley:${ridley_version} berkshelf:${berkshelf_version}\n", + " curl --retry 3 -s -L -o /etc/chef/aws-parallelcluster-cookbook.tgz ${cookbook_url}\n", + " curl --retry 3 -s -L -o /etc/chef/aws-parallelcluster-cookbook.tgz.date ${cookbook_url}.date\n", + " curl --retry 3 -s -L -o /etc/chef/aws-parallelcluster-cookbook.tgz.md5 ${cookbook_url}.md5\n", + " vendor_cookbook\n", + " mkdir /opt/parallelcluster && echo ${parallelcluster_version} | tee /opt/parallelcluster/.bootstrapped\n", + "}\n", + "proxy=", + { + "Ref": "ProxyServer" + }, + "\n", + "custom_cookbook=", + { + "Ref": "CustomChefCookbook" + }, + "\n", + "if [ \"${proxy}\" != \"NONE\" ]; then\n", + " proxy_args=\"--http-proxy=${proxy} --https-proxy=${proxy}\"\n", + " proxy_host=$(echo \"${proxy}\" | awk -F/ '{print $3}' | cut -d: -f1)\n", + " proxy_port=$(echo \"${proxy}\" | awk -F/ '{print $3}' | cut -d: -f2)\n", + " export http_proxy=${proxy}; export https_proxy=${http_proxy}\n", + " export HTTP_PROXY=${proxy}; export HTTPS_PROXY=${http_proxy}\n", + " export no_proxy=169.254.169.254; export NO_PROXY=169.254.169.254\n", + " echo -e \"export http_proxy=${proxy}; export https_proxy=${http_proxy}\nexport HTTP_PROXY=${proxy}; export HTTPS_PROXY=${http_proxy}\nexport no_proxy=169.254.169.254; export NO_PROXY=169.254.169.254\n\" >/tmp/proxy.sh\n", + " echo -e \"[Boto]\nproxy = ${proxy_host}\nproxy_port = ${proxy_port}\n\" >/etc/boto.cfg\n", + "else\n", + " proxy_args=\"\"\n", + " touch /tmp/proxy.sh\n", + "fi\n", + " export _region=", + { + "Ref": "AWS::Region" + }, + "\n", + "s3_url=", + { + "Fn::FindInMap": [ + "Partition2Url", + { + "Ref": "AWS::Partition" + }, + "url" + ] + }, + "\n", + "if [ \"${custom_cookbook}\" != \"NONE\" ]; then\n", + " cookbook_url=${custom_cookbook}\n", + "else\n", + " cookbook_url=https://s3.${_region}.${s3_url}/${_region}-aws-parallelcluster/cookbooks/", + { + "Fn::FindInMap": [ + "PackagesVersions", + "default", + "cookbook" + ] + }, + ".tgz\n", + "fi\n", + "export PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin:/opt/aws/bin\n", + "export parallelcluster_version=aws-parallelcluster-", + { + "Fn::FindInMap": [ + "PackagesVersions", + "default", + "parallelcluster" + ] + }, + "\n", + "export cookbook_version=", + { + "Fn::FindInMap": [ + "PackagesVersions", + "default", + "cookbook" + ] + }, + "\n", + "export 
chef_version=", + { + "Fn::FindInMap": [ + "PackagesVersions", + "default", + "chef" + ] + }, + "\n", + "export ridley_version=", + { + "Fn::FindInMap": [ + "PackagesVersions", + "default", + "ridley" + ] + }, + "\n", + "export berkshelf_version=", + { + "Fn::FindInMap": [ + "PackagesVersions", + "default", + "berkshelf" + ] + }, + "\n", + "if [ -f /opt/parallelcluster/.bootstrapped ]; then\n", + " installed_version=$(cat /opt/parallelcluster/.bootstrapped)\n", + " if [ \"${parallelcluster_version}\" != \"${installed_version}\" ]; then\n", + " bootstrap_instance\n", + " fi\n", + "else\n", + " bootstrap_instance\n", + "fi\n", + "if [ \"${custom_cookbook}\" != \"NONE\" ]; then\n", + " curl --retry 3 -v -L -o /etc/chef/aws-parallelcluster-cookbook.tgz -z \"$(cat /etc/chef/aws-parallelcluster-cookbook.tgz.date)\" ${cookbook_url}\n", + " vendor_cookbook\n", + "fi\n", + "cd /tmp\n", + "# Call CloudFormation\n", + "cfn-init ${proxy_args} -s ", + { + "Ref": "AWS::StackName" + }, + " -v -c default -r MasterServerLaunchTemplate --region ", + { + "Ref": "AWS::Region" + }, + " || error_exit 'Failed to run cfn-init. If --norollback was specified, check /var/log/cfn-init.log and /var/log/cloud-init-output.log.'\n", + "cfn-signal ${proxy_args} --exit-code=0 --reason=\"MasterServer setup complete\" --stack=", + { + "Ref": "AWS::StackName" + }, + " --resource=MasterServerWaitCondition --region=", + { + "Ref": "AWS::Region" + }, + "\n", + "# End of file\n", + "--==BOUNDARY==\n" + ] + ] + } } } }, From 0b5a3d27c0ac563d73146ad97431ac60f3032afc Mon Sep 17 00:00:00 2001 From: Luca Carrogu Date: Mon, 8 Jul 2019 16:27:31 +0200 Subject: [PATCH 029/201] Use regionalized STS endpoint Signed-off-by: Luca Carrogu --- cli/pcluster/config_sanity.py | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/cli/pcluster/config_sanity.py b/cli/pcluster/config_sanity.py index 0e67d8d88a..0e6866ed93 100644 --- a/cli/pcluster/config_sanity.py +++ b/cli/pcluster/config_sanity.py @@ -41,6 +41,11 @@ def __init__(self, region, aws_access_key_id, aws_secret_access_key): self.aws_access_key_id = aws_access_key_id self.aws_secret_access_key = aws_secret_access_key + def __get_sts_endpoint(self): + return "https://sts.{0}.{1}".format( + self.region, "amazonaws.com.cn" if self.region.startswith("cn-") else "amazonaws.com" + ) + def __get_partition(self): if self.region.startswith("us-gov"): return "aws-us-gov" @@ -291,6 +296,7 @@ def validate(self, resource_type, resource_value): # noqa: C901 FIXME boto3.client( "sts", region_name=self.region, + endpoint_url=self.__get_sts_endpoint(), aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, ) From 47742f926555dc70bc39aefe5788888c3f6fc111 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Mon, 5 Aug 2019 11:35:19 +0200 Subject: [PATCH 030/201] Second round of refactoring --- cli/pcluster/configure/easyconfig.py | 97 +++++------ cli/pcluster/configure/networking.py | 45 +++--- cli/pcluster/configure/utils.py | 3 +- cli/pcluster/subnet_computation.py | 151 ++++++++++++++++++ cli/pcluster/utils.py | 130 +-------------- .../configure/test_pclusterconfigure.py | 47 +----- .../pcluster/configure/test_subnet_cidr.py | 2 +- cli/tox.ini | 2 +- 8 files changed, 218 insertions(+), 259 deletions(-) create mode 100644 cli/pcluster/subnet_computation.py diff --git a/cli/pcluster/configure/easyconfig.py b/cli/pcluster/configure/easyconfig.py index 641203b097..d589b8443a 100644 --- a/cli/pcluster/configure/easyconfig.py +++ b/cli/pcluster/configure/easyconfig.py 
@@ -8,16 +8,13 @@ # or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. -# fmt: off -from __future__ import absolute_import, print_function # isort:skip +from __future__ import absolute_import, print_function +from future import standard_library - -import copy import errno import logging import os import stat -import tempfile import configparser @@ -32,14 +29,10 @@ from pcluster.configure.utils import get_regions, get_resource_tag, handle_client_exception, prompt, prompt_iterable from pcluster.utils import get_supported_os, get_supported_schedulers -from future import standard_library # isort:skip - - -# fmt: on standard_library.install_aliases() -LOGGER = logging.getLogger("pcluster.pcluster") +LOGGER = logging.getLogger(__name__) DEFAULT_VALUES = { "aws_region_name": "us-east-1", "cluster_template": "default", @@ -48,7 +41,6 @@ "max_size": "10", "master_instance_type": "t2.micro", "compute_instance_type": "t2.micro", - "vpc_name": "public", "min_size": "0", } VPC_PARAMETERS_TO_REMOVE = "vpc-id", "master_subnet_id", "compute_subnet_id", "use_public_ips", "compute_subnet_cidr" @@ -80,7 +72,7 @@ def _extract_subnet_size(cidr): @handle_client_exception def _get_vpcs_and_subnets(aws_region_name): """ - Return a dictionary containg a list of vpc in the given region and the associated vpcs. + Return a dictionary containing a list of vpc in the given region and the associated VPCs. Example: @@ -127,7 +119,7 @@ def _list_instances(): # Specifying the region does not make any difference return ec2_conn(DEFAULT_VALUES["aws_region_name"]).meta.service_model.shape_for("InstanceType").enum -def configure(args): # noqa: C901 FIXME!!! +def configure(args): # Determine config file name based on args or default config_file = ( args.config_file if args.config_file else os.path.expanduser(os.path.join("~", ".parallelcluster", "config")) @@ -138,19 +130,12 @@ def configure(args): # noqa: C901 FIXME!!! if os.path.isfile(config_file): config.read(config_file) - # Prompt for required values, using existing as defaults cluster_template = DEFAULT_VALUES["cluster_template"] cluster_label = "cluster " + cluster_template vpc_label = "vpc " + cluster_template # Use built in boto regions as an available option - aws_region_name = prompt_iterable( - "AWS Region ID", - get_regions(), - default_value=_get_config_parameter( - config, section="aws", parameter_name="aws_region_name", default_value=DEFAULT_VALUES["aws_region_name"] - ), - ) + aws_region_name = prompt_iterable("AWS Region ID", get_regions()) scheduler = prompt_iterable( "Scheduler", @@ -185,14 +170,14 @@ def configure(args): # noqa: C901 FIXME!!! vpc_label, aws_region_name, scheduler, scheduler_handler.max_cluster_size, automate_vpc_creation=automate_vpc ) global_parameters = { - "__name__": "global", + "name": "global", "cluster_template": cluster_template, "update_check": "true", "sanity_check": "true", } - aws_parameters = {"__name__": "aws", "aws_region_name": aws_region_name} + aws_parameters = {"name": "aws", "aws_region_name": aws_region_name} cluster_parameters = { - "__name__": cluster_label, + "name": cluster_label, "key_name": key_name, "vpc_settings": cluster_template, "scheduler": scheduler, @@ -200,24 +185,28 @@ def configure(args): # noqa: C901 FIXME!!! 
} cluster_parameters.update(scheduler_handler.get_scheduler_parameters()) - aliases_parameters = {"__name__": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} + aliases_parameters = {"name": "aliases", "ssh": "ssh {CFN_USER}@{MASTER_IP} {ARGS}"} sections = [aws_parameters, cluster_parameters, vpc_parameters, global_parameters, aliases_parameters] - # We first remove unnecessary parameters from the past configurations + # We remove parameters that may still be present from the past configuration but can conflict with the current. _remove_parameter_from_past_configuration(cluster_label, config, scheduler_handler.get_parameters_to_remove()) _remove_parameter_from_past_configuration(vpc_label, config, VPC_PARAMETERS_TO_REMOVE) - # Loop through the configuration sections we care about - for section in sections: - try: - config.add_section(section["__name__"]) - except configparser.DuplicateSectionError: - pass - for key, value in section.items(): - # Only update configuration if not set - if value is not None and key != "__name__": - config.set(section["__name__"], key, value) + _write_config(config, sections) + _check_destination_directory(config_file) + + # Write configuration to disk + with open(config_file, "w") as cf: + config.write(cf) + os.chmod(config_file, stat.S_IRUSR | stat.S_IWUSR) + args.config_file = config_file + args.cluster_template = cluster_template + if _is_config_valid(args): + print("The configuration is valid") + + +def _check_destination_directory(config_file): # ensure that the directory for the config file exists (because # ~/.parallelcluster is likely not to exist on first usage) try: @@ -226,14 +215,17 @@ def configure(args): # noqa: C901 FIXME!!! if e.errno != errno.EEXIST: raise # can safely ignore EEXISTS for this purpose... - # Write configuration to disk - open(config_file, "a").close() - os.chmod(config_file, stat.S_IRUSR | stat.S_IWUSR) - with open(config_file, "w") as cf: - config.write(cf) - if _is_config_valid(args, config): - print("The configuration is valid") +def _write_config(config, sections): + for section in sections: + try: + config.add_section(section["name"]) + except configparser.DuplicateSectionError: + pass + for key, value in section.items(): + # Only update configuration if not set + if value is not None and key != "name": + config.set(section["name"], key, value) def _remove_parameter_from_past_configuration(section, config, parameters_to_remove): @@ -243,7 +235,7 @@ def _remove_parameter_from_past_configuration(section, config, parameters_to_rem def _create_vpc_parameters(vpc_label, aws_region_name, scheduler, min_subnet_size, automate_vpc_creation=True): - vpc_parameters = {"__name__": vpc_label} + vpc_parameters = {"name": vpc_label} min_subnet_size = int(min_subnet_size) if automate_vpc_creation: vpc_parameters.update( @@ -289,30 +281,19 @@ def _ask_for_subnets(subnet_list): return vpc_parameters -def _is_config_valid(args, config): +def _is_config_valid(args): """ Validate the configuration of the pcluster configure. 
:param args: the arguments passed with the command line - :param config: the configParser :return True if the configuration is valid, false otherwise """ - # We create a temp_file_path to validate before overriding the original config - temp_file_path = os.path.join(tempfile.gettempdir(), "temp_config") - temp_args = copy.copy(args) # Defensive copy is needed because we change config_file - - temp_args.config_file = temp_file_path - with open(temp_file_path, "w+") as cf: - config.write(cf) - # Verify the configuration is_valid = True try: - cfnconfig.ParallelClusterConfig(temp_args) + cfnconfig.ParallelClusterConfig(args) except SystemExit: is_valid = False - finally: - os.remove(temp_file_path) - return is_valid + return is_valid def _get_config_parameter(config, section, parameter_name, default_value): diff --git a/cli/pcluster/configure/networking.py b/cli/pcluster/configure/networking.py index b5da06f612..4e144949cf 100644 --- a/cli/pcluster/configure/networking.py +++ b/cli/pcluster/configure/networking.py @@ -8,7 +8,6 @@ # or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. -# fmt: off from future.backports import datetime import abc @@ -22,23 +21,18 @@ from pcluster.configure.utils import handle_client_exception from pcluster.networking.vpc_factory import VpcFactory -from pcluster.utils import ( - evaluate_cidr, - get_stack_output_value, - get_subnet_cidr, - get_templates_bucket_path, - verify_stack_creation, -) +from pcluster.subnet_computation import evaluate_cidr, get_subnet_cidr +from pcluster.utils import get_stack_output_value, get_templates_bucket_path, verify_stack_creation DEFAULT_AWS_REGION_NAME = "us-east-1" -LOGGER = logging.getLogger("pcluster.pcluster") +LOGGER = logging.getLogger(__name__) TIMESTAMP = "-{:%Y%m%d%H%M%S}".format(datetime.datetime.utcnow()) MASTER_SUBNET_IPS = 250 if sys.version_info >= (3, 4): ABC = abc.ABC else: - ABC = abc.ABCMeta('ABC', (), {}) + ABC = abc.ABCMeta("ABC", (), {}) class BaseNetworkConfig(ABC): @@ -88,7 +82,7 @@ def __init__(self): super(PublicNetworkConfig, self).__init__( config_type="Master and compute fleet in the same public subnet", template_name="public", - stack_name_prefix="pub" + stack_name_prefix="pub", ) def get_cfn_parameters(self, aws_region_name, vpc_id, internet_gateway_id, public_cidr): @@ -97,7 +91,7 @@ def get_cfn_parameters(self, aws_region_name, vpc_id, internet_gateway_id, publi parameters.append(super(PublicNetworkConfig, self)._build_cfn_param("PublicCIDR", public_cidr)) return parameters - def _create(self, aws_region_name, vpc_id, vpc_cidr, subnet_cidrs, internet_gateway_id, compute_subnet_size): # noqa D102 + def _create(self, aws_region_name, vpc_id, vpc_cidr, subnet_cidrs, internet_gateway_id, compute_subnet_size): public_cidr = get_subnet_cidr( vpc_cidr=vpc_cidr, occupied_cidr=subnet_cidrs, min_subnet_size=compute_subnet_size + MASTER_SUBNET_IPS ) @@ -114,7 +108,7 @@ def __init__(self): super(PublicPrivateNetworkConfig, self).__init__( config_type="Master in a public subnet and compute fleet in a private subnet", template_name="public-private", - stack_name_prefix="pubpriv" + stack_name_prefix="pubpriv", ) def get_cfn_parameters(self, aws_region_name, vpc_id, internet_gateway_id, public_cidr, private_cidr): @@ -126,10 +120,10 @@ def get_cfn_parameters(self, aws_region_name, vpc_id, 
internet_gateway_id, publi parameters.append(super(PublicPrivateNetworkConfig, self)._build_cfn_param("PrivateCIDR", private_cidr)) return parameters - def _create(self, aws_region_name, vpc_id, vpc_cidr, subnet_cidrs, internet_gateway_id, compute_subnet_size): # noqa D102 - public_cidr = evaluate_cidr( - vpc_cidr=vpc_cidr, occupied_cidrs=subnet_cidrs, target_size=MASTER_SUBNET_IPS - ) + def _create( + self, aws_region_name, vpc_id, vpc_cidr, subnet_cidrs, internet_gateway_id, compute_subnet_size + ): # noqa D102 + public_cidr = evaluate_cidr(vpc_cidr=vpc_cidr, occupied_cidrs=subnet_cidrs, target_size=MASTER_SUBNET_IPS) _validate_cidr(public_cidr) subnet_cidrs.append(public_cidr) private_cidr = get_subnet_cidr( @@ -164,9 +158,8 @@ def _create_network_stack(aws_region_name, configuration, parameters): cfn = boto3.client("cloudformation", region_name=aws_region_name) stack = cfn.create_stack( StackName=stack_name, - TemplateURL=get_templates_bucket_path(aws_region_name) + "networking/%s-%s.cfn.json" % ( - configuration.template_name, version - ), + TemplateURL=get_templates_bucket_path(aws_region_name) + + "networking/%s-%s.cfn.json" % (configuration.template_name, version), Parameters=parameters, Capabilities=["CAPABILITY_IAM"], ) @@ -180,8 +173,10 @@ def _create_network_stack(aws_region_name, configuration, parameters): return cfn.describe_stacks(StackName=stack_name).get("Stacks")[0]["Outputs"] except KeyboardInterrupt: print() - LOGGER.info("Unable to update the configuration file with the selected network configuration. " - "Please manually check the status of the CloudFormation stack: {0}".format(stack_name)) + LOGGER.info( + "Unable to update the configuration file with the selected network configuration. " + "Please manually check the status of the CloudFormation stack: {0}".format(stack_name) + ) except Exception as e: # Any exception is a problem print() LOGGER.error( @@ -215,11 +210,7 @@ def _get_internet_gateway_id(aws_region_name, vpc_id): response = ec2_conn(aws_region_name).describe_internet_gateways( Filters=[{"Name": "attachment.vpc-id", "Values": [vpc_id]}] ) - return ( - response["InternetGateways"][0]["InternetGatewayId"] - if response["InternetGateways"] - else "" - ) + return response["InternetGateways"][0]["InternetGatewayId"] if response["InternetGateways"] else "" @handle_client_exception diff --git a/cli/pcluster/configure/utils.py b/cli/pcluster/configure/utils.py index 13944e75b6..7881aacf9d 100644 --- a/cli/pcluster/configure/utils.py +++ b/cli/pcluster/configure/utils.py @@ -8,7 +8,6 @@ # or in the 'LICENSE.txt' file accompanying this file. This file is distributed on an 'AS IS' BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. -# fmt: off import functools import logging import sys @@ -17,7 +16,7 @@ import boto3 from botocore.exceptions import BotoCoreError, ClientError -LOGGER = logging.getLogger("pcluster.pcluster") +LOGGER = logging.getLogger(__name__) unsupported_regions = ["ap-northeast-3"] diff --git a/cli/pcluster/subnet_computation.py b/cli/pcluster/subnet_computation.py new file mode 100644 index 0000000000..faaaa83d7a --- /dev/null +++ b/cli/pcluster/subnet_computation.py @@ -0,0 +1,151 @@ +# Copyright 2018 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance with +# the License. 
A copy of the License is located at
+#
+# http://aws.amazon.com/apache2.0/
+#
+# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES
+# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and
+# limitations under the License.
+from __future__ import unicode_literals
+
+from ipaddress import ip_address, ip_network, summarize_address_range
+
+
+# py2.7 compatibility
+def unicode(ip):
+    return "{0}".format(ip)
+
+
+def get_subnet_cidr(vpc_cidr, occupied_cidr, min_subnet_size):
+    """
+    Decide the CIDR of the ParallelCluster compute fleet subnet.
+
+    :param vpc_cidr: the vpc_cidr in which the suitable subnet should be
+    :param occupied_cidr: a list of cidr of the already occupied subnets in the vpc
+    :param min_subnet_size: the minimum size of the subnet
+    :return: the CIDR of the suitable subnet if found, else None
+    """
+    default_target_size = 4000
+    target_size = max(default_target_size, 2 * min_subnet_size)
+    cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size)
+    while cidr is None:
+        if target_size < min_subnet_size:
+            return None
+        target_size = target_size // 2
+        cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size)
+    return cidr
+
+
+def evaluate_cidr(vpc_cidr, occupied_cidrs, target_size):
+    """
+    Decide the first smallest suitable CIDR for a subnet with size >= target_size.
+
+    To find a gap between the existing subnets, we first make every occupied subnet at least as big as the one we are
+    targeting: if a subnet is smaller than target_size, we replace it with the bigger subnet it is part of.
+
+    After that, we sort the subnets by CIDR and then look for space between the end of one subnet and the beginning of
+    the next, also checking for space at the beginning and at the end of the VPC range.
+
+    :param vpc_cidr: the vpc_cidr in which the suitable subnet should be
+    :param occupied_cidrs: a list of cidr of the already occupied subnets in the vpc
+    :param target_size: the minimum target size of the subnet
+    :return: the suitable CIDR if found, else None
+    """
+    subnet_size, subnet_bitmask = _evaluate_subnet_size(target_size)
+    vpc_begin_address_decimal, vpc_end_address_decimal = _get_cidr_limits_as_decimal(vpc_cidr)
+
+    # if we do not have enough space
+    if vpc_end_address_decimal - vpc_begin_address_decimal + 1 < subnet_size:
+        return None
+
+    # if we have space and no occupied cidr
+    if not occupied_cidrs:
+        return _decimal_ip_limits_to_cidr(vpc_begin_address_decimal, vpc_begin_address_decimal + subnet_size)
+
+    lower_limit_index = 0
+    upper_limit_index = 1
+
+    # Get subnets limits
+    occupied_cidrs = _align_subnet_cidrs(occupied_cidrs, subnet_bitmask)
+    subnets_limits = [_get_cidr_limits_as_decimal(subnet) for subnet in occupied_cidrs]
+    subnets_limits.sort(key=lambda x: x[upper_limit_index])
+
+    # Looking at space between occupied cidrs
+    resulting_cidr = None
+
+    subnets_limits.append((vpc_end_address_decimal, vpc_end_address_decimal))
+    for index in range(0, len(subnets_limits)):
+        current_lower_limit = subnets_limits[index][lower_limit_index]
+        # In the first case, vpc_begin_address is free, whereas upper_limit_index is not
+        previous_upper_limit = (
+            subnets_limits[index - 1][upper_limit_index] if index > 0 else vpc_begin_address_decimal - 1
+        )
+        if current_lower_limit - previous_upper_limit > subnet_size:
+            resulting_cidr = _decimal_ip_limits_to_cidr(previous_upper_limit + 1, previous_upper_limit + subnet_size)
+            break
+
+    return resulting_cidr
+
+
+def _align_subnet_cidrs(occupied_cidr, target_bitmask):
+    """Expand the subnet cidrs that are smaller than the target size so that they align to the target bitmask."""
+    correct_cidrs = set()
+    for subnet_cidr in occupied_cidr:
+        if _get_bitmask(subnet_cidr) > target_bitmask:
+            correct_cidrs.add(expand_cidr(subnet_cidr, target_bitmask))
+        else:
+            correct_cidrs.add(subnet_cidr)
+    return list(correct_cidrs)
+
+
+def _get_bitmask(cidr):
+    return int(cidr.split("/")[1])
+
+
+def _evaluate_subnet_size(target_size):
+    aws_reserved_ip = 6
+    min_bitmask = 28
+    subnet_bitmask = min(32 - ((next_power_of_2(target_size + aws_reserved_ip) - 1).bit_length()), min_bitmask)
+    subnet_size = 2 ** (32 - subnet_bitmask)
+    return subnet_size, subnet_bitmask
+
+
+def _decimal_ip_limits_to_cidr(begin, end):
+    """Given begin and end ip (as decimal numbers), return the CIDR that begins with begin ip and ends with end ip."""
+    return str(next(summarize_address_range(ip_address(begin), ip_address(end))))
+
+
+def _get_cidr_limits_as_decimal(cidr):
+    """
+    Given a cidr, return the begin ip and the end ip as decimal.
+
+    For example, given the cidr 10.0.0.0/24, it will return 167772160, which is 10.0.0.0, and 167772415,
+    which is 10.0.0.255.
+    :param cidr: the cidr to convert
+    :return: a tuple (decimal begin address, decimal end address)
+    """
+    address = ip_network(unicode(cidr))
+    return _ip_to_decimal(str(address[0])), _ip_to_decimal(str(address[-1]))
+
+
+def _ip_to_decimal(ip):
+    """Transform an ip into its decimal representation."""
+    return int(ip_address(unicode(ip)))
+
+
+def expand_cidr(cidr, new_size):
+    """
+    Given a cidr, upgrade its netmask to new_size.
+ + :param cidr: the list of cidr to promote + :param new_size: the minimum bitmask required + """ + ip_addr = ip_network(unicode(cidr)) + return str(ip_addr.supernet(new_prefix=new_size)) + + +def next_power_of_2(x): + """Given a number returns the following power of 2 of that number.""" + return 1 if x == 0 else 2 ** (x - 1).bit_length() diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index 185356fdbd..9c17c354dd 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -8,7 +8,7 @@ # or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. -from __future__ import absolute_import, print_function, unicode_literals +from __future__ import absolute_import, print_function import json import logging @@ -17,7 +17,6 @@ import time import zipfile from io import BytesIO -from ipaddress import ip_address, ip_network, summarize_address_range import boto3 from botocore.exceptions import ClientError @@ -207,133 +206,6 @@ def get_supported_schedulers(): return "sge", "torque", "slurm", "awsbatch" -def next_power_of_2(x): - """Given a number returns the following power of 2 of that number.""" - return 1 if x == 0 else 2 ** (x - 1).bit_length() - - -def get_subnet_cidr(vpc_cidr, occupied_cidr, min_subnet_size): - """ - Decide the parallelcluster subnet size of the compute fleet. - - :param vpc_cidr: the vpc_cidr in which the suitable subnet should be - :param occupied_cidr: a list of cidr of the already occupied subnets in the vpc - :param min_subnet_size: the minimum size of the subnet - :return: - """ - default_target_size = 4000 - target_size = max(default_target_size, 2 * min_subnet_size) - cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size) - while cidr is None: - if target_size < min_subnet_size: - return None - target_size = target_size // 2 - cidr = evaluate_cidr(vpc_cidr, occupied_cidr, target_size) - return cidr - - -def evaluate_cidr(vpc_cidr, occupied_cidrs, target_size): - """ - Decide the first smallest suitable CIDR for a subnet with size >= target_size. 
- - :param vpc_cidr: the vpc_cidr in which the suitable subnet should be - :param occupied_cidrs: a list of cidr of the already occupied subnets in the vpc - :param target_size: the minimum target size of the subnet - :return: the suitable CIDR if found, else None - """ - subnet_size, subnet_bitmask = _evaluate_subnet_size(target_size) - vpc_begin_address_decimal, vpc_end_address_decimal = _get_cidr_limits_as_decimal(vpc_cidr) - - # if we do not have enough space - if vpc_end_address_decimal - vpc_begin_address_decimal + 1 < subnet_size: - return None - - # if we have space and no occupied cidr - if not occupied_cidrs: - return _decimal_ip_limits_to_cidr(vpc_begin_address_decimal, vpc_begin_address_decimal + subnet_size) - - lower_limit_index = 0 - upper_limit_index = 1 - - # Get subnets limits - occupied_cidrs = _align_subnet_cidrs(occupied_cidrs, subnet_bitmask) - subnets_limits = [_get_cidr_limits_as_decimal(subnet) for subnet in occupied_cidrs] - subnets_limits.sort(key=lambda x: x[upper_limit_index]) - - # Looking at space between occupied cidrs - resulting_cidr = None - - subnets_limits.append((vpc_end_address_decimal, vpc_end_address_decimal)) - for index in range(0, len(subnets_limits)): - current_lower_limit = subnets_limits[index][lower_limit_index] - # In the first case, vpc_begin_address is free, whereas upper_limit_index is not - previous_upper_limit = ( - subnets_limits[index - 1][upper_limit_index] if index > 0 else vpc_begin_address_decimal - 1 - ) - if current_lower_limit - previous_upper_limit > subnet_size: - resulting_cidr = _decimal_ip_limits_to_cidr(previous_upper_limit + 1, previous_upper_limit + subnet_size) - break - - return resulting_cidr - - -def _align_subnet_cidrs(occupied_cidr, target_bitmask): - """Transform the subnet cidr that are smaller than the minimum bitmask to bigger ones.""" - correct_cidrs = set() - for subnet_cidr in occupied_cidr: - if _get_bitmask(subnet_cidr) > target_bitmask: - correct_cidrs.add(expand_cidr(subnet_cidr, target_bitmask)) - else: - correct_cidrs.add(subnet_cidr) - return list(correct_cidrs) - - -def _get_bitmask(cidr): - return int(cidr.split("/")[1]) - - -def _evaluate_subnet_size(target_size): - aws_reserved_ip = 6 - min_bitmask = 28 - subnet_bitmask = min(32 - ((next_power_of_2(target_size + aws_reserved_ip) - 1).bit_length()), min_bitmask) - subnet_size = 2 ** (32 - subnet_bitmask) - return subnet_size, subnet_bitmask - - -def _decimal_ip_limits_to_cidr(begin, end): - """Given begin and end ip (as decimals number), return the CIDR that begins with begin ip and ends with end ip.""" - return str(next(summarize_address_range(ip_address(begin), ip_address(end)))) - - -def _get_cidr_limits_as_decimal(cidr): - """Given a cidr, return the begin ip and the end ip as decimal.""" - address = ip_network(unicode(cidr)) - return _ip_to_decimal(str(address[0])), _ip_to_decimal(str(address[-1])) - - -def _ip_to_decimal(ip): - """Transform an ip into its decimal representantion.""" - return int(ip_address(unicode(ip))) - - -def expand_cidr(cidr, new_size): - """ - Given a list of cidrs, it upgrade the netmask of each one to min_size and returns the updated cidrs. - - For example, given the list of cidrs ["10.0.0.0/24", "10.0.4.0/23"] and min_size = 23, the resulting updated cidrs - will be ["10.0.0.0/23", "10.0.4.0/23]. Notice that any duplicate of the updated list will be removed. 
- :param cidr: the list of cidr to promote - :param new_size: the minimum bitmask required - """ - ip_addr = ip_network(unicode(cidr)) - return str(ip_addr.supernet(new_prefix=new_size)) - - -# py2.7 compatibility -def unicode(ip): - return "{0}".format(ip) - - def get_stack_output_value(stack_outputs, output_key): """ Get output value from Cloudformation Stack Output. diff --git a/cli/tests/pcluster/configure/test_pclusterconfigure.py b/cli/tests/pcluster/configure/test_pclusterconfigure.py index 6423e285ab..a1e2e8d1f2 100644 --- a/cli/tests/pcluster/configure/test_pclusterconfigure.py +++ b/cli/tests/pcluster/configure/test_pclusterconfigure.py @@ -107,10 +107,14 @@ def _side_effect_function(aws_region_name, config, parameters): mocker.patch(NETWORKING + "_create_network_stack", side_effect=_side_effect_function) +def _mock_parallel_cluster_config(mocker): + mocker.patch(EASYCONFIG + "cfnconfig.ParallelClusterConfig") + + def _launch_config(mocker, path, remove_path=True): if remove_path and os.path.isfile(path): os.remove(path) - args = mocker.Mock + args = mocker.MagicMock(autospec=True) args.config_file = path configure(args) @@ -135,14 +139,6 @@ def _are_configurations_equals(path_verify, path_verified): return True -def _write_output_and_error(capsys, error_path, output_path): - readouterr = capsys.readouterr() - with open(error_path, "w+") as file: - file.write(readouterr.err) - with open(output_path, "w+") as file: - file.write(readouterr.out) - - def _are_output_error_correct(capsys, output, error): readouterr = capsys.readouterr() with open(output) as f: @@ -192,6 +188,7 @@ def __init__(self, mocker, empty_region=False): _mock_aws_region(self.mocker) _mock_list_keys(self.mocker) _mock_list_vpcs_and_subnets(self.mocker, empty_region) + _mock_parallel_cluster_config(self.mocker) def add_subnet_automation(self, public_subnet_id, is_a_valid_vpc=True, private_subnet_id=None): _mock_vpc_factory(self.mocker, is_a_valid_vpc) @@ -215,38 +212,6 @@ def _verify_test(mocker, capsys, output, error, config, temp_path_for_config): os.remove(temp_path_for_config) -# note that user_prompt passed to input will not be shown. -def create_new_test(mocker, capsys): - """ - Create a new test for the pcluster configure. - - You have to be sure that pcluster configure is correct when you use this function. You will also have to check - output manually. 
Note that it does not print user_prompt passed as input, but neither does all the tests - """ - test_name = "test_vpc_automation_no_vpc_in_region_public" - config_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "test") - error_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "error.txt") - output_path = os.path.join(os.getcwd(), "test_pclusterconfigure", test_name, "output.txt") - - mock_handler = MockHandler(mocker, empty_region=True) - mock_handler.add_subnet_automation(public_subnet_id="subnet-pu") - input_composer = ComposeInput(aws_region_name="eu-west-1", scheduler="slurm") - input_composer.add_first_flow( - op_sys="centos6", - min_size="13", - max_size="14", - master_instance="t2.nano", - compute_instance="t2.micro", - key="key1", - ) - input_composer.add_vpc_sub_automation_empty_region(network_configuration=PUBLIC_CONFIGURATION) - input_composer.finalize_config(mocker) - - _launch_config(mocker, config_path) - _write_output_and_error(capsys, error_path, output_path) - assert_that(True).is_true() - - def test_no_automation_no_awsbatch_no_errors(mocker, capsys, test_datadir): config, error, output = get_file_path(test_datadir) diff --git a/cli/tests/pcluster/configure/test_subnet_cidr.py b/cli/tests/pcluster/configure/test_subnet_cidr.py index 9c1973fdea..330ebaa563 100644 --- a/cli/tests/pcluster/configure/test_subnet_cidr.py +++ b/cli/tests/pcluster/configure/test_subnet_cidr.py @@ -1,5 +1,5 @@ from assertpy import assert_that -from pcluster.utils import evaluate_cidr, get_subnet_cidr +from pcluster.subnet_computation import evaluate_cidr, get_subnet_cidr def test_empty_vpc(): diff --git a/cli/tox.ini b/cli/tox.ini index fab7965601..5a6e1e082b 100644 --- a/cli/tox.ini +++ b/cli/tox.ini @@ -17,7 +17,7 @@ commands = # Running with discover and not unittest discover for Python 2.6 compatibility python -m discover -s tests/pcluster -p "*_test.py" # awsbatch-cli is not currently compatible with Python2.6 - py{27,34,35,36,37}: py.test -l -v --basetemp={envtmpdir} --html=report.html --cov={envsitepackagesdir}/awsbatch + py{27,34,35,36,37}: py.test -l -v --basetemp={envtmpdir} --html=report.html --cov={envsitepackagesdir}/awsbatch tests/ --cov={envsitepackagesdir}/pcluster # Section used to define common variables used by multiple testenvs. 
[vars] From 4dfe89ca1d9750940f2e7af35adf333557fc5fb7 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Tue, 6 Aug 2019 10:49:43 +0200 Subject: [PATCH 031/201] integ tests: fixes after pcluster configure merge Signed-off-by: Francesco De Martino --- tests/integration-tests/conftest.py | 10 +++++++--- .../tests/networking/test_networking.py | 2 +- 2 files changed, 8 insertions(+), 4 deletions(-) diff --git a/tests/integration-tests/conftest.py b/tests/integration-tests/conftest.py index d57b5ea895..5cca72a19d 100644 --- a/tests/integration-tests/conftest.py +++ b/tests/integration-tests/conftest.py @@ -357,14 +357,14 @@ def vpc_stacks(cfn_stacks_factory, request): availability_zone = random.choice(AVAILABILITY_ZONE_OVERRIDES.get(region, [None])) # defining subnets per region to allow AZs override public_subnet = SubnetConfig( - name="PublicSubnet", + name="Public", cidr="10.0.124.0/22", # 1,022 IPs map_public_ip_on_launch=True, has_nat_gateway=True, default_gateway=Gateways.INTERNET_GATEWAY, ) private_subnet = SubnetConfig( - name="PrivateSubnet", + name="Private", cidr="10.0.128.0/17", # 32766 IPs map_public_ip_on_launch=False, has_nat_gateway=False, @@ -379,7 +379,11 @@ def vpc_stacks(cfn_stacks_factory, request): # If stack creation fails it'll retry once more. This is done to mitigate failures due to resources # not available in randomly picked AZs. -@retry(stop_max_attempt_number=2, wait_fixed=5000) +@retry( + stop_max_attempt_number=2, + wait_fixed=5000, + retry_on_exception=lambda exception: not isinstance(exception, KeyboardInterrupt), +) def _create_vpc_stack(request, template, region, cfn_stacks_factory): if request.config.getoption("vpc_stack"): logging.info("Using stack {0} in region {1}".format(request.config.getoption("vpc_stack"), region)) diff --git a/tests/integration-tests/tests/networking/test_networking.py b/tests/integration-tests/tests/networking/test_networking.py index 70f23304eb..3120f972b5 100644 --- a/tests/integration-tests/tests/networking/test_networking.py +++ b/tests/integration-tests/tests/networking/test_networking.py @@ -23,7 +23,7 @@ @pytest.fixture() def networking_stack_factory(request): """Define a fixture to manage the creation and destruction of CloudFormation stacks.""" - factory = CfnStacksFactory() + factory = CfnStacksFactory(request.config.getoption("credential")) def _create_network(region, template_path, parameters): file_content = extract_template(template_path) From 3be5ce662cc25c2e8fce8d4bcfa42629eede8341 Mon Sep 17 00:00:00 2001 From: Luca Carrogu Date: Fri, 2 Aug 2019 16:08:17 +0200 Subject: [PATCH 032/201] Update CHANGELOG for "Speed up cluster creation" enhancement Signed-off-by: Luca Carrogu --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 3f967702dc..d4fec2bc32 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -16,7 +16,7 @@ x.x.x * Add support for ap-east-1 region (Hong Kong) * Add possibility to specify instance type to use when building custom AMIs with ``pcluster createami`` -* Speed up cluster creation by having compute nodes starting together with master node +* Speed up cluster creation by having compute nodes starting together with master node. 
**Note** this requires one new IAM permissions in the `ParallelClusterInstancePolicy `_, ``cloudformation:DescribeStackResource`` * Enable ASG CloudWatch metrics for the ASG managing compute nodes * Install Intel MPI 2019u4 on Amazon Linux, Centos 7 and Ubuntu 1604 * Upgrade Elastic Fabric Adapter (EFA) to version 1.4.1 that supports Intel MPI From c75d63d2e44c08369e58d3549111726d3bae8bac Mon Sep 17 00:00:00 2001 From: ddeidda Date: Wed, 31 Jul 2019 15:52:50 +0200 Subject: [PATCH 033/201] parameters check for integration tests Signed-off-by: ddeidda --- tests/integration-tests/test_runner.py | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/tests/integration-tests/test_runner.py b/tests/integration-tests/test_runner.py index b0c668ce64..8d3cd4053a 100644 --- a/tests/integration-tests/test_runner.py +++ b/tests/integration-tests/test_runner.py @@ -352,6 +352,17 @@ def _run_parallel(args): job.join() +def _check_args(args): + # If --cluster is set only one os, scheduler, instance type and region can be provided + if args.cluster: + if len(args.oss) > 1 or len(args.schedulers) > 1 or len(args.instances) > 1 or len(args.regions) > 1: + logger.error( + "when cluster option is specified, you can have a single value for oss, regions, instances " + "and schedulers and you need to make sure they match the cluster specific ones" + ) + exit(1) + + def _run_sequential(args): # Redirect stdout to file if not args.show_output: @@ -369,6 +380,7 @@ def main(): exit(1) args = _init_argparser().parse_args() + _check_args(args) logger.info("Starting tests with parameters {0}".format(args)) _make_logging_dirs(args.output_dir) From 5cfda604cccff7a0366f232fa4f89aa626c8bc4e Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Wed, 7 Aug 2019 15:54:51 +0200 Subject: [PATCH 034/201] Fixed a problem in the integration test for networking Signed-off-by: Matteo Fiordarancio --- .../integration-tests/tests/networking/test_networking.py | 8 ++++++++ 1 file changed, 8 insertions(+) diff --git a/tests/integration-tests/tests/networking/test_networking.py b/tests/integration-tests/tests/networking/test_networking.py index 3120f972b5..8e2b17cb3f 100644 --- a/tests/integration-tests/tests/networking/test_networking.py +++ b/tests/integration-tests/tests/networking/test_networking.py @@ -50,6 +50,7 @@ def vpc_stack(vpc_stacks, region): return vpc_stacks[region] +@pytest.mark.regions(["eu-central-1", "us-gov-east-1", "cn-northwest-1"]) def test_public_network_topology(region, vpc_stack, networking_stack_factory): ec2_client = boto3.client("ec2", region_name=region) vpc_id = vpc_stack.cfn_outputs["VpcId"] @@ -57,6 +58,9 @@ def test_public_network_topology(region, vpc_stack, networking_stack_factory): availability_zone = AVAILABILITY_ZONE_OVERRIDES.get(region, "") internet_gateway_id = vpc_stack.cfn_resources["InternetGateway"] + if isinstance(availability_zone, list): + availability_zone = availability_zone[0] + parameters = _get_cfn_parameters( availability_zone, internet_gateway_id=internet_gateway_id, vpc_id=vpc_id, public_cidr=public_subnet_cidr ) @@ -72,6 +76,7 @@ def test_public_network_topology(region, vpc_stack, networking_stack_factory): ) +@pytest.mark.regions(["eu-central-1", "us-gov-east-1", "cn-northwest-1"]) def test_public_private_network_topology(region, vpc_stack, networking_stack_factory): ec2_client = boto3.client("ec2", region_name=region) vpc_id = vpc_stack.cfn_outputs["VpcId"] @@ -80,6 +85,9 @@ def test_public_private_network_topology(region, vpc_stack, networking_stack_fac 
availability_zone = AVAILABILITY_ZONE_OVERRIDES.get(region, "") internet_gateway_id = vpc_stack.cfn_resources["InternetGateway"] + if isinstance(availability_zone, list): + availability_zone = availability_zone[0] + parameters = _get_cfn_parameters( availability_zone, internet_gateway_id=internet_gateway_id, From 3b16d05a02be831fec0d9afac726bc56efe7cd59 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 6 Aug 2019 14:17:36 -0700 Subject: [PATCH 035/201] Don't check placement group capacity if NONE or DYNAMIC Signed-off-by: Sean Smith --- cli/pcluster/cfnconfig.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/pcluster/cfnconfig.py b/cli/pcluster/cfnconfig.py index 7bc2774427..d93d381884 100644 --- a/cli/pcluster/cfnconfig.py +++ b/cli/pcluster/cfnconfig.py @@ -386,7 +386,7 @@ def __check_account_capacity(self): # noqa: C901 ImageId=test_ami_id, SubnetId=subnet_id, Placement={"GroupName": self.parameters.get("PlacementGroup")} - if self.parameters.get("PlacementGroup") + if self.parameters.get("PlacementGroup") not in [None, "NONE", "DYNAMIC"] else {}, DryRun=True, ) From 0d4ccdf6cc934f32d03b29b7021da92fde2cf4a1 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 7 Aug 2019 10:14:24 -0700 Subject: [PATCH 036/201] Add note on IAM permissions Signed-off-by: Sean Smith --- CHANGELOG.rst | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index d4fec2bc32..476ca07807 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -17,7 +17,7 @@ x.x.x * Add support for ap-east-1 region (Hong Kong) * Add possibility to specify instance type to use when building custom AMIs with ``pcluster createami`` * Speed up cluster creation by having compute nodes starting together with master node. **Note** this requires one new IAM permissions in the `ParallelClusterInstancePolicy `_, ``cloudformation:DescribeStackResource`` -* Enable ASG CloudWatch metrics for the ASG managing compute nodes +* Enable ASG CloudWatch metrics for the ASG managing compute nodes. **Note** this requires two new IAM permissions in the `ParallelClusterUserPolicy `_, ``autoscaling:DisableMetricsCollection`` and ``autoscaling:EnableMetricsCollection`` * Install Intel MPI 2019u4 on Amazon Linux, Centos 7 and Ubuntu 1604 * Upgrade Elastic Fabric Adapter (EFA) to version 1.4.1 that supports Intel MPI * Run all node daemons and cookbook recipes in isolated Python virtualenvs. This allows our code to always run with the From c1b23adbeb5fc22f2c5cb3f303ab8f4a42bced90 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Thu, 8 Aug 2019 09:40:50 +0200 Subject: [PATCH 037/201] Wrote integration test code for networking better Signed-off-by: Matteo Fiordarancio --- .../tests/networking/test_networking.py | 11 +++-------- 1 file changed, 3 insertions(+), 8 deletions(-) diff --git a/tests/integration-tests/tests/networking/test_networking.py b/tests/integration-tests/tests/networking/test_networking.py index 8e2b17cb3f..fadef11496 100644 --- a/tests/integration-tests/tests/networking/test_networking.py +++ b/tests/integration-tests/tests/networking/test_networking.py @@ -10,6 +10,7 @@ # This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. # See the License for the specific language governing permissions and limitations under the License. 
import os +import random import boto3 import pytest @@ -55,12 +56,9 @@ def test_public_network_topology(region, vpc_stack, networking_stack_factory): ec2_client = boto3.client("ec2", region_name=region) vpc_id = vpc_stack.cfn_outputs["VpcId"] public_subnet_cidr = "10.0.3.0/24" - availability_zone = AVAILABILITY_ZONE_OVERRIDES.get(region, "") + availability_zone = random.choice(AVAILABILITY_ZONE_OVERRIDES.get(region, [""])) internet_gateway_id = vpc_stack.cfn_resources["InternetGateway"] - if isinstance(availability_zone, list): - availability_zone = availability_zone[0] - parameters = _get_cfn_parameters( availability_zone, internet_gateway_id=internet_gateway_id, vpc_id=vpc_id, public_cidr=public_subnet_cidr ) @@ -82,12 +80,9 @@ def test_public_private_network_topology(region, vpc_stack, networking_stack_fac vpc_id = vpc_stack.cfn_outputs["VpcId"] public_subnet_cidr = "10.0.5.0/24" private_subnet_cidr = "10.0.4.0/24" - availability_zone = AVAILABILITY_ZONE_OVERRIDES.get(region, "") + availability_zone = random.choice(AVAILABILITY_ZONE_OVERRIDES.get(region, [""])) internet_gateway_id = vpc_stack.cfn_resources["InternetGateway"] - if isinstance(availability_zone, list): - availability_zone = availability_zone[0] - parameters = _get_cfn_parameters( availability_zone, internet_gateway_id=internet_gateway_id, From 06e682e72e3b6aa35069877e471b81e88629c7a6 Mon Sep 17 00:00:00 2001 From: Luca Carrogu Date: Thu, 8 Aug 2019 16:06:52 +0200 Subject: [PATCH 038/201] Add missing policy autoScaling:SetInstanceHealth Policy used when reporting instance as unhealthy Signed-off-by: Luca Carrogu --- cli/pcluster/config_sanity.py | 1 + 1 file changed, 1 insertion(+) diff --git a/cli/pcluster/config_sanity.py b/cli/pcluster/config_sanity.py index 0e6866ed93..f51c94dac3 100644 --- a/cli/pcluster/config_sanity.py +++ b/cli/pcluster/config_sanity.py @@ -335,6 +335,7 @@ def validate(self, resource_type, resource_value): # noqa: C901 FIXME "autoscaling:SetDesiredCapacity", "autoscaling:DescribeTags", "autoScaling:UpdateAutoScalingGroup", + "autoScaling:SetInstanceHealth", ], "*", ), From 0c91206fd91f606fb5b467422ebd50f917a0d374 Mon Sep 17 00:00:00 2001 From: Enrico Usai Date: Tue, 2 Jul 2019 14:57:32 +0200 Subject: [PATCH 039/201] createami: add --cluster-template option Signed-off-by: Enrico Usai --- cli/pcluster/cli.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/cli/pcluster/cli.py b/cli/pcluster/cli.py index 4801fff544..96e045e536 100644 --- a/cli/pcluster/cli.py +++ b/cli/pcluster/cli.py @@ -335,6 +335,11 @@ def _get_parser(): help="Specifies the cookbook to use to build the AWS ParallelCluster AMI.", ) _addarg_config(pami) + pami.add_argument( + "-t", + "--cluster-template", + help="Specifies the cluster section of the ParallelCluster configuration file to retrieve VPC settings." 
+ ) _addarg_region(pami) pami.set_defaults(template_url=None) pami.set_defaults(func=create_ami) From 694be72c6f95eb3103e6aa7954ed8164c303f531 Mon Sep 17 00:00:00 2001 From: Enrico Usai Date: Tue, 2 Jul 2019 15:54:04 +0200 Subject: [PATCH 040/201] createami: improve description of --cluster-template parameter Signed-off-by: Enrico Usai --- cli/pcluster/cli.py | 12 ++++++++---- 1 file changed, 8 insertions(+), 4 deletions(-) diff --git a/cli/pcluster/cli.py b/cli/pcluster/cli.py index 96e045e536..9a7e99ea6f 100644 --- a/cli/pcluster/cli.py +++ b/cli/pcluster/cli.py @@ -156,9 +156,11 @@ def _get_parser(): pcreate.add_argument( "-u", "--template-url", - help="Specifies the URL for a custom CloudFormation template, " "if it was used at creation time.", + help="Specifies the URL for a custom CloudFormation template, if it was used at creation time.", + ) + pcreate.add_argument( + "-t", "--cluster-template", help="Indicates the cluster section of the configuration file to use." ) - pcreate.add_argument("-t", "--cluster-template", help="Indicates which section of the cluster template to use.") pcreate.add_argument("-p", "--extra-parameters", type=json.loads, help="Adds extra parameters to the stack create.") pcreate.add_argument("-g", "--tags", type=json.loads, help="Specifies additional tags to be added to the stack.") pcreate.set_defaults(func=create) @@ -182,7 +184,9 @@ def _get_parser(): default=False, help="Disable CloudFormation stack rollback on error.", ) - pupdate.add_argument("-t", "--cluster-template", help="Indicates which section of the cluster template to use.") + pupdate.add_argument( + "-t", "--cluster-template", help="Indicates the cluster section of the configuration file to use." + ) pupdate.add_argument("-p", "--extra-parameters", help="Adds extra parameters to the stack update.") pupdate.add_argument( "-rd", @@ -338,7 +342,7 @@ def _get_parser(): pami.add_argument( "-t", "--cluster-template", - help="Specifies the cluster section of the ParallelCluster configuration file to retrieve VPC settings." + help="Specifies the cluster section of the configuration file to retrieve VPC settings.", ) _addarg_region(pami) pami.set_defaults(template_url=None) From 02b5b7c7a83223b1ddcef9c1c731903360c0f934 Mon Sep 17 00:00:00 2001 From: Enrico Usai Date: Wed, 3 Jul 2019 11:06:39 +0200 Subject: [PATCH 041/201] createami: add possibility to specify custom VPC and Subnet Signed-off-by: Enrico Usai --- cli/pcluster/cli.py | 12 +++++++++--- cli/pcluster/pcluster.py | 8 ++++---- 2 files changed, 13 insertions(+), 7 deletions(-) diff --git a/cli/pcluster/cli.py b/cli/pcluster/cli.py index 9a7e99ea6f..b6ba602634 100644 --- a/cli/pcluster/cli.py +++ b/cli/pcluster/cli.py @@ -159,7 +159,9 @@ def _get_parser(): help="Specifies the URL for a custom CloudFormation template, if it was used at creation time.", ) pcreate.add_argument( - "-t", "--cluster-template", help="Indicates the cluster section of the configuration file to use." + "-t", + "--cluster-template", + help="Indicates which section of the configuration file to use for cluster creation.", ) pcreate.add_argument("-p", "--extra-parameters", type=json.loads, help="Adds extra parameters to the stack create.") pcreate.add_argument("-g", "--tags", type=json.loads, help="Specifies additional tags to be added to the stack.") @@ -185,7 +187,7 @@ def _get_parser(): help="Disable CloudFormation stack rollback on error.", ) pupdate.add_argument( - "-t", "--cluster-template", help="Indicates the cluster section of the configuration file to use." 
+ "-t", "--cluster-template", help="Indicates which section of the configuration file to use for cluster update." ) pupdate.add_argument("-p", "--extra-parameters", help="Adds extra parameters to the stack update.") pupdate.add_argument( @@ -339,11 +341,15 @@ def _get_parser(): help="Specifies the cookbook to use to build the AWS ParallelCluster AMI.", ) _addarg_config(pami) - pami.add_argument( + pami_group1 = pami.add_argument_group("Build AMI by using VPC settings from configuration file") + pami_group1.add_argument( "-t", "--cluster-template", help="Specifies the cluster section of the configuration file to retrieve VPC settings.", ) + pami_group2 = pami.add_argument_group("Build AMI in a custom VPC and Subnet") + pami_group2.add_argument("--vpc-id", help="Specifies the VPC to use to build the AWS ParallelCluster AMI.") + pami_group2.add_argument("--subnet-id", help="Specifies the Subnet to use to build the AWS ParallelCluster AMI.") _addarg_region(pami) pami.set_defaults(template_url=None) pami.set_defaults(func=create_ami) diff --git a/cli/pcluster/pcluster.py b/cli/pcluster/pcluster.py index 39341d3a05..77a40244f9 100644 --- a/cli/pcluster/pcluster.py +++ b/cli/pcluster/pcluster.py @@ -994,15 +994,15 @@ def create_ami(args): try: config = cfnconfig.ParallelClusterConfig(args) - vpc_id = config.parameters.get("VPCId") - master_subnet_id = config.parameters.get("MasterSubnetId") + vpc_id = args.vpc_id if args.vpc_id else config.parameters.get("VPCId") + subnet_id = args.subnet_id if args.subnet_id else config.parameters.get("MasterSubnetId") packer_env = { "CUSTOM_AMI_ID": args.base_ami_id, "AWS_FLAVOR_ID": instance_type, "AMI_NAME_PREFIX": args.custom_ami_name_prefix, "AWS_VPC_ID": vpc_id, - "AWS_SUBNET_ID": master_subnet_id, + "AWS_SUBNET_ID": subnet_id, } if config.aws_access_key_id: @@ -1015,7 +1015,7 @@ def create_ami(args): LOGGER.info("Instance Type: %s", instance_type) LOGGER.info("Region: %s", config.region) LOGGER.info("VPC ID: %s", vpc_id) - LOGGER.info("Subnet ID: %s", master_subnet_id) + LOGGER.info("Subnet ID: %s", subnet_id) tmp_dir = mkdtemp() cookbook_dir = get_cookbook_dir(config, tmp_dir) From d2c9eb311a3b9156dedc4ba23d5e96015db8c8c4 Mon Sep 17 00:00:00 2001 From: Enrico Usai Date: Tue, 14 May 2019 17:29:37 +0200 Subject: [PATCH 042/201] cli: show info message for new package version only with create command I'm changing warning to info and removing the message from all the commands but the create one Signed-off-by: Enrico Usai --- CHANGELOG.rst | 1 + cli/.flake8 | 1 + cli/pcluster/cfnconfig.py | 37 +++++++++++++++++-------------------- cli/pcluster/pcluster.py | 5 ++--- cli/pcluster/utils.py | 25 ++++++++++++++++++++++++- 5 files changed, 45 insertions(+), 24 deletions(-) diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 476ca07807..007285a488 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -8,6 +8,7 @@ x.x.x **CHANGES** * Increase default EBS volume size from 17GB to 20GB +* Search for new available version only at ``pcluster create`` action 2.4.1 ===== diff --git a/cli/.flake8 b/cli/.flake8 index 6e505b5476..f64eaf16ef 100644 --- a/cli/.flake8 +++ b/cli/.flake8 @@ -17,6 +17,7 @@ per-file-ignores = pcluster/config_sanity.py: E402, D103 pcluster/easyconfig.py: E402, D103 pcluster/cfnconfig.py: E402, D103 + pcluster/utils.py: E402 tests/pcluster/pcluster-unittest.py: D101, D102, D103 tests/pcluster/configure/test_*.py: D101, D102, D103 tests/pcluster/test_*.py: D101, D102, D103 diff --git a/cli/pcluster/cfnconfig.py b/cli/pcluster/cfnconfig.py index 
d93d381884..8440b58eb6 100644 --- a/cli/pcluster/cfnconfig.py +++ b/cli/pcluster/cfnconfig.py @@ -18,19 +18,21 @@ import json import os import sys -import urllib.error -import urllib.parse -import urllib.request from builtins import object from collections import OrderedDict import boto3 import configparser -import pkg_resources from botocore.exceptions import ClientError from pcluster.config_sanity import ResourceValidator -from pcluster.utils import get_instance_vcpus, get_supported_features, get_templates_bucket_path +from pcluster.utils import ( + check_if_latest_version, + get_installed_version, + get_instance_vcpus, + get_supported_features, + get_templates_bucket_path, +) class ParallelClusterConfig(object): @@ -41,7 +43,7 @@ class ParallelClusterConfig(object): def __init__(self, args): self.args = args self.parameters = {} - self.version = pkg_resources.get_distribution("aws-parallelcluster").version + self.version = get_installed_version() # Initialize configuration attribute by parsing config file self.__config = self.__init_config() @@ -242,21 +244,16 @@ def __get_cluster_template(self): def __check_for_updates(self): """Check for updates, if required.""" - # verify if package updates should be checked - try: - update_check = self.__config.getboolean("global", "update_check") - except configparser.NoOptionError: - update_check = True - - if update_check is True: + args_func = self.args.func.__name__ + if args_func == "create": + # verify if package updates should be checked try: - latest = json.loads( - urllib.request.urlopen("https://pypi.python.org/pypi/aws-parallelcluster/json").read() - )["info"]["version"] - if self.version < latest: - print("warning: There is a newer version %s of AWS ParallelCluster available." % latest) - except Exception: - pass + update_check = self.__config.getboolean("global", "update_check") + except configparser.NoOptionError: + update_check = True + + if update_check is True: + check_if_latest_version() def __init_sanity_check(self): """ diff --git a/cli/pcluster/pcluster.py b/cli/pcluster/pcluster.py index 77a40244f9..4f387e7509 100644 --- a/cli/pcluster/pcluster.py +++ b/cli/pcluster/pcluster.py @@ -36,7 +36,7 @@ from botocore.exceptions import ClientError from tabulate import tabulate -from pcluster.utils import get_stack_output_value, verify_stack_creation +from pcluster.utils import get_installed_version, get_stack_output_value, verify_stack_creation from . import cfnconfig, utils @@ -67,8 +67,7 @@ def create_bucket_with_batch_resources(stack_name, aws_client_config, resources_ def version(): - pcluster_version = pkg_resources.get_distribution("aws-parallelcluster").version - return pcluster_version + return get_installed_version() def create(args): # noqa: C901 FIXME!!! diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index 9c17c354dd..b4329f1102 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -8,17 +8,23 @@ # or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES # OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and # limitations under the License. 
-from __future__ import absolute_import, print_function +# fmt: off +from __future__ import absolute_import, print_function # isort:skip +from future import standard_library # isort:skip +standard_library.install_aliases() +# fmt: on import json import logging import os import sys import time +import urllib.request import zipfile from io import BytesIO import boto3 +import pkg_resources from botocore.exceptions import ClientError LOGGER = logging.getLogger("pcluster.pcluster") @@ -260,3 +266,20 @@ def get_templates_bucket_path(aws_region_name): return "https://s3.{REGION}.amazonaws.com{S3_SUFFIX}/{REGION}-aws-parallelcluster/templates/".format( REGION=aws_region_name, S3_SUFFIX=s3_suffix ) + + +def get_installed_version(): + """Get the version of the installed aws-parallelcluster package.""" + return pkg_resources.get_distribution("aws-parallelcluster").version + + +def check_if_latest_version(): + """Check if the current package version is the latest one.""" + try: + latest = json.loads(urllib.request.urlopen("https://pypi.python.org/pypi/aws-parallelcluster/json").read())[ + "info" + ]["version"] + if get_installed_version() < latest: + print("Info: There is a newer version %s of AWS ParallelCluster available." % latest) + except Exception: + pass From 0c9b62c51bc7e76a2227703dd3efdc0aa0d7d90d Mon Sep 17 00:00:00 2001 From: ddeidda Date: Wed, 31 Jul 2019 15:52:50 +0200 Subject: [PATCH 043/201] python script to push cookbook files Signed-off-by: ddeidda --- util/upload-cookbook.py | 257 ++++++++++++++++++++++++++++++++++++++++ 1 file changed, 257 insertions(+) create mode 100755 util/upload-cookbook.py diff --git a/util/upload-cookbook.py b/util/upload-cookbook.py new file mode 100755 index 0000000000..7fdde76cf1 --- /dev/null +++ b/util/upload-cookbook.py @@ -0,0 +1,257 @@ +#!/usr/bin/python +# +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not +# use this file except in compliance with the License. A copy of the License +# is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. This file is +# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY +# KIND, express or implied. See the License for the specific language +# governing permissions and limitations under the License. 
+# +# +# Upload cookbook +# +# usage: ./upload-cookbook.py --regions "[,, ...]" --full_name "" \ +# --partition \ +# [--unsupportedregions "[, , ...]"] [--dryrun] [--override] \ +# [--credential ,,,]* + +import re +from datetime import datetime + +import argparse +import boto3 +from botocore.exceptions import ClientError + +_BACKUP_DIR = "backup" +_COOKBOOKS_DIR = "cookbooks" +_bck_date = datetime.now().strftime("%Y-%m-%d_%H-%M-%S") +_bck_error_array = set() +_cp_error_array = set() +_ls_error_array = set() +_credentials = [] +_main_region = None + + +def _get_all_aws_regions(region): + ec2 = boto3.client("ec2", region_name=region) + return set(sorted(r.get("RegionName") for r in ec2.describe_regions().get("Regions"))) + + +def _aws_s3_ls(s3, region, bucket_name, key): + out = s3.list_objects_v2(Bucket=bucket_name, Prefix=key) + if len(out.get("Contents", [])) > 0: + _ls_error_array.add(region) + + +def _aws_s3_bck(s3, args, region, bucket_name, full_name): + if args.dryrun: + print( + "Not backing up {0} to bucket {1} override is {2}, dryrun is {3}".format( + full_name, bucket_name, args.override, args.dryrun + ) + ) + else: + try: + copy_source = {"Bucket": bucket_name, "Key": _COOKBOOKS_DIR + "/" + full_name} + s3.copy(copy_source, bucket_name, _BACKUP_DIR + "/" + full_name + _bck_date) + except ClientError as e: + print("Couldn't backup {0}".format(full_name)) + if e.response["Error"]["Code"] == "NoSuchBucket": + print("Bucket is not present.") + _bck_error_array.add(region) + + +def _aws_s3_cp(s3, args, region, bucket_name, folder, full_name): + if args.dryrun: + print( + "Not uploading {0} to bucket {1}, override is {2}, dryrun is {3}".format( + full_name, bucket_name, args.override, args.dryrun + ) + ) + else: + key = folder + "/" + full_name + try: + s3.upload_file(full_name, bucket_name, key, ExtraArgs={"ACL": "public-read"}) + + print("Successfully uploaded {0} to s3://{1}/{2}".format(full_name, bucket_name, key)) + except ClientError as e: + print("Couldn't upload {0} to bucket s3://{1}/{2}".format(full_name, bucket_name, key)) + _cp_error_array.add(region) + if e.response["Error"]["Code"] == "NoSuchBucket": + print("Bucket is not present.") + + raise e + + +def _create_s3_client(region): + reg_credentials = [c for c in _credentials if c[0] == region] + + if reg_credentials: + credential = reg_credentials[0] + credential_region = credential[0] + credential_endpoint = credential[1] + credential_arn = credential[2] + credential_external_id = credential[3] + + try: + sts = boto3.client("sts", region_name=_main_region, endpoint_url=credential_endpoint) + + assumed_role_object = sts.assume_role( + RoleArn=credential_arn, + ExternalId=credential_external_id, + RoleSessionName=credential_region + "upload_cfn_templates_sts_session", + ) + aws_credentials = assumed_role_object["Credentials"] + s3 = boto3.client( + "s3", + region_name=credential_region, + aws_access_key_id=aws_credentials.get("AccessKeyId"), + aws_secret_access_key=aws_credentials.get("SecretAccessKey"), + aws_session_token=aws_credentials.get("SessionToken"), + ) + + except ClientError as e: + print("Warning: non authorized in region '{0}', skipping".format(credential_region)) + raise e + else: + s3 = boto3.client("s3") + return s3 + + +def _get_bucket_name(args, region): + return region + "-aws-parallelcluster" if not args.bucket else args.bucket + + +def _parse_args(): + global _credentials + global _main_region + parser = argparse.ArgumentParser(description="Uploads cookbook to S3") + + parser.add_argument( + 
"--regions", + type=str, + help='Valid Regions, can include "all", or comma separated list of regions', + required=True, + ) + parser.add_argument( + "--unsupportedregions", type=str, help="Unsupported regions, comma separated", default="", required=False + ) + parser.add_argument( + "--override", + action="store_true", + help="If override is false, the file will not be pushed if it already exists in the bucket", + default=False, + required=False, + ) + parser.add_argument( + "--bucket", type=str, help="Buckets to upload to, defaults to [region]-aws-parallelcluster", required=False + ) + parser.add_argument("--full_name", type=str, help="Full Name of bucket resource", required=True) + parser.add_argument( + "--dryrun", action="store_true", help="Doesn't push anything to S3, just outputs", default=False, required=False + ) + + parser.add_argument("--partition", type=str, help="commercial | china | govcloud", required=True) + parser.add_argument( + "--credential", + type=str, + action="append", + help="STS credential endpoint, in the format ,,,. Could be specified multiple times", + required=False, + ) + + args = parser.parse_args() + if args.partition == "commercial": + _main_region = "us-east-1" + elif args.partition == "govcloud": + _main_region = "us-gov-west-1" + elif args.partition == "china": + _main_region = "cn-north-1" + else: + print("Unsupported partition {0}".format(args.partition)) + exit(1) + + if args.credential: + _credentials = [ + tuple(credential_tuple.strip().split(",")) + for credential_tuple in args.credential + if credential_tuple.strip() + ] + + if args.regions == "all": + args.regions = _get_all_aws_regions(_main_region) + else: + args.regions = [x.strip() for x in args.regions.split(",")] + + args.unsupportedregions = [x.strip() for x in args.unsupportedregions.split(",")] + + # Purging regions + args.regions = set(args.regions) - set(args.unsupportedregions) + + # Adds all opt-in regions + for credential in _credentials: + args.regions.add(credential[0]) + + return args + + +def main(): + args = _parse_args() + for region in args.regions: + s3 = _create_s3_client(region) + bucket_name = _get_bucket_name(args, region) + + print("Listing cookbook for region ({0})".format(region)) + _aws_s3_ls(s3, region, bucket_name, _COOKBOOKS_DIR + "/" + args.full_name + ".tgz") + + if len(_ls_error_array) > 0 and not args.override: + print("We know the cookbook archives are already there, in this round we need to upload the .date files!") + print("Failed to push cookbook, already present for regions: {0} ".format(" ".join(_ls_error_array))) + exit(1) + elif len(_ls_error_array) > 0 and args.override: + print("Some or all of the cookbook archives are already there but OVERRIDE=true") + + for region in args.regions: + s3 = _create_s3_client(region) + bucket_name = _get_bucket_name(args, region) + + if args.override: + print("Backup cookbook for region: {0}".format(region)) + _aws_s3_bck(s3, args, region, bucket_name, args.full_name + ".tgz") + _aws_s3_bck(s3, args, region, bucket_name, args.full_name + ".md5") + _aws_s3_bck(s3, args, region, bucket_name, args.full_name + ".tgz.date") + + print("Pushing cookbook for region: {0}".format(region)) + _aws_s3_cp(s3, args, region, bucket_name, _COOKBOOKS_DIR, args.full_name + ".tgz") + _aws_s3_cp(s3, args, region, bucket_name, _COOKBOOKS_DIR, args.full_name + ".md5") + + if not args.dryrun: + # Stores LastModified info into .tgz.date file and uploads it back to bucket + with (open(args.full_name + ".tgz.date", "w+")) as f: + response = 
s3.head_object(Bucket=bucket_name, Key=_COOKBOOKS_DIR + "/" + args.full_name + ".tgz") + f.write(response.get("LastModified").strftime("%Y-%m-%d_%H-%M-%S")) + + _aws_s3_cp(s3, args, region, bucket_name, _COOKBOOKS_DIR, args.full_name + ".tgz.date") + else: + print( + "File {0}.{1} not stored to bucket {2} due to dryrun mode".format( + args.full_name, "tgz.date", bucket_name + ) + ) + + if len(_bck_error_array) > 0: + print("Failed to backup cookbook for region ({0})".format(" ".join(_bck_error_array))) + + if len(_cp_error_array) > 0: + print("Failed to push cookbook for region ({0})".format(" ".join(_cp_error_array))) + exit(1) + + +if __name__ == "__main__": + main() From e2bb284337534c9dc0cc2c11d6d740df2794fdc3 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 7 Aug 2019 10:58:36 -0700 Subject: [PATCH 044/201] Update issue templates Signed-off-by: Sean Smith --- .github/ISSUE_TEMPLATE/bug_report.md | 27 +++++++++++++++++++++++++++ 1 file changed, 27 insertions(+) create mode 100644 .github/ISSUE_TEMPLATE/bug_report.md diff --git a/.github/ISSUE_TEMPLATE/bug_report.md b/.github/ISSUE_TEMPLATE/bug_report.md new file mode 100644 index 0000000000..6dbbf8b6f7 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug_report.md @@ -0,0 +1,27 @@ +--- +name: Bug report +about: Please create a detailed report by completing the following information +title: '' +labels: '' +assignees: '' + +--- + +**Environment:** + - AWS ParallelCluster / CfnCluster version [e.g. aws-parallelcluster-2.4.1] + - OS: [e.g. alinux] + - Scheduler: [e.g. SGE] + - Master instance type: [e.g. m5.xlarge] + - Compute instance type: [e.g. c5.8xlarge] + +**Bug description and how to reproduce:** +A clear and concise description of what the bug is and the steps to reproduce the behavior. + +**Additional context:** +Any other context about the problem. E.g.: + - configuration file without any credentials or personal data. + - pre/post-install scripts, if any + - screenshots, if useful + - if the cluster fails creation, please re-execute `create` action using `--norollback` option and attach `/var/log/cfn-init.log`, `/var/log/cloud-init.log` and `/var/log/cloud-init-output.log` files from the Master node + - if a compute node was terminated due to failure, there will be a directory `/home/logs/compute`. 
Attach one of the `instance-id.tar.gz` from that directory + - if you encounter scaling problems please attach `/var/log/nodewatcher` from the Compute node and `/var/log/jobwatcher` and `/var/log/sqswatcher` from the Master node From 4b2d8fccffbffc61c956aa695324637fa8736414 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Tue, 13 Aug 2019 11:53:40 +0200 Subject: [PATCH 045/201] Fixed wrong dir name Signed-off-by: Matteo Fiordarancio --- cli/tox.ini | 8 ++++---- .../public-private.cfn.json | 0 .../public.cfn.json | 0 .../integration-tests/tests/networking/test_networking.py | 4 ++-- 4 files changed, 6 insertions(+), 6 deletions(-) rename cloudformation/{networking_configuration => networking}/public-private.cfn.json (100%) rename cloudformation/{networking_configuration => networking}/public.cfn.json (100%) diff --git a/cli/tox.ini b/cli/tox.ini index 5a6e1e082b..2211e17985 100644 --- a/cli/tox.ini +++ b/cli/tox.ini @@ -217,8 +217,8 @@ commands = cfn-lint ebs-substack.cfn.json cfn-lint efs-substack.cfn.json cfn-lint raid-substack.cfn.json - cfn-lint -iW2508 networking_configuration/public.cfn.json - cfn-lint -iW2508 networking_configuration/public-private.cfn.json + cfn-lint -iW2508 networking/public.cfn.json + cfn-lint -iW2508 networking/public-private.cfn.json # Validates that cfn json templates are correctly formatted. [testenv:cfn-format-check] @@ -228,7 +228,7 @@ changedir = ../cloudformation commands = python utils/json_formatter.py -c *.cfn.json - python utils/json_formatter.py -c networking_configuration/*.cfn.json + python utils/json_formatter.py -c networking/*.cfn.json # Formats all cfn.json files. [testenv:cfn-format] @@ -238,7 +238,7 @@ changedir = ../cloudformation commands = python utils/json_formatter.py *.cfn.json - python utils/json_formatter.py networking_configuration/*.cfn.json + python utils/json_formatter.py networking/*.cfn.json # Runs tests for cfn templates. 
[testenv:cfn-tests] diff --git a/cloudformation/networking_configuration/public-private.cfn.json b/cloudformation/networking/public-private.cfn.json similarity index 100% rename from cloudformation/networking_configuration/public-private.cfn.json rename to cloudformation/networking/public-private.cfn.json diff --git a/cloudformation/networking_configuration/public.cfn.json b/cloudformation/networking/public.cfn.json similarity index 100% rename from cloudformation/networking_configuration/public.cfn.json rename to cloudformation/networking/public.cfn.json diff --git a/tests/integration-tests/tests/networking/test_networking.py b/tests/integration-tests/tests/networking/test_networking.py index fadef11496..0ce144e43b 100644 --- a/tests/integration-tests/tests/networking/test_networking.py +++ b/tests/integration-tests/tests/networking/test_networking.py @@ -62,7 +62,7 @@ def test_public_network_topology(region, vpc_stack, networking_stack_factory): parameters = _get_cfn_parameters( availability_zone, internet_gateway_id=internet_gateway_id, vpc_id=vpc_id, public_cidr=public_subnet_cidr ) - path = os.path.join("..", "..", "cloudformation", "networking_configuration", "public.cfn.json") + path = os.path.join("..", "..", "cloudformation", "networking", "public.cfn.json") stack = networking_stack_factory(region, path, parameters) public_subnet_id = stack.cfn_outputs["PublicSubnetId"] @@ -90,7 +90,7 @@ def test_public_private_network_topology(region, vpc_stack, networking_stack_fac public_cidr=public_subnet_cidr, private_cidr=private_subnet_cidr, ) - path = os.path.join("..", "..", "cloudformation", "networking_configuration", "public-private.cfn.json") + path = os.path.join("..", "..", "cloudformation", "networking", "public-private.cfn.json") stack = networking_stack_factory(region, path, parameters) public_subnet_id = stack.cfn_outputs["PublicSubnetId"] From de343c29496b2582aefa0815be3520bdcf477364 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Fri, 9 Aug 2019 10:53:31 +0200 Subject: [PATCH 046/201] integ tests: test overscaling when locked nodes in scheduler Signed-off-by: Francesco De Martino --- .../tests/common/assertions.py | 21 ++++++--- .../tests/common/schedulers_common.py | 25 +++++++++++ .../tests/schedulers/common.py | 44 +++++++++++++++++++ .../tests/schedulers/test_sge.py | 4 ++ .../tests/schedulers/test_slurm.py | 4 ++ .../tests/schedulers/test_torque.py | 4 ++ 6 files changed, 97 insertions(+), 5 deletions(-) create mode 100644 tests/integration-tests/tests/schedulers/common.py diff --git a/tests/integration-tests/tests/common/assertions.py b/tests/integration-tests/tests/common/assertions.py index 7074da1f2d..f296440c1c 100644 --- a/tests/integration-tests/tests/common/assertions.py +++ b/tests/integration-tests/tests/common/assertions.py @@ -41,7 +41,16 @@ def assert_no_errors_in_logs(remote_command_executor, log_files): assert_that(log).does_not_contain(error_level) -def assert_scaling_worked(scheduler_commands, region, stack_name, scaledown_idletime, expected_max, expected_final): +def assert_scaling_worked( + scheduler_commands, + region, + stack_name, + scaledown_idletime, + expected_max, + expected_final, + assert_asg=True, + assert_scheduler=True, +): jobs_execution_time = 1 estimated_scaleup_time = 5 max_scaledown_time = 10 @@ -54,7 +63,9 @@ def assert_scaling_worked(scheduler_commands, region, stack_name, scaledown_idle + minutes(estimated_scaleup_time) + minutes(max_scaledown_time), ) - assert_that(max(asg_capacity_time_series)).is_equal_to(expected_max) - 
assert_that(max(compute_nodes_time_series)).is_equal_to(expected_max) - assert_that(asg_capacity_time_series[-1]).is_equal_to(expected_final) - assert_that(compute_nodes_time_series[-1]).is_equal_to(expected_final) + if assert_asg: + assert_that(max(asg_capacity_time_series)).is_equal_to(expected_max) + assert_that(asg_capacity_time_series[-1]).is_equal_to(expected_final) + if assert_scheduler: + assert_that(max(compute_nodes_time_series)).is_equal_to(expected_max) + assert_that(compute_nodes_time_series[-1]).is_equal_to(expected_final) diff --git a/tests/integration-tests/tests/common/schedulers_common.py b/tests/integration-tests/tests/common/schedulers_common.py index 9b581693e4..2e4291d53e 100644 --- a/tests/integration-tests/tests/common/schedulers_common.py +++ b/tests/integration-tests/tests/common/schedulers_common.py @@ -95,6 +95,11 @@ def get_compute_nodes(self): """Retrieve the list of compute nodes attached to the scheduler.""" pass + @abstractmethod + def wait_for_locked_node(self): + """Wait for at least one node to be locked.""" + pass + class AWSBatchCommands(SchedulerCommands): """Implement commands for awsbatch scheduler.""" @@ -138,6 +143,9 @@ def compute_nodes_count(self): # noqa: D102 def get_compute_nodes(self): # noqa: D102 raise NotImplementedError + def wait_for_locked_node(self): # noqa: D102 + raise NotImplementedError + class SgeCommands(SchedulerCommands): """Implement commands for sge scheduler.""" @@ -208,6 +216,14 @@ def get_compute_nodes(self): # noqa: D102 result = self._remote_command_executor.run_remote_command("qhost | grep ip- | awk '{print $1}'") return result.stdout.splitlines() + @retry( + retry_on_result=lambda result: "d" not in result, + wait_fixed=seconds(3), + stop_max_delay=minutes(5), + ) + def wait_for_locked_node(self): # noqa: D102 + return self._remote_command_executor.run_remote_command("qstat -f -xml").stdout + class SlurmCommands(SchedulerCommands): """Implement commands for slurm scheduler.""" @@ -282,6 +298,10 @@ def get_compute_nodes(self): # noqa: D102 ) return result.stdout.splitlines() + @retry(retry_on_result=lambda result: "drain" not in result, wait_fixed=seconds(3), stop_max_delay=minutes(5)) + def wait_for_locked_node(self): # noqa: D102 + return self._remote_command_executor.run_remote_command("/opt/slurm/bin/sinfo -h -o '%t'").stdout + class TorqueCommands(SchedulerCommands): """Implement commands for torque scheduler.""" @@ -347,6 +367,11 @@ def get_compute_nodes(self): # noqa: D102 ) return result.stdout.splitlines() + @retry(retry_on_result=lambda result: "offline" not in result, wait_fixed=seconds(5), stop_max_delay=minutes(5)) + def wait_for_locked_node(self): # noqa: D102 + # discard the first node since that is the master server + return self._remote_command_executor.run_remote_command(r'pbsnodes | grep -e "\sstate = " | tail -n +2').stdout + def get_scheduler_commands(scheduler, remote_command_executor): scheduler_commands = { diff --git a/tests/integration-tests/tests/schedulers/common.py b/tests/integration-tests/tests/schedulers/common.py new file mode 100644 index 0000000000..59f73df734 --- /dev/null +++ b/tests/integration-tests/tests/schedulers/common.py @@ -0,0 +1,44 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. 
+# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. +import logging + +from assertpy import assert_that +from tests.common.assertions import assert_scaling_worked +from tests.common.schedulers_common import get_scheduler_commands + + +def test_overscaling_when_job_submitted_during_scaledown( + remote_command_executor, scheduler, region, stack_name, scaledown_idletime +): + """Test that if a job gets submitted when a node is locked the cluster does not overscale""" + logging.info("Testing cluster does not overscale when a job is submitted and a node is being terminated.") + scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor) + if scheduler_commands.compute_nodes_count() == 0: + result = scheduler_commands.submit_command("sleep 1") + job_id = scheduler_commands.assert_job_submitted(result.stdout) + scheduler_commands.wait_job_completed(job_id) + assert_that(scheduler_commands.compute_nodes_count()).is_equal_to(1) + + scheduler_commands.wait_for_locked_node() + + result = scheduler_commands.submit_command("sleep 1") + scheduler_commands.assert_job_submitted(result.stdout) + # do not check scheduler scaling but only ASG. + assert_scaling_worked( + scheduler_commands, + region, + stack_name, + scaledown_idletime, + expected_max=1, + expected_final=0, + assert_scheduler=False, + ) diff --git a/tests/integration-tests/tests/schedulers/test_sge.py b/tests/integration-tests/tests/schedulers/test_sge.py index f56cc2c1b7..0271bd2391 100644 --- a/tests/integration-tests/tests/schedulers/test_sge.py +++ b/tests/integration-tests/tests/schedulers/test_sge.py @@ -19,6 +19,7 @@ from remote_command_executor import RemoteCommandExecutor from tests.common.assertions import assert_no_errors_in_logs, assert_scaling_worked from tests.common.schedulers_common import SgeCommands +from tests.schedulers.common import test_overscaling_when_job_submitted_during_scaledown @pytest.mark.regions(["ap-southeast-1"]) @@ -42,6 +43,9 @@ def test_sge(region, pcluster_config_reader, clusters_factory): _test_non_runnable_jobs(remote_command_executor, max_queue_size, max_slots, region, cluster, scaledown_idletime) _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime) _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_slots) + test_overscaling_when_job_submitted_during_scaledown( + remote_command_executor, "sge", region, cluster.cfn_name, scaledown_idletime + ) # TODO: _test_dynamic_max_cluster_size assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"]) diff --git a/tests/integration-tests/tests/schedulers/test_slurm.py b/tests/integration-tests/tests/schedulers/test_slurm.py index e8bd989502..668eb4580b 100644 --- a/tests/integration-tests/tests/schedulers/test_slurm.py +++ b/tests/integration-tests/tests/schedulers/test_slurm.py @@ -19,6 +19,7 @@ from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor from tests.common.assertions import assert_asg_desired_capacity, assert_no_errors_in_logs, assert_scaling_worked from tests.common.schedulers_common import SlurmCommands +from tests.schedulers.common import 
test_overscaling_when_job_submitted_during_scaledown @pytest.mark.regions(["us-west-1"]) @@ -42,6 +43,9 @@ def test_slurm(region, pcluster_config_reader, clusters_factory): _test_cluster_limits(remote_command_executor, max_queue_size, region, cluster.asg) _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_queue_size) _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime) + test_overscaling_when_job_submitted_during_scaledown( + remote_command_executor, "slurm", region, cluster.cfn_name, scaledown_idletime + ) _test_dynamic_dummy_nodes(remote_command_executor, max_queue_size) assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"]) diff --git a/tests/integration-tests/tests/schedulers/test_torque.py b/tests/integration-tests/tests/schedulers/test_torque.py index d244f13bca..02bc2e187b 100644 --- a/tests/integration-tests/tests/schedulers/test_torque.py +++ b/tests/integration-tests/tests/schedulers/test_torque.py @@ -20,6 +20,7 @@ from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor from tests.common.assertions import assert_no_errors_in_logs, assert_scaling_worked from tests.common.schedulers_common import TorqueCommands +from tests.schedulers.common import test_overscaling_when_job_submitted_during_scaledown @pytest.mark.regions(["us-west-2"]) @@ -44,6 +45,9 @@ def test_torque(region, pcluster_config_reader, clusters_factory): _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime) _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_slots) _test_dynamic_cluster_limits(remote_command_executor, max_queue_size, max_slots, region, cluster.asg) + test_overscaling_when_job_submitted_during_scaledown( + remote_command_executor, "torque", region, cluster.cfn_name, scaledown_idletime + ) assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"]) From 100f5190b8977585b12245f1fbd31c52d0e86e5f Mon Sep 17 00:00:00 2001 From: Mili Shah Date: Mon, 19 Aug 2019 14:58:39 -0700 Subject: [PATCH 047/201] Check S3 object existence/permission --- cli/pcluster/config_sanity.py | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/cli/pcluster/config_sanity.py b/cli/pcluster/config_sanity.py index f51c94dac3..c03213a7ae 100644 --- a/cli/pcluster/config_sanity.py +++ b/cli/pcluster/config_sanity.py @@ -14,6 +14,7 @@ standard_library.install_aliases() # fmt: on +import re import sys import urllib.error import urllib.parse @@ -451,7 +452,21 @@ def validate(self, resource_type, resource_value): # noqa: C901 FIXME elif resource_type == "URL": scheme = urlparse(resource_value).scheme if scheme == "s3": - pass + try: + s3 = boto3.client( + "s3", + region_name=self.region, + aws_access_key_id=self.aws_access_key_id, + aws_secret_access_key=self.aws_secret_access_key, + ) + m = re.match(r"s3://(\w*)/(.*)", resource_value) + bucket, key = m.group(1), m.group(2) + s3.head_object(Bucket=bucket, Key=key) + except ClientError: + self.__fail( + resource_type, + "S3 object {0} does not exist or you do not have access to it.".format(resource_value), + ) else: try: urllib.request.urlopen(resource_value) From a0eb85336483565805df73981f1f3d1d9ab61221 Mon Sep 17 00:00:00 2001 From: Matteo Fiordarancio Date: Wed, 21 Aug 2019 14:51:16 +0200 Subject: [PATCH 048/201] Fixed the bug same subnet for both 
master and compute --> fail Signed-off-by: Matteo Fiordarancio --- cloudformation/aws-parallelcluster.cfn.json | 26 ++++++++++++++++----- 1 file changed, 20 insertions(+), 6 deletions(-) diff --git a/cloudformation/aws-parallelcluster.cfn.json b/cloudformation/aws-parallelcluster.cfn.json index 558adfbb49..52c9160bcc 100644 --- a/cloudformation/aws-parallelcluster.cfn.json +++ b/cloudformation/aws-parallelcluster.cfn.json @@ -854,21 +854,35 @@ ] }, "UseMasterSubnetForCompute": { - "Fn::And": [ + "Fn::Or": [ { - "Fn::Equals": [ + "Fn::And": [ { - "Ref": "ComputeSubnetId" + "Fn::Equals": [ + { + "Ref": "ComputeSubnetId" + }, + "NONE" + ] }, - "NONE" + { + "Fn::Equals": [ + { + "Ref": "ComputeSubnetCidr" + }, + "NONE" + ] + } ] }, { "Fn::Equals": [ { - "Ref": "ComputeSubnetCidr" + "Ref": "ComputeSubnetId" }, - "NONE" + { + "Ref": "MasterSubnetId" + } ] } ] From 25434db165e189734c837fd91e9668e4fcae98c4 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 21 Aug 2019 09:24:13 -0700 Subject: [PATCH 049/201] Support inline comments on Config See https://docs.python.org/3/library/configparser.html#customizing-parser-behaviour ```ini [cluster test] key_name = test ; here is a comment ``` ```bash pcluster create test Beginning cluster creation for cluster: test Config sanity error on resource EC2KeyPair: The key pair 'test ; here is a comment' does not exist ``` ```ini [cluster test] key_name = test ; here is a comment ``` ```bash pcluster create test Beginning cluster creation for cluster: efa-test ... ``` Signed-off-by: Sean Smith --- cli/pcluster/cfnconfig.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/pcluster/cfnconfig.py b/cli/pcluster/cfnconfig.py index 8440b58eb6..1f79a356fe 100644 --- a/cli/pcluster/cfnconfig.py +++ b/cli/pcluster/cfnconfig.py @@ -156,7 +156,7 @@ def __init_config(self): else: self.__fail("Config file %s not found" % config_file) - config = configparser.ConfigParser() + config = configparser.ConfigParser(inline_comment_prefixes=(";", "#")) config.read(config_file) return config From 4a26d5c304b78dfeabd5fea0b2df926331132938 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Tue, 13 Aug 2019 15:59:17 -0700 Subject: [PATCH 050/201] Check for InvalidAccessKeyId in get_supported_features Signed-off-by: Sean Smith --- cli/pcluster/utils.py | 10 ++++++++-- 1 file changed, 8 insertions(+), 2 deletions(-) diff --git a/cli/pcluster/utils.py b/cli/pcluster/utils.py index b4329f1102..2d56f7ebe1 100644 --- a/cli/pcluster/utils.py +++ b/cli/pcluster/utils.py @@ -166,9 +166,15 @@ def get_supported_features(region, feature): try: features = _get_json_from_s3(region, "features/feature_whitelist.json") supported_features = features.get("Features").get(feature) - except (ValueError, ClientError, KeyError): + except (ValueError, ClientError, KeyError) as e: + if type(e) is ClientError: + code = e.response.get("Error").get("Code") + if code == "InvalidAccessKeyId": + print(e.response.get("Error").get("Message")) + exit(1) print( - "Failed validate %s. This is probably a bug on our end. Please set sanity_check = false and retry" % feature + "Failed validate {0}. This is probably a bug on our end. 
Please submit an issue " + "https://github.com/aws/aws-parallelcluster/issues/new/choose".format(feature) ) exit(1) From 5d3cdd6ef2937746cf33b83056f01766a233c2f5 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Fri, 23 Aug 2019 11:57:19 -0700 Subject: [PATCH 051/201] Change generate_json_report to parse root tag On August 15th, `pytest` released version 5.1.0 https://pypi.org/project/pytest/#history This version adds a new root `` tag. See https://github.com/pytest-dev/pytest/issues/5477 ```xml ... ``` ```xml ... ``` This patch parses that tag properly. Signed-off-by: Sean Smith --- tests/integration-tests/reports_generator.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/tests/integration-tests/reports_generator.py b/tests/integration-tests/reports_generator.py index 69aa37d7e2..4448d5a478 100644 --- a/tests/integration-tests/reports_generator.py +++ b/tests/integration-tests/reports_generator.py @@ -76,7 +76,7 @@ def generate_json_report(test_results_dir, save_to_file=True): result_to_label_mapping = {"skipped": "skipped", "failure": "failures", "error": "errors"} results = {"all": _empty_results_dict()} xml = untangle.parse(test_report_file) - for testcase in xml.testsuite.children: + for testcase in xml.testsuites.testsuite.children: label = "succeeded" for key, value in result_to_label_mapping.items(): if hasattr(testcase, key): From 7f13aed7ae0c766af13109c01416943e582c6400 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Mon, 26 Aug 2019 09:02:39 -0700 Subject: [PATCH 052/201] Take into account multiple testsuites Signed-off-by: Sean Smith --- tests/integration-tests/reports_generator.py | 31 ++++++++++---------- 1 file changed, 16 insertions(+), 15 deletions(-) diff --git a/tests/integration-tests/reports_generator.py b/tests/integration-tests/reports_generator.py index 4448d5a478..abc912dea4 100644 --- a/tests/integration-tests/reports_generator.py +++ b/tests/integration-tests/reports_generator.py @@ -76,21 +76,22 @@ def generate_json_report(test_results_dir, save_to_file=True): result_to_label_mapping = {"skipped": "skipped", "failure": "failures", "error": "errors"} results = {"all": _empty_results_dict()} xml = untangle.parse(test_report_file) - for testcase in xml.testsuites.testsuite.children: - label = "succeeded" - for key, value in result_to_label_mapping.items(): - if hasattr(testcase, key): - label = value - break - results["all"][label] += 1 - results["all"]["total"] += 1 - - if hasattr(testcase, "properties"): - for property in testcase.properties.children: - _record_result(results, property["name"], property["value"], label) - - feature = re.sub(r"test_|_test|.py", "", os.path.splitext(os.path.basename(testcase["file"]))[0]) - _record_result(results, "feature", feature, label) + for testsuite in xml.testsuites.children: + for testcase in testsuite.children: + label = "succeeded" + for key, value in result_to_label_mapping.items(): + if hasattr(testcase, key): + label = value + break + results["all"][label] += 1 + results["all"]["total"] += 1 + + if hasattr(testcase, "properties"): + for property in testcase.properties.children: + _record_result(results, property["name"], property["value"], label) + + feature = re.sub(r"test_|_test|.py", "", os.path.splitext(os.path.basename(testcase["file"]))[0]) + _record_result(results, "feature", feature, label) if save_to_file: with open("{0}/test_report.json".format(test_results_dir), "w") as out_f: From 0b39db132f6ddc08d99f6f9c9749b5c28566d5c3 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Fri, 23 Aug 2019 
11:07:39 -0700 Subject: [PATCH 053/201] Only Check for valid instance type if sanity_check = true Signed-off-by: Sean Smith --- cli/pcluster/cfnconfig.py | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/cli/pcluster/cfnconfig.py b/cli/pcluster/cfnconfig.py index 1f79a356fe..487f9dd1d5 100644 --- a/cli/pcluster/cfnconfig.py +++ b/cli/pcluster/cfnconfig.py @@ -537,7 +537,11 @@ def __init_efa_parameters(self): supported_features = get_supported_features(self.region, "efa") valid_instances = supported_features.get("instances") - self.__validate_instance("EFA", self.parameters.get("ComputeInstanceType"), valid_instances) + # validate instance type only when sanity_check = true + # This relies on a file in S3, which could be out of date, in which case the customer can set + # sanity_check = false + if self.__sanity_check: + self.__validate_instance("EFA", self.parameters.get("ComputeInstanceType"), valid_instances) self.__validate_os("EFA", self.__get_os(), ["alinux", "centos7", "ubuntu1604"]) self.__validate_scheduler("EFA", self.__get_scheduler(), ["sge", "slurm", "torque"]) self.__validate_resource("EFA", self.parameters) From 191680878c3f4be0993a32751aedf1c04e152cad Mon Sep 17 00:00:00 2001 From: Enrico Usai Date: Wed, 28 Aug 2019 12:03:54 +0200 Subject: [PATCH 054/201] Disable bugged rule for cfn-python-lint https://github.com/awslabs/cfn-python-lint/issues/564 Signed-off-by: Enrico Usai --- cli/tox.ini | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/cli/tox.ini b/cli/tox.ini index 2211e17985..b86764c1e3 100644 --- a/cli/tox.ini +++ b/cli/tox.ini @@ -211,9 +211,10 @@ deps = cfn-lint # W2507 disabled since we want to have nullable String type parameters # E2523 disabled since we have both a Launch Template and Launch Configuration # W2508 disabled since we don't want to always specify Availability Zone +# iE3008 disabled because of https://github.com/awslabs/cfn-python-lint/issues/564 commands = cfn-lint -iE2504 -iW2507 -iE2523 aws-parallelcluster.cfn.json - cfn-lint batch-substack.cfn.json + cfn-lint -iE3008 batch-substack.cfn.json cfn-lint ebs-substack.cfn.json cfn-lint efs-substack.cfn.json cfn-lint raid-substack.cfn.json From 5fb0adfc20c647209ebce6221192989e802644a6 Mon Sep 17 00:00:00 2001 From: Enrico Usai Date: Wed, 28 Aug 2019 11:20:30 +0200 Subject: [PATCH 055/201] Change prefix of the overscaling common function from test_ to assert_ It is required to avoid that the common function is executed from the integration test framework. 
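
An illustrative sketch of why this rename matters (not part of the patch series; the file and function names below are hypothetical). With pytest's default collection rules, any module-level callable whose name starts with `test` in a collected test module is treated as a test — including names that were only imported — so a shared helper named `test_*` would be picked up and run on its own, without the arguments the calling tests normally pass it. Prefixing the helper with `assert_` keeps it out of collection:

```python
# test_collection_example.py -- run with: pytest test_collection_example.py -v

def assert_scaling_stayed_within_limit(observed_max, expected_max):
    # shared helper: never collected by pytest, only runs when a real test calls it
    assert observed_max <= expected_max


def test_cluster_does_not_overscale():
    # the only item pytest collects from this module
    assert_scaling_stayed_within_limit(observed_max=1, expected_max=1)
```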
Signed-off-by: Enrico Usai --- tests/integration-tests/tests/schedulers/common.py | 2 +- tests/integration-tests/tests/schedulers/test_sge.py | 4 ++-- tests/integration-tests/tests/schedulers/test_slurm.py | 4 ++-- tests/integration-tests/tests/schedulers/test_torque.py | 4 ++-- 4 files changed, 7 insertions(+), 7 deletions(-) diff --git a/tests/integration-tests/tests/schedulers/common.py b/tests/integration-tests/tests/schedulers/common.py index 59f73df734..03b3e5f99c 100644 --- a/tests/integration-tests/tests/schedulers/common.py +++ b/tests/integration-tests/tests/schedulers/common.py @@ -16,7 +16,7 @@ from tests.common.schedulers_common import get_scheduler_commands -def test_overscaling_when_job_submitted_during_scaledown( +def assert_overscaling_when_job_submitted_during_scaledown( remote_command_executor, scheduler, region, stack_name, scaledown_idletime ): """Test that if a job gets submitted when a node is locked the cluster does not overscale""" diff --git a/tests/integration-tests/tests/schedulers/test_sge.py b/tests/integration-tests/tests/schedulers/test_sge.py index 0271bd2391..1712bb5671 100644 --- a/tests/integration-tests/tests/schedulers/test_sge.py +++ b/tests/integration-tests/tests/schedulers/test_sge.py @@ -19,7 +19,7 @@ from remote_command_executor import RemoteCommandExecutor from tests.common.assertions import assert_no_errors_in_logs, assert_scaling_worked from tests.common.schedulers_common import SgeCommands -from tests.schedulers.common import test_overscaling_when_job_submitted_during_scaledown +from tests.schedulers.common import assert_overscaling_when_job_submitted_during_scaledown @pytest.mark.regions(["ap-southeast-1"]) @@ -43,7 +43,7 @@ def test_sge(region, pcluster_config_reader, clusters_factory): _test_non_runnable_jobs(remote_command_executor, max_queue_size, max_slots, region, cluster, scaledown_idletime) _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime) _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_slots) - test_overscaling_when_job_submitted_during_scaledown( + assert_overscaling_when_job_submitted_during_scaledown( remote_command_executor, "sge", region, cluster.cfn_name, scaledown_idletime ) # TODO: _test_dynamic_max_cluster_size diff --git a/tests/integration-tests/tests/schedulers/test_slurm.py b/tests/integration-tests/tests/schedulers/test_slurm.py index 668eb4580b..3beb92bbe3 100644 --- a/tests/integration-tests/tests/schedulers/test_slurm.py +++ b/tests/integration-tests/tests/schedulers/test_slurm.py @@ -19,7 +19,7 @@ from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor from tests.common.assertions import assert_asg_desired_capacity, assert_no_errors_in_logs, assert_scaling_worked from tests.common.schedulers_common import SlurmCommands -from tests.schedulers.common import test_overscaling_when_job_submitted_during_scaledown +from tests.schedulers.common import assert_overscaling_when_job_submitted_during_scaledown @pytest.mark.regions(["us-west-1"]) @@ -43,7 +43,7 @@ def test_slurm(region, pcluster_config_reader, clusters_factory): _test_cluster_limits(remote_command_executor, max_queue_size, region, cluster.asg) _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_queue_size) _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime) - test_overscaling_when_job_submitted_during_scaledown( + 
assert_overscaling_when_job_submitted_during_scaledown( remote_command_executor, "slurm", region, cluster.cfn_name, scaledown_idletime ) _test_dynamic_dummy_nodes(remote_command_executor, max_queue_size) diff --git a/tests/integration-tests/tests/schedulers/test_torque.py b/tests/integration-tests/tests/schedulers/test_torque.py index 02bc2e187b..d723937ca8 100644 --- a/tests/integration-tests/tests/schedulers/test_torque.py +++ b/tests/integration-tests/tests/schedulers/test_torque.py @@ -20,7 +20,7 @@ from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor from tests.common.assertions import assert_no_errors_in_logs, assert_scaling_worked from tests.common.schedulers_common import TorqueCommands -from tests.schedulers.common import test_overscaling_when_job_submitted_during_scaledown +from tests.schedulers.common import assert_overscaling_when_job_submitted_during_scaledown @pytest.mark.regions(["us-west-2"]) @@ -45,7 +45,7 @@ def test_torque(region, pcluster_config_reader, clusters_factory): _test_job_dependencies(remote_command_executor, region, cluster.cfn_name, scaledown_idletime) _test_job_arrays_and_parallel_jobs(remote_command_executor, region, cluster.cfn_name, scaledown_idletime, max_slots) _test_dynamic_cluster_limits(remote_command_executor, max_queue_size, max_slots, region, cluster.asg) - test_overscaling_when_job_submitted_during_scaledown( + assert_overscaling_when_job_submitted_during_scaledown( remote_command_executor, "torque", region, cluster.cfn_name, scaledown_idletime ) From 0b12996bd42dcd1b3afd1068246491b0b8ed79c8 Mon Sep 17 00:00:00 2001 From: Sean Smith Date: Wed, 28 Aug 2019 19:28:06 -0700 Subject: [PATCH 056/201] Intel HPC Platform Spec Integration Tests Signed-off-by: Sean Smith --- .../remote_command_executor.py | 2 +- tests/integration-tests/tests/common/utils.py | 2 +- tests/integration-tests/tests/efa/test_efa.py | 4 +- .../tests/intel_hpc/__init__.py | 10 ++ .../tests/intel_hpc/test_intel_hpc.py | 83 +++++++++++ .../test_intel_hpc/test_intel_hpc/clck.xml | 133 ++++++++++++++++++ .../test_intel_hpc/install_clck.sh | 7 + .../test_intel_hpc/install_clck_compute.sh | 6 + .../test_intel_hpc/pcluster.config.ini | 27 ++++ .../test_intel_hpc/test_intel_hpc/run_clck.sh | 6 + .../tests/scaling/test_mpi.py | 4 +- 11 files changed, 278 insertions(+), 6 deletions(-) create mode 100644 tests/integration-tests/tests/intel_hpc/__init__.py create mode 100644 tests/integration-tests/tests/intel_hpc/test_intel_hpc.py create mode 100644 tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/clck.xml create mode 100644 tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck.sh create mode 100644 tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck_compute.sh create mode 100644 tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/pcluster.config.ini create mode 100644 tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/run_clck.sh diff --git a/tests/integration-tests/remote_command_executor.py b/tests/integration-tests/remote_command_executor.py index 4349c09140..db325a05a1 100644 --- a/tests/integration-tests/remote_command_executor.py +++ b/tests/integration-tests/remote_command_executor.py @@ -92,7 +92,7 @@ def run_remote_script(self, script_file, args=None, log_error=True, additional_f :param script_file: local path to the script to execute remotely. :param args: args to pass to the script when invoked. :param log_error: log errors. 
- :param additional_files: additional files to copy before executing script. + :param additional_files: list of additional files (full path) to copy before executing script. :param hide: do not print command output to the local stdout :return: result of the execution. """ diff --git a/tests/integration-tests/tests/common/utils.py b/tests/integration-tests/tests/common/utils.py index bc76fedeca..dc39759eb4 100644 --- a/tests/integration-tests/tests/common/utils.py +++ b/tests/integration-tests/tests/common/utils.py @@ -16,7 +16,7 @@ @retry(stop_max_attempt_number=3, wait_fixed=5000) -def _fetch_instance_slots(region, instance_type): +def fetch_instance_slots(region, instance_type): bucket_name = "{0}-aws-parallelcluster".format(region) try: s3 = boto3.resource("s3", region_name=region) diff --git a/tests/integration-tests/tests/efa/test_efa.py b/tests/integration-tests/tests/efa/test_efa.py index 6815b0282d..299f17d022 100644 --- a/tests/integration-tests/tests/efa/test_efa.py +++ b/tests/integration-tests/tests/efa/test_efa.py @@ -19,7 +19,7 @@ from tests.common.assertions import assert_no_errors_in_logs from tests.common.mpi_common import _test_mpi from tests.common.schedulers_common import get_scheduler_commands -from tests.common.utils import _fetch_instance_slots +from tests.common.utils import fetch_instance_slots @pytest.mark.regions(["us-east-1"]) @@ -33,7 +33,7 @@ def test_efa(region, scheduler, instance, os, pcluster_config_reader, clusters_f Grouped all tests in a single function so that cluster can be reused for all of them. """ max_queue_size = 2 - slots_per_instance = _fetch_instance_slots(region, instance) + slots_per_instance = fetch_instance_slots(region, instance) cluster_config = pcluster_config_reader(max_queue_size=max_queue_size) cluster = clusters_factory(cluster_config) remote_command_executor = RemoteCommandExecutor(cluster) diff --git a/tests/integration-tests/tests/intel_hpc/__init__.py b/tests/integration-tests/tests/intel_hpc/__init__.py new file mode 100644 index 0000000000..221b7a2eca --- /dev/null +++ b/tests/integration-tests/tests/intel_hpc/__init__.py @@ -0,0 +1,10 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). You may not use this file except in compliance +# with the License. A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES +# OR CONDITIONS OF ANY KIND, express or implied. See the License for the specific language governing permissions and +# limitations under the License. diff --git a/tests/integration-tests/tests/intel_hpc/test_intel_hpc.py b/tests/integration-tests/tests/intel_hpc/test_intel_hpc.py new file mode 100644 index 0000000000..c461057504 --- /dev/null +++ b/tests/integration-tests/tests/intel_hpc/test_intel_hpc.py @@ -0,0 +1,83 @@ +# Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"). +# You may not use this file except in compliance with the License. +# A copy of the License is located at +# +# http://aws.amazon.com/apache2.0/ +# +# or in the "LICENSE.txt" file accompanying this file. +# This file is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, express or implied. +# See the License for the specific language governing permissions and limitations under the License. 
+import logging + +import pytest + +from assertpy import assert_that +from remote_command_executor import RemoteCommandExecutor +from tests.common.assertions import assert_no_errors_in_logs +from tests.common.schedulers_common import get_scheduler_commands +from tests.common.utils import fetch_instance_slots + + +@pytest.mark.regions(["us-east-1"]) +@pytest.mark.instances(["c5n.18xlarge"]) +@pytest.mark.oss(["centos7"]) +@pytest.mark.schedulers(["sge"]) +def test_intel_hpc(region, scheduler, instance, os, pcluster_config_reader, clusters_factory, test_datadir): + """ + Test Intel Cluster Checker + """ + slots_per_instance = fetch_instance_slots(region, instance) + cluster_config = pcluster_config_reader() + cluster = clusters_factory(cluster_config) + remote_command_executor = RemoteCommandExecutor(cluster) + scheduler_commands = get_scheduler_commands(scheduler, remote_command_executor) + _test_intel_clck(remote_command_executor, scheduler_commands, slots_per_instance, test_datadir) + + assert_no_errors_in_logs(remote_command_executor, ["/var/log/sqswatcher", "/var/log/jobwatcher"]) + + +def _test_intel_clck(remote_command_executor, scheduler_commands, slots_per_instance, test_datadir): + # Install Intel Cluster Checker CLCK Master + logging.info("Installing Intel Cluster Checker") + remote_command_executor.run_remote_script(str(test_datadir / "install_clck.sh"), hide=False) + + # Install Intel Cluster Checker CLCK Compute + result = scheduler_commands.submit_script( + str(test_datadir / "install_clck_compute.sh"), slots=2 * slots_per_instance + ) + job_id = scheduler_commands.assert_job_submitted(result.stdout) + scheduler_commands.wait_job_completed(job_id) + scheduler_commands.assert_job_succeeded(job_id) + + # Create nodefile + # ip-172-31-15-31 # role: head + # ip-172-31-12-237 # role: compute + # ip-172-31-8-49 # role: compute + remote_command_executor.run_remote_command("echo $HOSTNAME | awk '{print $1 \" # role: head\" }' > nodefile") + remote_command_executor.run_remote_command( + "qhost | tail -n +4 | awk '{print $1 \" # role: compute\" }' >> nodefile" + ) + result = remote_command_executor.run_remote_command("cat nodefile | wc -l") + assert_that(result.stdout).contains("3") + + # Setup network interface + # + # + # /opt/intel/clck/2019.3.5/etc/clck.xml + remote_command_executor.run_remote_command( + "sudo cp ~/clck.xml /opt/intel/clck/2019.3.5/etc/clck.xml", additional_files=[str(test_datadir / "clck.xml")] + ) + + # Run Cluster Checker + result = remote_command_executor.run_remote_script(str(test_datadir / "run_clck.sh")) + try: + assert_that(result.stdout).contains("Overall Result: PASS") + except AssertionError as e: + logging.error(remote_command_executor.run_remote_command("cat clck_results.log")) + raise (e) diff --git a/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/clck.xml b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/clck.xml new file mode 100644 index 0000000000..eed5f92519 --- /dev/null +++ b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/clck.xml @@ -0,0 +1,133 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + ens5 + + + + + + + + + + + + + + + + + + + + + diff --git a/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck.sh b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck.sh new file mode 100644 index 0000000000..83122f5437 --- /dev/null +++ 
b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck.sh @@ -0,0 +1,7 @@ +#!/usr/bin/env bash +set -e + +rpm --import https://yum.repos.intel.com/intel-gpg-keys/GPG-PUB-KEY-INTEL-SW-PRODUCTS-2019.PUB +sudo yum-config-manager --add-repo https://yum.repos.intel.com/clck/2019/setup/intel-clck-2019.repo +sudo yum-config-manager --add-repo https://yum.repos.intel.com/clck-ext/2019/setup/intel-clck-ext-2019.repo +sudo yum -y install intel-clck-2019.3.5-025 \ No newline at end of file diff --git a/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck_compute.sh b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck_compute.sh new file mode 100644 index 0000000000..8ed40d48fd --- /dev/null +++ b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/install_clck_compute.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +module load openmpi +chmod +x ${HOME}/install_clck.sh +mpirun --map-by ppr:1:node ${HOME}/install_clck.sh diff --git a/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/pcluster.config.ini b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/pcluster.config.ini new file mode 100644 index 0000000000..ce020bff0c --- /dev/null +++ b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/pcluster.config.ini @@ -0,0 +1,27 @@ +[global] +cluster_template = default + +[aws] +aws_region_name = {{ region }} + +[cluster default] +base_os = {{ os }} +key_name = {{ key_name }} +vpc_settings = parallelcluster-vpc +scheduler = {{ scheduler }} +master_instance_type = {{ instance }} +compute_instance_type = {{ instance }} +initial_queue_size = 2 +maintain_initial_size = true +master_root_volume_size = 80 +compute_root_volume_size = 80 +ebs_settings = large + +[ebs large] +shared_dir = /shared +volume_size = 200 + +[vpc parallelcluster-vpc] +vpc_id = {{ vpc_id }} +master_subnet_id = {{ public_subnet_id }} +compute_subnet_id = {{ private_subnet_id }} diff --git a/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/run_clck.sh b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/run_clck.sh new file mode 100644 index 0000000000..9eb9247c4b --- /dev/null +++ b/tests/integration-tests/tests/intel_hpc/test_intel_hpc/test_intel_hpc/run_clck.sh @@ -0,0 +1,6 @@ +#!/bin/bash +set -e + +source /opt/intel/clck/2019.3.5/bin/clckvars.sh +module load intelpsxe intelpython/2 intelpython/3 +clck -f nodefile -F intel_hpc_platform_compat-hpc-2018.0 \ No newline at end of file diff --git a/tests/integration-tests/tests/scaling/test_mpi.py b/tests/integration-tests/tests/scaling/test_mpi.py index 2965ba9c35..045348668f 100644 --- a/tests/integration-tests/tests/scaling/test_mpi.py +++ b/tests/integration-tests/tests/scaling/test_mpi.py @@ -17,7 +17,7 @@ from remote_command_executor import RemoteCommandExecutionError, RemoteCommandExecutor from tests.common.mpi_common import OS_TO_OPENMPI_MODULE_MAP, _test_mpi from tests.common.schedulers_common import get_scheduler_commands -from tests.common.utils import _fetch_instance_slots +from tests.common.utils import fetch_instance_slots from wrapt_timeout_decorator import timeout @@ -28,7 +28,7 @@ def test_mpi(scheduler, region, os, instance, pcluster_config_reader, clusters_factory): scaledown_idletime = 3 max_queue_size = 3 - slots_per_instance = _fetch_instance_slots(region, instance) + slots_per_instance = fetch_instance_slots(region, instance) cluster_config = 
pcluster_config_reader(scaledown_idletime=scaledown_idletime, max_queue_size=max_queue_size) cluster = clusters_factory(cluster_config) remote_command_executor = RemoteCommandExecutor(cluster) From f2b9cce6bce47513640c753c130ed61d8d127bff Mon Sep 17 00:00:00 2001 From: Enrico Usai Date: Mon, 2 Sep 2019 14:21:06 +0200 Subject: [PATCH 057/201] Fix instance.json creation to take into account *.metal instances For *.metal instances the productFamily is "Compute Instance (bare metal)". Signed-off-by: Enrico Usai --- util/upload-instance-slot-map.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/util/upload-instance-slot-map.py b/util/upload-instance-slot-map.py index 4b679cb0b5..be4367a7d5 100644 --- a/util/upload-instance-slot-map.py +++ b/util/upload-instance-slot-map.py @@ -31,7 +31,7 @@ def dump_instances(instance_details): data = json.load(data_file) instances = {} for sku, product in data.get("products").iteritems(): - if product.get("productFamily") == "Compute Instance": + if "Compute Instance" in product.get("productFamily"): instance = product.get("attributes") instances[instance.get("instanceType")] = {"vcpus": instance.get("vcpu")} print(json.dumps(instances)) From 0fe9e2137074ee53c14cb81b2c39df0a0da4b7ad Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Tue, 3 Sep 2019 10:27:57 +0200 Subject: [PATCH 058/201] Fix sanity check for s3 urls Signed-off-by: Francesco De Martino --- cli/pcluster/config_sanity.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cli/pcluster/config_sanity.py b/cli/pcluster/config_sanity.py index c03213a7ae..40ff634e20 100644 --- a/cli/pcluster/config_sanity.py +++ b/cli/pcluster/config_sanity.py @@ -459,7 +459,9 @@ def validate(self, resource_type, resource_value): # noqa: C901 FIXME aws_access_key_id=self.aws_access_key_id, aws_secret_access_key=self.aws_secret_access_key, ) - m = re.match(r"s3://(\w*)/(.*)", resource_value) + m = re.match(r"s3://(.*?)/(.*)", resource_value) + if not m or len(m.groups()) < 2: + self.__fail(resource_type, "S3 url {0} is invalid.".format(resource_value)) bucket, key = m.group(1), m.group(2) s3.head_object(Bucket=bucket, Key=key) except ClientError: From fdcb8f115b431d16b828a82b68de7370223f006e Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Tue, 3 Sep 2019 10:33:50 +0200 Subject: [PATCH 059/201] Remove python 2.6 requirements since now it is not supported Signed-off-by: Francesco De Martino --- cli/setup.py | 5 ----- 1 file changed, 5 deletions(-) diff --git a/cli/setup.py b/cli/setup.py index 19f5f78821..a414736821 100644 --- a/cli/setup.py +++ b/cli/setup.py @@ -24,11 +24,6 @@ def readme(): VERSION = "2.4.1" REQUIRES = ["boto3>=1.9.54", "future>=0.16.0,<=0.17.1", "tabulate>=0.8.2,<=0.8.3", "ipaddress>=1.0.22", "enum34>=1.1.6"] -if sys.version_info[:2] == (2, 6): - # For python2.6 we have to require argparse since it - # was not in stdlib until 2.7. 
- REQUIRES.append("argparse==1.4.0") - if sys.version_info[0] == 2: REQUIRES.append("configparser>=3.5.0,<=3.5.3") From 70b5726588cf40bf2d3524db0358654099d42c30 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Tue, 3 Sep 2019 10:36:04 +0200 Subject: [PATCH 060/201] Bump configparser version Signed-off-by: Francesco De Martino --- cli/setup.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/cli/setup.py b/cli/setup.py index a414736821..3e91712a30 100644 --- a/cli/setup.py +++ b/cli/setup.py @@ -25,7 +25,7 @@ def readme(): REQUIRES = ["boto3>=1.9.54", "future>=0.16.0,<=0.17.1", "tabulate>=0.8.2,<=0.8.3", "ipaddress>=1.0.22", "enum34>=1.1.6"] if sys.version_info[0] == 2: - REQUIRES.append("configparser>=3.5.0,<=3.5.3") + REQUIRES.append("configparser>=3.5.0,<=3.8.1") setup( name="aws-parallelcluster", From 29203f158bfc7f43542d2e985fb29f12fbab6d18 Mon Sep 17 00:00:00 2001 From: Francesco De Martino Date: Wed, 4 Sep 2019 14:54:47 +0200 Subject: [PATCH 061/201] Remove docs sources from current repo The new sources are located here https://github.com/awsdocs/aws-parallelcluster-user-guide Signed-off-by: Francesco De Martino --- cli/tox.ini | 107 -- docs/Makefile | 191 --- docs/README.rst | 27 - docs/_static/custom.js | 6 - docs/_static/logo.png | Bin 14004 -> 0 bytes docs/_static/theme_overrides.css | 4 - docs/_templates/feedback.html | 5 - docs/_templates/page.html | 8 - docs/_templates/sidebarlogo.html | 5 - docs/_templates/userguide.html | 6 - docs/autoscaling.rst | 77 -- docs/aws_services.rst | 162 --- docs/awsbatchcli.rst | 20 - docs/awsbatchcli/awsbhosts.rst | 9 - docs/awsbatchcli/awsbkill.rst | 9 - docs/awsbatchcli/awsbout.rst | 9 - docs/awsbatchcli/awsbqueues.rst | 9 - docs/awsbatchcli/awsbstat.rst | 9 - docs/awsbatchcli/awsbsub.rst | 19 - docs/commands.rst | 23 - docs/conf.py | 369 ------ docs/configuration.rst | 1091 ----------------- docs/custom_cookbook.rst | 32 - docs/custom_node_package.rst | 40 - docs/development.rst | 18 - docs/functional.rst | 13 - docs/getting_started.rst | 241 ---- docs/guzzle_sphinx_theme/LICENSE | 19 - docs/guzzle_sphinx_theme/__init__.py | 157 --- .../guzzle_sphinx_theme/comments.html | 16 - .../guzzle_sphinx_theme/layout.html | 156 --- .../guzzle_sphinx_theme/searchbox.html | 12 - .../static/bootstrap-responsive.min.css | 9 - .../static/bootstrap.min.css | 9 - .../static/bootstrap.min.js | 6 - .../static/glyphicons-halflings-white.png | Bin 8777 -> 0 bytes .../static/glyphicons-halflings.png | Bin 12799 -> 0 bytes .../guzzle_sphinx_theme/static/guzzle.css_t | 729 ----------- .../static/jquery-1.9.1.min.js | 5 - .../guzzle_sphinx_theme/theme.conf | 35 - docs/iam.rst | 657 ---------- docs/images/as-basic-diagram.png | Bin 7841 -> 0 bytes docs/images/jobwatcher.svg | 2 - docs/images/networking_batch.jpg | Bin 60938 -> 0 bytes docs/images/networking_private_dx.jpg | Bin 43448 -> 0 bytes docs/images/networking_single_subnet.jpg | Bin 37109 -> 0 bytes docs/images/networking_two_subnets.jpg | Bin 39912 -> 0 bytes docs/images/nodewatcher.svg | 2 - docs/images/parallelcluster_networking.pptx | Bin 157898 -> 0 bytes docs/images/sqswatcher.svg | 2 - docs/images/workflow.svg | 2 - docs/index.rst | 62 - docs/make.bat | 260 ---- docs/networking.rst | 129 -- docs/pre_post_install.rst | 137 --- docs/processes.rst | 59 - docs/requirements.txt | 8 - docs/s3_resources.rst | 39 - docs/spelling_wordlist.txt | 78 -- docs/staging/README | 2 - docs/staging/autoscaling.txt | 30 - docs/staging/first_job.rst | 11 - docs/staging/pre_post-install-actions.txt 
| 69 -- docs/tutorials.rst | 11 - docs/tutorials/01_hello_world.rst | 116 -- docs/tutorials/02_ami_customization.rst | 121 -- docs/tutorials/03_batch_mpi.rst | 227 ---- docs/tutorials/04_encrypted_kms_fs.rst | 71 -- .../batch_mpi/batch_hello_world.sh | 5 - .../code_samples/batch_mpi/cluster_config.ini | 25 - .../code_samples/batch_mpi/mpi_hello_world.c | 35 - .../code_samples/batch_mpi/submit_mpi.sh | 35 - docs/utils/format_docs.sh | 17 - docs/working.rst | 14 - 74 files changed, 5888 deletions(-) delete mode 100644 docs/Makefile delete mode 100644 docs/README.rst delete mode 100644 docs/_static/custom.js delete mode 100644 docs/_static/logo.png delete mode 100644 docs/_static/theme_overrides.css delete mode 100644 docs/_templates/feedback.html delete mode 100644 docs/_templates/page.html delete mode 100644 docs/_templates/sidebarlogo.html delete mode 100644 docs/_templates/userguide.html delete mode 100644 docs/autoscaling.rst delete mode 100644 docs/aws_services.rst delete mode 100644 docs/awsbatchcli.rst delete mode 100644 docs/awsbatchcli/awsbhosts.rst delete mode 100644 docs/awsbatchcli/awsbkill.rst delete mode 100644 docs/awsbatchcli/awsbout.rst delete mode 100644 docs/awsbatchcli/awsbqueues.rst delete mode 100644 docs/awsbatchcli/awsbstat.rst delete mode 100644 docs/awsbatchcli/awsbsub.rst delete mode 100644 docs/commands.rst delete mode 100644 docs/conf.py delete mode 100644 docs/configuration.rst delete mode 100644 docs/custom_cookbook.rst delete mode 100644 docs/custom_node_package.rst delete mode 100644 docs/development.rst delete mode 100644 docs/functional.rst delete mode 100644 docs/getting_started.rst delete mode 100644 docs/guzzle_sphinx_theme/LICENSE delete mode 100644 docs/guzzle_sphinx_theme/__init__.py delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/comments.html delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/layout.html delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/searchbox.html delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap-responsive.min.css delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.css delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.js delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/glyphicons-halflings-white.png delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/glyphicons-halflings.png delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/guzzle.css_t delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/jquery-1.9.1.min.js delete mode 100644 docs/guzzle_sphinx_theme/guzzle_sphinx_theme/theme.conf delete mode 100644 docs/iam.rst delete mode 100644 docs/images/as-basic-diagram.png delete mode 100644 docs/images/jobwatcher.svg delete mode 100644 docs/images/networking_batch.jpg delete mode 100644 docs/images/networking_private_dx.jpg delete mode 100644 docs/images/networking_single_subnet.jpg delete mode 100644 docs/images/networking_two_subnets.jpg delete mode 100644 docs/images/nodewatcher.svg delete mode 100644 docs/images/parallelcluster_networking.pptx delete mode 100644 docs/images/sqswatcher.svg delete mode 100644 docs/images/workflow.svg delete mode 100644 docs/index.rst delete mode 100644 docs/make.bat delete mode 100644 docs/networking.rst delete mode 100644 docs/pre_post_install.rst delete mode 100644 docs/processes.rst delete mode 100644 docs/requirements.txt delete mode 100644 docs/s3_resources.rst delete mode 
100644 docs/spelling_wordlist.txt delete mode 100644 docs/staging/README delete mode 100644 docs/staging/autoscaling.txt delete mode 100644 docs/staging/first_job.rst delete mode 100644 docs/staging/pre_post-install-actions.txt delete mode 100644 docs/tutorials.rst delete mode 100644 docs/tutorials/01_hello_world.rst delete mode 100644 docs/tutorials/02_ami_customization.rst delete mode 100644 docs/tutorials/03_batch_mpi.rst delete mode 100644 docs/tutorials/04_encrypted_kms_fs.rst delete mode 100644 docs/tutorials/code_samples/batch_mpi/batch_hello_world.sh delete mode 100644 docs/tutorials/code_samples/batch_mpi/cluster_config.ini delete mode 100644 docs/tutorials/code_samples/batch_mpi/mpi_hello_world.c delete mode 100644 docs/tutorials/code_samples/batch_mpi/submit_mpi.sh delete mode 100755 docs/utils/format_docs.sh delete mode 100644 docs/working.rst diff --git a/cli/tox.ini b/cli/tox.ini index b86764c1e3..0709526836 100644 --- a/cli/tox.ini +++ b/cli/tox.ini @@ -3,8 +3,6 @@ toxworkdir=../.tox envlist = py{27,34,35,36,37} code-linters - docs-{linters,linkcheck,spelling} - doc8 cfn-{tests,lint,format-check} # Default testenv. Used to run tests on all python versions. @@ -251,111 +249,6 @@ commands = py.test -l --basetemp={envtmpdir} tests/ -############################# -### DOCUMENTATION ### -############################# - -# Doc8 is an opinionated style checker for rst: https://github.com/openstack/doc8. -[testenv:doc8] -basepython = python3 -skip_install = true -changedir = ../docs -deps = -r../docs/requirements.txt -commands = - doc8 --max-line-length 120 --ignore-path build . - -# Builds docs in html and man format. -[testenv:docs] -basepython = python3 -whitelist_externals = - make -deps = -r../docs/requirements.txt -changedir = ../docs -setenv = SPHINXOPTS=-E -W -commands = - make html - make man - -# Runs a python local server to serve the html doc. -# Useful for local testing. -[testenv:serve-docs] -basepython = python3 -skip_install = true -changedir = ../docs/build/html -deps = -commands = - python -m http.server {posargs} - -# Automatically builds the docs on every change. It also includes a livereload enabled web server. -# https://github.com/GaretJax/sphinx-autobuild -[testenv:docs-autobuild] -basepython = python3 -whitelist_externals = - make -deps = - -r../docs/requirements.txt - sphinx-autobuild -changedir = ../docs -setenv = SPHINXOPTS=-E -W -B -commands = make livehtml - -# sphinxcontrib-spelling is a spelling checker for Sphinx-based documentation: -# https://github.com/sphinx-contrib/spelling. -# You can add globally allowed words to the docs/spelling_wordlist.txt file. -# In case you want to whitelist a word in a single file use the .. spelling:: directive. -[testenv:docs-spelling] -basepython = python3 -skip_install = true -whitelist_externals = - make -deps = -r../docs/requirements.txt -changedir = ../docs -setenv = SPHINXOPTS=-E -W -commands = - make spelling - -# Checks that all links in the docs are valid. -[testenv:docs-linkcheck] -basepython = python3 -skip_install = true -whitelist_externals = - make -deps = -r../docs/requirements.txt -changedir = ../docs -commands = - make linkcheck - -# Target that groups all docs linters to run in Travis. 
-[testenv:docs-linters] -basepython = python3 -skip_install = true -changedir = ../docs -setenv = - {[testenv:docs-spelling]setenv} -whitelist_externals = - {[testenv:docs-linkcheck]whitelist_externals} - {[testenv:docs-spelling]whitelist_externals} -deps = - {[testenv:doc8]deps} - {[testenv:docs-linkcheck]deps} - {[testenv:docs-spelling]deps} -commands = - {[testenv:doc8]commands} - {[testenv:docs-linkcheck]commands} - {[testenv:docs-spelling]commands} - -# Runs a simple script that reformats the rst files. -# WARNING: Carefully review the diffs after running this script. -# The script doesn't evaluate RST syntax. -[testenv:docs-autoformat] -basepython = python3 -skip_install = true -changedir = ../docs -whitelist_externals = - bash -commands = - bash utils/format_docs.sh - ############################# ### TOOLING ### ############################# diff --git a/docs/Makefile b/docs/Makefile deleted file mode 100644 index b922f405fd..0000000000 --- a/docs/Makefile +++ /dev/null @@ -1,191 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS ?= -SPHINXBUILD ?= sphinx-build -PAPER ?= -BUILDDIR ?= build - -# User-friendly check for sphinx-build -ifeq ($(shell which $(SPHINXBUILD) >/dev/null 2>&1; echo $$?), 1) -$(error The '$(SPHINXBUILD)' command was not found. Make sure you have Sphinx installed, then set the SPHINXBUILD environment variable to point to the full path of the '$(SPHINXBUILD)' executable. Alternatively you can add the directory with the executable to your PATH. If you don't have Sphinx installed, grab it from http://sphinx-doc.org/) -endif - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . -# the i18n builder cannot share the environment and doctrees with the others -I18NSPHINXOPTS = $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) . 
- -.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest gettext spelling livehtml - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " latexpdfja to make LaTeX files and run them through platex/dvipdfmx" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " texinfo to make Texinfo files" - @echo " info to make Texinfo files and run them through makeinfo" - @echo " gettext to make PO message catalogs" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " xml to make Docutils-native XML files" - @echo " pseudoxml to make pseudoxml-XML files for display purposes" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - @echo " spelling to check for typos in documentation" - @echo " livehtml to rebuild docs when a change is detected" - - -clean: - rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/aws-parallelcluster.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/aws-parallelcluster.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/aws-parallelcluster" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/aws-parallelcluster" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
- -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -latexpdfja: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through platex and dvipdfmx..." - $(MAKE) -C $(BUILDDIR)/latex all-pdf-ja - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -texinfo: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo - @echo "Build finished. The Texinfo files are in $(BUILDDIR)/texinfo." - @echo "Run \`make' in that directory to run these through makeinfo" \ - "(use \`make info' here to do that automatically)." - -info: - $(SPHINXBUILD) -b texinfo $(ALLSPHINXOPTS) $(BUILDDIR)/texinfo - @echo "Running Texinfo files through makeinfo..." - make -C $(BUILDDIR)/texinfo info - @echo "makeinfo finished; the Info files are in $(BUILDDIR)/texinfo." - -gettext: - $(SPHINXBUILD) -b gettext $(I18NSPHINXOPTS) $(BUILDDIR)/locale - @echo - @echo "Build finished. The message catalogs are in $(BUILDDIR)/locale." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." - -xml: - $(SPHINXBUILD) -b xml $(ALLSPHINXOPTS) $(BUILDDIR)/xml - @echo - @echo "Build finished. The XML files are in $(BUILDDIR)/xml." - -pseudoxml: - $(SPHINXBUILD) -b pseudoxml $(ALLSPHINXOPTS) $(BUILDDIR)/pseudoxml - @echo - @echo "Build finished. The pseudo-XML files are in $(BUILDDIR)/pseudoxml." - -spelling: - $(SPHINXBUILD) -b spelling $(ALLSPHINXOPTS) $(BUILDDIR)/spelling - @echo - @echo "Check finished. Wrong words can be found in " \ - "$(BUILDDIR)/spelling/output.txt." - -livehtml: - sphinx-autobuild -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. Auto-reloading enabled." diff --git a/docs/README.rst b/docs/README.rst deleted file mode 100644 index e0ccc71fe1..0000000000 --- a/docs/README.rst +++ /dev/null @@ -1,27 +0,0 @@ -============================================== -How to build AWS ParallelCluster documentation -============================================== - -First, install the libraries needed to build Sphinx doc: - -.. code-block:: sh - - $ pip install -r docs/requirements.txt - -Next, execute the :code:`make html` command. - -.. code-block:: sh - - $ make html - -The documentation will be available in the :code:`build/html` folder. -See Makefile for other available targets. 
- -Alternatively you can also use tox to build and serve the documentation. -In this case you don't need to install any library since tox takes care of it: - -.. code-block:: sh - - $ cd cli - $ tox -e docs - $ tox -e serve-docs diff --git a/docs/_static/custom.js b/docs/_static/custom.js deleted file mode 100644 index 6f859c6fc5..0000000000 --- a/docs/_static/custom.js +++ /dev/null @@ -1,6 +0,0 @@ -$(document).ready(function() { - // hide argparse internal headers - // waiting for https://github.com/ribozz/sphinx-argparse/issues/78 - jQuery("li a.reference.internal:contains('Positional Arguments')").parent().hide() - jQuery("li a.reference.internal:contains('Named Arguments')").parent().hide() -}); \ No newline at end of file diff --git a/docs/_static/logo.png b/docs/_static/logo.png deleted file mode 100644 index c47d2d64b3cb20f5263b25894c15fc598f103126..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 14004 zcmV;lHcQEgP)003YJ1^@s6nkRXg00001b5ch_0Itp) z=>Px#1ZP1_K>z@;j|==^1poj532;bRa{vGi!~g&e!~vBn4jTXf8&-N$SaechcOY|MTD1{TE-Bnz>wRvu@`3%02fuc+mXk>+?JK{C>W#@qLNEe|g^7 zj~60OC4Q&RpLV|QA3R_F_(09y=jX4_ySl!|Y2O2dp9g+lNoUsM@B7;KKq-En58uCC z|9)=dKP~6`Kd$d#zX$*Q?_3GSE*vjK7f%Yw^RwwJi7QAg4St6A)1sd7JWlUn>wT3&qLxBHpaKfaXM zLVtu9OvR+_ciTI+4}*b@*|y7lUM>%EV`Ne71> ze0uPXF=m`;=2@oAI@|1XEXrr)RhF%~+UjfUxYMTnx9#e7+kKA{4yAPRDaTGd?esG) zwRY3Zw_LmRw%hOcnYC|Lf6ZF>EpvaLweZcFGM3-_>K|F-=~{mrA_z{3az@5t4rIJ4 z0~B;r&U_0wM`cbq^F7iOC6cvJl$;%uF*2A>h~NC^>xY}yR2bua=k`;cB8*`?ql~{NQ8m4F}9If$$AHE$LIX0yKu&An`BPk zGZ(Z8UU$QC8e>nex5d0_JDmOWz5Q5grpPOzAS26U*ISBYC(pj)2t5aD^Hy74m>qcT ztdep~v0K~VsIO!`-w{`DWsjtG3O)L|DTA|ZSq*vTvUiN5AMzMnnRgKDHe1VcjANyQ zWoh17YC2Rkpu$J2L{&xWdz}tk^K!|4o;;?#%XBpwXS|AGFUgNSeIsl6T)?5Pd1@g< zcg~t%I!k(f+qAsaIqT#NRX_;MokTK^Z-em7oeykL4rgjy)G>k93`V%1_`;GX0e)Uu zX9JMAADV@wRdZuCvUJX%8hdKv4D)R*+K@OULNdG=KgP+c_W(6gc=yy6>nVjEB7S5f zxmI(!k;Tn})%Jxxm-PT#B8 z%rrGcs#0G@$o8Z~Xe$>-dlKeI`GAF^aCjY@LZ$%&ft0{`YO(pm=O;Zwifs>GYmq{@ zR~j<$g3)k$PFk8SC0PQ98feFKSfW|!bp{G8RU_V%Ttx*|>g-mKC-UCzY6I>X7ULYB(x7a+-f-c1>uLU-qdG6Y(42~@s|TCE;(`CC_f3e=b-e%cu; z3E(Cm);dvc-|p2(3$`t==w5D0eDV#=%aVB@O{;4{XqDXOEz$X(?%57 za3Bx;v#oE!P9JUh@F3D??=Tw}i*UYlDTt2jtF#tTS#6Wj5>8p0`e=7e*UqN4=&@Ey zMiZK63Hq17S3O?|IJlWdJBv8LkbNBLSL{duQju%dd|hc3t&zvAeyjNDi$im*P1MT* z0Rg)MNv_i<4S(ornwkbJ1%!j@ZLTR*yKJ{5*?Y;)?1ea31NX(XK{X=Np}YEQJ-;2J3Yo<$kZVL&_qEC>8GN*ac|VDH%5 z8i(tc3r8ygufTAr3)lf=x4*mb7d=2c~KUjHRU>hd!Bs7nKvI@Bcab`6gi=45So`rJLbxMNEI zc!;qRvX0c>wH{4GU>rWQkWY(?2NUB&NusksQ+XJg9JK*Z zKG6?xBZ9l@ALch{fPz2im5dWs)$mvWPs4|4E6#cX3P09Ce~?~8;Rl_(oF3_qtePBK zglbcL{fvI%(WKzelSx)7QJ+YWhGC<*6PWNJ?!5+-LsKqCiqWeS&H_77a=l40Qv5RS zydN`y?x3mb9%6=ts+g1v@|{NFB}5Q3pB)KDG*OJpB%MI&^(!t(YO7{_EES5P@I|E5 zk4MROv5chnaupB+BsxrA6uYB~0Sytj;157=5Il4NnS$#?HJ>?HGw>2IQpO2TOMbfb zOb1NO#c<-SW8&say8U7T2YgW?2u`HoFaQ9P7~nZj?n-A!O@cxWwq6wBm#rJjg~!Cn zfT!W*W+*rIq$q{d4dsxDJpMtW)I;auh)6$}Nv9-ObCD4lyP06WyT!l0bd2FiKq_*;1owpK!Mn~i&}kkyPK{u* zx`(U)-wI&3Cqv;xlqCNuI9ZU9vbR7ir3Z3g1~GVDmQ&saZ*l0&Nz`jThjKZ`lEY09 zg%f1P4ix}gV~nwXynx{}TB!x;W|fy@G7W1U@Y~^3-#HJ#X?{aTxvKYBF=`N~QZbXC zT%|?feIEphVXUb-r1K4h2IY8Um~w+vq^(GQ0>x33I6_J{^iI=F6@Xxa0=oj}6Nf#y z{w!~m45Gx9S}ql8=SXh8BOwH-Ac!7+|7<&K0&sE>s4uAMITra9v_l8o5K^E%c7^D( z-UIhz#O{@oA~2XZ^cC)?;Fgibn9(a|PUNPh9nxJG8Q^%5E)e+yuGT+9!a@wJrM&^m z-4$VuhAx{U1I5q;G7dPYh1RoIim|Y@^K;fM^K6ciN9-B~iNyQDhaeNv2q=S^1qPs?kuS)*=Fv<@ zuNVQl-J~--y=V)-@@_zGc~0D;s+3iD(dDJTV!xhaDv`+i>nk2^5L-MO_6oyON-Dt3 z)@UQScy^>_qUOiv@&|euiM3?Cfdt-#-o-Edr+iu~nBlo0aXVC)0xZ?}=lL6d4q z5EzBFHxi>(jW6gsP7DZKa2(d#`RP089aP+{oV)3s87ZXV)Z9Z?6DhHXnQ0&cMlN=w 
zBS{&U@~+ZhA;c9l0yQ8(O&6A^EK!qPGd8#$IL4tN=ZX1}n6`=+94LtM7Kir-#qL@3 zpGr%hs%wKs(e`m*ldIzrjXJ7QWb{9XkNXTT>F{%Y2a|H(9FD@7NmF)$WrJQoj$I$( zxCjOZ@S{v*jSZ*-?2n-^<0B3>Wc3R3g5&foWM2AwDSaDPy@ci)xxbdd|@a-M;_~UTGSy(Q*msCwZ{!pz7N0MmflVVej`^2SqaW+_|n82C|JV50%b= z`_tm}eIwG3Yql!Wv?^6g2fzg^(fX`sCoK`EgERm~l8AAzSz57CQIYP*UT*EzjG9`- zFN~xTSabv`JJ-NkkmI1X+Ws2^55@sw1NoDM*gPDys>vfsv)1o{Hs%EkSb<0!glfH}_J! zdvQ&-*RB}>*k8=i_O#E^#7bK5f*!~r#F;12m?;z|rX0`$kk`7JX^;EhnfK7nqf_G$ zr6ZmJK7%Y*#8c2MfXdL6Rg*Q8{#>%r^7o#zQ4=F!)FO&ZD@S%{0^+Y@?W8rD@xnqf zX~150e-(SZX3dCh8XOai(t@tgpbz(vOVG(@e1sBkKOX$2zsm#FI^9L@+;m`eu<+Cj zN2o6jFo6@tZ25#Lr36qgB$~2Dm<-win~w29ytR_39oMDTgT}4o5|6bddhU$3O}(91 zXiG01yrssy*gmW`Iz?mUB=SRzgK5!WOEr$x6y*R+5aEK?HC?sI#Ra%~Aq@P%v%$0n zRf&y&hm-G;6@**BhBs<7eRe=Uk z{wD?^0+?I`-Z^RSQJThZ_zr-Qo{%tzQ-Yk#vF66R1iR?d93!d?ffpQWLf-=U$7|5F zam~~pP7EizJwlU%ZYT_=M|UJmr{Pz6_SDed%)YcdTXi=Ox(%>sKBHk8!fk0z5>aav zu#?`cvSx!6bkoMiMpd>0g54V6B~Og6)7(=d*8+A2)Q~r5q#3XdsTmS-WvGt@)rxmR z!`0^33X-9?R`>)Iu4xa17CksM!NO%)f%LUE!!rosq)EW2^e9n4i3kcIOazwSAmj)d z$d@`BiW0xJrZ(l*Srq9cZPE%HPXmr}N0I|Uz-rr=kdLG(f1%!h4%PtiUeWWVj?8^`kiy+Yu3Kq$7}AdATYF}*a^vh4wltPEu&1qsFT zW;zcI%RhwH*p2sdDcB^42a|OWZD^*5mAjaQ`mFJc2tvwO%Y*j{Qrp(Plvve%&{We3 zWn0uvjhCWEL{XnT+(hb3%SChrLFl0;DI_1XF(AEnI)KUBFHpP58ug>ArI#f>LQP>v z0RuP|7IIg=3p9lSMUcxyq~UufEX(XykyW4cZQ2SxahOln9t(9kxVUuOObb(Kr}-LAxcIqI5L0H~$7 zj%!s@pm7nw_&*H>X>B3`DLtXo+rPjCuJCrFJt3t@qkFEs+SZ^gcr3-D&(L@D{Hjx3 zH6(&s5%;f6`;}YyKn2*SZ$yFSzEmnXqC)WIu-rgpmPlUYjWqT%2s+&dNeUdQ2jFo~$W25%0 zZM9^XKAY5BzA>bV93Q|-QeRaJ7HEwk5SLqD@EcD+1{vJMVu43}=0>USsId@=Vk~3| zj-QllDo0j`zflubeU@_xM??C98)m7cptD+D4tay&xhP3c3+5lX=ZMG_iI`gPwqkiQ zS`*Ka_K37tS&$k>H|=Mx)DxIJF2)ECgb<%s6oV_irp=xGw(B&;(**IBdNAX#dBAhw zBfR!6katikUF$q_F^7k@IAINo#d#PRhS4M`Ox>A+T~HxwoPZ<4JJIkg8BC%$hOFpxV)+d~XhQ>i z3C4gw9Ye)+gxG+6+PI);;Y&<%myYY2R3aa<)SJZXvF$WQ8ViT8h2xe=iNY@GH0q?A z_fOD~I>bfd!(uteI)tu+4-K5M(D;kFP5 zUOUDoS_bL&L@AQD9kxSdzy*n=EFvg|M$8~tQLs)=tYaQHoQ2$K6VL*u_A7RUmCiaT*CzG zHnuE{_%7JWsqI7&mat?@8kLT1_!x6QenBsrga(C8L4)7(NKMES4Bhik=%4#`L&@lP z_SW-)IQ;0|6itZT6yd>0lq@cJ!Vg&k*8;+=cPE2sD53#B01(@xj+074ZV(5a?ZP!X z(rdmq+{Z*f;JPT&YOVHxWQblG2?wL0Sdn568vW~>EhP2W(v5B`&R}Rq0I~N$6K&n= zFmy)%$e`W`H} z0Axt-;DE`K(7GLkJh>lqX=nmclcqL&yAs4cr$91!7F@L(?wKl9j?NtDnH{q@7@?cv6t=Y3Z~0MvK(<0NT|Z2ZIG) z*-He3?CI488(J6jfcf00|9`ev_seQEH$@v@s67q7NYCin#H6tf@QnF0^}23gp)_bL zkST#u?~bU~4qi(xntu8P#c%|SdYCWG#6G+M6uiL$u3=0r{-P-)UK9X`z~Of2Ca>`) zDgzZ#RS!%1qf`{v4&r5%5awSKv7D=PE+Rv)2j>RernJqaws9sAAIsOf$#ko3O2k#` z<{7w7w>PaOcZ&f8P}>9p@4nhBgG{3>rC;jxPL1=GT@H~(kB~D`5c!8|s@ncq+SZyl zcRIB3BT;+fLT(rkY%eEq2fE{mAo8s(;zqm4NLkk=zV>uh^%Oh+f|7uvFQUJ&SUf*V zmHC=jS#OR%ki5DZ_hJaBd$fk3pDB2C8xTSbLLMpzmcD__cL|psZK!UVu$^ny0Ud`g z>YU~8LJN`aD;U)s#8fihMH!ud<7h|`XoCj@IW-|J^q<#lGfvY9Bm&p<;Rm^6=oPmS z!4we2@IfHn)*ZRJM^Qaa!5!@)Y1V$tBqiL?Kx7Z?gfy<5wd2(c!vK09a3HV_Xr3uZ zmwhrY`A|dFVMgsmpxq;9bVD14QTGI7s!}4Ah3bymyR{6^z;+tw%o3U=M`-MLP!A;k zQnLfwK~WTQMbq>ffHE*r9&Z3sLS-qj8VK0(Jsb%f8U&kQ+Og86E1`&MMom5V(T|ar-lekn*;DpmolzZ17}z%O0wR%icwgYb!ezK-Mwv9T>G3_<8o+@ z+I0J&0ioafu{0=Xf4(i@&%b5N9TB0_Pl+fBGf=4WnQ8KDLvCPcJ6uy*;4jgmSxOR( zQUJjP*r8Ss7)LQe;sY0tpD+)cLc5t^Gzr@XJ0_{bF-p2k29S!<;AA67Lkkf$LXPL* zvNqE)T4*9NPxfy#KY)u?dXmb!@1Cn9)TcOf4=U6gAIa-(okU+~`00xFNi{LB#kp6` zs!3UAngUmAfUn4SE#4jw7??;*uqL8jw~W3%L=<4x;cjiCcO;LtAfoQyg0c^*2gCa( z1sBhKkKq35hLeeVkRn~ywgwZWKu39tEMX#`b8R{V3$8svTs$@X+5Vsq!`ZrhRySR1 z$G^&9cr5-!qtEs*8}kjSlWj=Syah`6-J^k)Auw0lwHN09qM9%?s`sHQ{0o(>AzlxT z3q~K^^yUuOKbaSdtj1yVSATb9XrH~CBt0h6Z*3v~ZCmJsz0*Ej?ap-jsz_4fTTY%) zb`$s_w^x&bTCf}_kOkj8)uhdMfDPyeoz|6(l_YlP++&3>(0fa}9ynK2ctq&)xuyHm z=+q#UiH1j6p{jvXltS4>OZ1Nb*}%RG)vd#bZ<6+c>%CL7Qe$4gDhBfe~G 
zIj{5GhiX|y(*2dQWDOT?^6q~D11@6J{?U^e00006VoOIv0RI600RN!9r;`8x00(qQ zO+^Rd3lIq=BB}n5=l}p3!bwCyRCr$Pod=i{Mc&3=QF6{fBMUiHgXQGs+1FNL-eja|X#sP78kZ_SDEQ+g(*XVRwP~Jvuo z{dUzqR^Rid0Ab+TwQD$X^f=C*JCE}hF5=?Fi@1K{22!U=g^U?8AVY?9$dxk(GG)vl z|3!qP=H#idFKya1*3Wgz#cs)dHKVeQ&kB3t4}{bmdu&t zr-5_u(C_Hg=R>Srx5@VBWzCWque|sSI(KX%KM9U);(_kfr$4ss*af@W^+5i7m^opj zB}5a>oH>hOqsC#}#Oe4$90yC0!UfQ}MH4*Qp%t=c%PK!L!?ggpq>dRk6)_X1;rAoQ zlGw!dm#v)Jd zobr>v>D>MQuxiZ)IkIQ&j2IMnAfFuB!&^OZ$}9{J*XqpKbMn*BkuF_Y>%n{K@o3`} zz}37xVe(8gdgL*DHfU&A#YYYtJd9^v=!I{F{UAS4Q_5iJp7GOM%UlQsF1ZVr#LAH~ zw4dfJSS&|@!|R|EUxxJQaeuzta%9h?OPBGocyQk8J1{KbBNs1TLjQq-FnrWl`H3E5 z0j^lJHmoOa2gUOFpkes(n-TI8XE;;RK2dS%^dH#1bGIA?#^$X%tP5?z8nHwN%c7-m z^2nLG4^_jx_ueZ<2|0KE0=hoi6aSmLP<|3#VkS(NM|z9}xPIdnd6f8kJ!BN-&R--) z_GHVN1r^Fi$&oWlm#>mXfx!pSl$ER3%cHb2_S z(3itUjv9HEPVo2Eoo~vNDa8y}U5*m+-=U+he8p-xGK2T4WQn4vQLPf{*QtrxHLIe0 z+0vG`YvSm*aVw0Skio-7TehDQJh4g@%39$Ro@@bevWyw|FN<_y_UzqnJ&_9*EyI#! zD{VbVJ6`rcjKo!~Xtci@m^ zig0%Rb>UJg-s=lvyDJyXk|Q<7cfro7VMv3n_pm_&uwuas>q1Z@b!t_!o)){C%#*87 zt_-?9)*i#Y9*Cc({$SZKCj2;V3a(taDn|)nI6xPZq7A!6T)aTowP&9z@sVKcz$MG| z_Ftp=56Pp1aMI9;k1Nx#uLt0}Z$CquN17_dM<~`p1@fas(*}6&U$0`}>5;VkaaL-^_#{6 zRoO#cmbqM#ysTKcP9B*P=fY<6CpJ!P|4|H$8`P1bTcOwc^PKr|WDl3`;r|_I-OoVb zIaIImAifyzo)F`BeDU$S`1;d+^1FJB1=y@{Jxm@m1WV>lwobZvllmdcF7TIl=gpod zRYD(1S+IDyJhI2k4%8d=cC6jtem2{=d!H+#+zXutUHB!cbP2~er*zTc)HHvnnMOp}K8vu9%BN`T>}LEV~K@i51%io_Sh9SjJaWcq`6VnS(2+Nfxvds1l9d1{ zo@&CLP{A4H(CA|vcln^HR-J-Mqn5ryMtgsv{slxz-b!#dZ>S6VZ3SNAcQ<>rAuGG)!u&I+_~hDGe3@<;_4cNgJdVbTsgDLBWHBp9cft4IX*@% zT)2phTg*hk;Q7v-d*tX=iWMpNm)**x*ohT!<}>b6jd{S>T}8!`PbybbKY(QC?!D;K z_Yu;l(;-YpPYLD362(t`Nm47b(1DE7a#j8&{Xf zDPFXYm5I_ZWyWlDd8Q|J?6QBr!cDRhV2XP}b4vH;%cC^X$#RXM??n}lW7x+_)k+oQ zQ9>AsF)0P(?5->PXlU?o(H?!glUYG5^V_XEK8@G@)z5m^+~jhDCD{oulr+-VNxax5 z&Vi}|hPiJqoN@BzvA4Y#jAl?;2W?YT`iv`tX_0Q{=r)#ZY655Y=|9au$0wdg%joVH z^zHZ9zGHXT#TYUH9y)vkQ)m8y_xpd2XJ71vmL0pJdi~ZYSG|e#Ri(}&XwvF&tGbuv zLH!1NX%)EAxYt#=1d47M=4_)YQ=%9)ZML@+DLDU3L=}bYWs;WDr3oLZQPsGrD1G{L z=<`NTIWmXKF>5e+51X}p!m2i3yWVUDcT2!MA>{mp3z#_dCrp_>8(X*A_bZq{PqIM% ze26Mj64|q5!Kg8lf)91mM)gpw za)pqjVgs518NeL={a7?;@t9SD&9qT4Q#FOc(xF+i=V`^~1`3;SGp4jMa7!7{5Cpw`jK;RPW61O z8n!Dyp!ezP8T+G3>FK=kMh*1duM=DiSgSd4^k6G%6DoL(=Kr=7Z619FpMEveb$ZWO zfEzb&v%1u7hgr`TdW|)!SCS)pV&m)$|J{4{I~Jg-Qg2MnIiV_0)dirND2fkEVfj_e z$giy8Vb*a{n4y3(_qW4G*4PXLq440yEQKe9-L{VhPk_o9& zrnEkf7bnWl2X8DZTaFz+iM{&{VE@6x#$J}G6XIpuq#1bogHPlrAr#Zt?*_@yO=2cY z!@D1TCP%l@`=2kO`xBky=vMf!wtxKZa&#+`O85E0 z87FrBG-UQGJ7c3e8{VDJ^}~b$PBaS^EytW+f5YNsaYiXmC;ry0GkU$!Lynx$&c9o? 
z@5J<<=E{*XG>%6P{}S)N^{Q*}5q8xxsLQ^DP{T@HRpl>ZeRh>0PF`!q4C$@?Y@A?t zqC(lya&#+v2<(2=3XT1zDn1Pr%xSl6*%V)Y)(^1@repAb{*CCiEgYRH6(fEaZ}rgV z&`bWW*Rbaz-gYE+un*RX2o_Ze2TB<@%xE`t{&# z5IY)as$={M094@@-}IXk)hav31N{qBDR$byPH^@!z476IGdjgb5l)F2s`T=GnLT%b zb{SU+kl77gUjl8KCp)*7qu{x8$!yyTx6p{cU(qhOY0HjVY&KOvs7hR3%0`uRG{RXM z=o@>~g;(&F5LFck z3p0I1=hT8pbA#g2!PsL|DSNwK2-qzO@;wlBm z5Qz?_Zm>Yvy;pCz28ymob%k6SnG)OKu!&KblEp3i$b@QDZ`KO>{gTUzbTO&IW$}L zDg|h=-!CsBZKhyhxQE-vny_t0;G0a=6E>A9hPPz3Q@CZ@4qUn%Sel5@ zepU0(1o03!{ogfQ*neJBl_!&?{p`58JHuN#r%46FXRNUd7yQ2`i}P2(*_%biDwF~& zU8=ZI`S(|YM~bERupF5~8)(?~F?gY#hq!0DNuY7V+TeR(RH$u0A-*~Wd-w5| z>(=8{5(pZ9R0=L6pgq)id zGGgwn-sOq!J8;nIvRWa$+`01?E8i=^e`m50Vg`6o)Z9-Y44J zbk622SYqq|c52@e{od&%N6r|PV?8AV=yo$7!Y~4#1cpsa?Yk#Bx3|jZS=gw9A(au| zIM?~9FnO-B#3BRSH;YNFXw%wA7nhfaXuhBN-;m?1_UymATl!WYJ{ zT*@u9>u6J%ijR?XzDS0`GD-Mz3p2M{=6Z44+!KiX=f$pfL`ATb&!(B0aKP1zWn8A= zBO?KF`C&U@s2W=QVSMeMFZ~Vg^nS&fF$s$qy~j>)`-x00!y;a|w~CtvAKkWbD!7%J zn>Qxu8tQeheviA{CpP>YM-BPhRXwPY00}EHKKtmMfcJuSg0*hL#WMQbGhJ^fIjUGb z3SAy`&NZ^1fqf0SJyM+DzcWp9<;#;>yZN~elNp_`_qKf5QgReH+^ad%d3qIW{+jgT zx2}cn>h3+?Rfo_=dI#TUuY!L790B2$3a)& zJkhy>(X8IF{Uh=yaN4$PE|1i7eYBl+<~M;oIWxwOuuK~6lM^mj;x?e)Th?R{$o(*K zLi}O6jD|JKuHAT&geTm-&BJ)iaNiO(>##S270b+G@Paz+zY+)xf={0QGmf83xO+mn zw4OErh7)ufUYfvNB)I>|*|Xe>V(Krj5KOyDnCvcjtSq8J$yq~pP-n0u2jb;V-PlEsRkR*foFmt0a- z_eSt|$*b3FwC)-2A6pNO9y{*1DH5|s+y|#(g(xfR#rx>SepV5b03+NvPzH?gF|Y64 zHI-Qm#s&k4fS3{yV1y(kcxyL8gdm6rFhY_N5nzNQB_hBGNlHY35t5XM03#$R5dlU> zQX+A{2!RuS0l0J=xN;h}`iB^^MOVn_-n2mKEaEF0kR~UPGK1&u+Yk|81T!uk0e)Wy z9E$}`t^@wqDn2vp14^GC$W|7}T@%RP9LU99XHPmdBESgFoZAEJ7!B;53Y<{Czr+>U zD**LI068j20mfev)95Lo)Zl8~BHSSyo)4@W2ppI#24R|?vXuv-*9if}U-}ahqJXoz z#q`(&sPT#Cn-T6lPHg~|{R22OPks7tnh+qIG&dc%m?QHlPUp=dWiKsJlmv2 zNREhEZgoFka^*x49}z$E@(Ce8dMdR(7lM=#nLn=sTfPOxl>k<~2V4tXLvw^=%Jxyf z%C~_VnwM8oBlo2PQV9cS!}LsBv#Vx-+RI{D!QHa^Ma2o27YO0O^pfQ2|V})Q2Ot{z54AsBHZOLY%{%%@GW-A z5Bx}(QQCmM;Ae=Y?w z34yg;1{8335e$X6ycaO0ps*X>5woR!kGKeTDGWY4k}Dd=xON39x^b z`Ugk2%@HmGeFcVXwhmL9V&@eb&gZRb{b81hV@hUTXHOo`1bR}Xo&`#F7q&oN`C)|H znyY7koj(Gb2YWnSFS%*}ZG<52vFT-A(HmH6vnf-mOu&=EbW4%SP=JInqByZ6(*ZG^ z;Zmd&wqy&B7@34lk^Eja5J3fJyQRRk;lQr(&a+*|jX+kh0{Szv5peidIq;!STcGI_ z>(E?)UYI9b&`nIQ?si150++=e2a0#GlF;Hudp?hl{NReWbBrfIeMa+ga)wj79Ps!J zzTAihMr9W>r_FwX^+y6FgoXs}1xOfRWX_Pm#z4qY3Ml#*P$b&^$%&BoFmAVJqL}H% z0!Npbt#ZtYG>QQVww53GaBMj+wWb{Sz!-AZ!+-e|feFyd{+Yr9ejd0G=w{er7R*`( zDBR97Ek|a?#D%NazUe}ElYrkB3DLRODw$p^#b*~YCQRe5dS4EFDAEaNJWdXR6(FH; z8uu2k>AzwS+<%1(1%L-07Ee?wAYa2s>M!Jo`*VDym^gkC!kYn{Tw^xV`9f!-!s|fg zcg1X`*a&m7m`qOCyaVNtxxoFxHk1S}Krftf7r$WtLj!^0%Tq@qlnCKw;wG zkZ=SJ{!e`U0$lQJULFt(EY}qdNiNOC_2>2iF?>OfpR^c!j^;~%Zr;bx36O9Kr<=~{ z*DjwN3`~UQttY;OD09~mf~+jg-Z_kU*K*;Iu;G??oW;WnJWfQgaar1W6~!u7;WaCC zqJ_n*v0|3=r{QSfhzcjvhtLYp3#+SF^%Y_i%d)P-Cs62p<*E+k6hib0@@}i>zHwEo zJaL|Kd9FZ5gz&8BL`JxxKp!E6ko2*W|nDxT_it~zdDN_M=> z`}CR%hqXY$oo2%;Krbg&2_Xujm1(-fhT>ygTUH@TKE+u|ia&*Y!VC?=K`KK!vEjD{F)fE zh=Y#6r6Zp2amhS?z%#=oW%YUH5*|B`$$~&LRt!F~IK{VJlh=!x5(o1raRumw%Lh|> ztit9=-;VGm(Vnq{D7`uw`*g&t-zo7gYe3|v=n1QE#tSx8$`x+5Um0JMN#g%*F9!)p z8UcEtyT0KIVEcETcw&S^LOd|c(U*Hg#^gd2-ugudjphAbDD3JV%Yi*fD?l$?dYE_I z!b%JMEBhk^2zxtLeaVo67V4WG$ zLy?ZW;GqDRRVw+kC*2vY?2>QnuUKcG{`W@Fhh!!|AFiGEh>%qo>{WG>r;Omq)w1BD zcvsKfEa8SRUwXi0Uj{0_CzNIOu9M^|Kwsz~(qY*n>Q7skWrZ84O?ogD6F=jFciVtK763pm<%;+?k{x1RugVHSYZ9o$5RxzFQq!1XKg zn|C0zshHuRO_#qhkgI0+Y`R3^_78O-p*~K42WB~ZXq=GZ0D@% z35?%C+*txlfcVwU?ehrnw9KJ0SIHDtateSm0L-umQ-gFKn7`&sm$9JuW{6^R$&@=> zfDUk(X5RyItY>z5Qb!y!CB_+(<>~%= z6rc%oZs_oES-&Q}yg6Nd(e|{`ZnDmfu{PRg+#8wAIm|C}sZ1%H1aA0~hOec)vuCov a0RA6J{K<|3^IWX}0000 -

Feedback

- - diff --git a/docs/_templates/page.html b/docs/_templates/page.html deleted file mode 100644 index 24dd5f6a5b..0000000000 --- a/docs/_templates/page.html +++ /dev/null @@ -1,8 +0,0 @@ -{%- extends "!page.html" %} - -{%- block nav_links %} -
  • Home
  • -
  • Forum
  • -
  • GitHub
  • -{%- endblock %} - diff --git a/docs/_templates/sidebarlogo.html b/docs/_templates/sidebarlogo.html deleted file mode 100644 index e3206e77ff..0000000000 --- a/docs/_templates/sidebarlogo.html +++ /dev/null @@ -1,5 +0,0 @@ - diff --git a/docs/_templates/userguide.html b/docs/_templates/userguide.html deleted file mode 100644 index a4947c676e..0000000000 --- a/docs/_templates/userguide.html +++ /dev/null @@ -1,6 +0,0 @@ -
    -

    Getting Started

    -

    First time using AWS ParallelCluster? See the - Getting Started section for - help getting started.

    -
    diff --git a/docs/autoscaling.rst b/docs/autoscaling.rst deleted file mode 100644 index cb6c908462..0000000000 --- a/docs/autoscaling.rst +++ /dev/null @@ -1,77 +0,0 @@ -.. _autoscaling: - -================================ -AWS ParallelCluster Auto Scaling -================================ - -The auto scaling strategy described here applies to HPC clusters deployed with one of the -supported traditional job scheduler, either SGE, Slurm or Torque. -In these cases AWS ParallelCluster directly implements the scaling capabilities by managing -the `Auto Scaling Group`_ (ASG) of the compute nodes and changing the scheduler configuration -accordingly. -For HPC clusters based on AWS Batch, ParallelCluster relies on the elastic scaling capabilities -provided by the AWS-managed job scheduler. - -Clusters deployed with AWS ParallelCluster are elastic in several ways. The first is by -simply setting the ``initial_queue_size`` and ``max_queue_size`` parameters of a cluster -settings. The ``initial_queue_size`` sets the minimum size value of the ComputeFleet ASG and also -the desired capacity value. -The ``max_queue_size`` sets the maximum size value of the ComputeFleet ASG. - -.. image:: images/as-basic-diagram.png - -Scaling Up -========== - -Every minute, a process called jobwatcher_ runs on the master instance and evaluates -the current number of instances required by the pending jobs in the queue. -If the total number of busy nodes and requested nodes is greater than the current desired value in the ASG, -it adds more instances. -If you submit more jobs, the queue will get re-evaluated and the ASG updated up to the ``max_queue_size``. - -With SGE each job requires a number of slots to run (one slot corresponds to one processing unit, e.g. a vCPU). -When evaluating the number of instances required to serve the currently pending jobs, the jobwatcher -divides the total number of requested slots by the capacity of a single compute node. -The capacity of a compute node that is the number of available vCPUs depends on the EC2 instance type selected -in the cluster configuration. - -With Slurm and Torque schedulers each job can require both a number of nodes and a number of slots per node. -The jobwatcher takes into account the request of each job and determines the number of compute nodes to fulfill -the new computational requirements. -For example, assuming a cluster with c5.2xlarge (8 vCPU) as compute instance type, and three queued pending jobs -with the following requirements: job1 2 nodes / 4 slots each, job2 3 nodes / 2 slots and job3 1 node / 4 slots, -the jobwatcher will require three new compute instances to the ASG to serve the three jobs. - -*Current limitation*: the auto scale up logic does not consider partially loaded busy nodes, i.e. each node running -a job is considered busy even if there are empty slots. - -Scaling Down -============ - -On each compute node, a process called nodewatcher_ runs and evaluates the idle time of -the node. If an instance had no jobs for longer than ``scaledown_idletime`` -(which defaults to 10 minutes) and currently there are no pending jobs in the cluster, -the instance is terminated. - -It specifically calls the TerminateInstanceInAutoScalingGroup_ API call, -which will remove an instance as long as the size of the ASG is at least the -minimum ASG size. That handles scale down of the cluster, without -affecting running jobs and also enables an elastic cluster with a fixed base -number of instances. 
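The two behaviors described above can be sketched in a few lines of Python. First, the scale-up sizing rule for the SGE case (total requested slots divided by the per-node vCPU capacity): the helper below is illustrative only, with a hypothetical name and signature, and is not the actual jobwatcher code.

.. code-block:: python

    import math

    def estimate_required_nodes(pending_slots, busy_nodes, vcpus_per_node):
        # Hypothetical helper, not the real jobwatcher implementation:
        # divide the total requested slots by the per-node vCPU capacity
        # and add the nodes that are already running jobs.
        return busy_nodes + math.ceil(pending_slots / vcpus_per_node)

    # Example: 20 pending slots on c5.2xlarge (8 vCPUs) with 2 busy nodes
    # gives a desired ASG capacity of 2 + ceil(20 / 8) = 5.
    print(estimate_required_nodes(20, 2, 8))

The scale-down step maps to a single Auto Scaling API operation. A minimal boto3 sketch, assuming credentials and a region are already configured; the region and instance ID below are hypothetical (in practice a node would look up its own instance ID, e.g. from instance metadata).

.. code-block:: python

    import boto3

    asg = boto3.client("autoscaling", region_name="us-east-1")

    # Terminate an idle compute node and shrink the desired capacity with it,
    # so Auto Scaling does not launch a replacement; the call succeeds only
    # while the group stays at or above its minimum size.
    asg.terminate_instance_in_auto_scaling_group(
        InstanceId="i-0123456789abcdef0",     # hypothetical idle compute node
        ShouldDecrementDesiredCapacity=True,
    )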
- -Static Cluster -============== - -The value of the auto scaling is the same for HPC as with any other workloads, -the only difference here is AWS ParallelCluster has code to specifically make it interact -in a more intelligent manner. If a static cluster is required, this can be -achieved by setting ``initial_queue_size`` and ``max_queue_size`` parameters to the size -of cluster required and also setting the ``maintain_initial_size`` parameter to -true. This will cause the ComputeFleet ASG to have the same value for minimum, -maximum and desired capacity. - -.. _`Auto Scaling Group`: https://docs.aws.amazon.com/autoscaling/ec2/userguide/what-is-amazon-ec2-auto-scaling.html -.. _nodewatcher: https://github.com/aws/aws-parallelcluster-node/tree/develop/src/nodewatcher -.. _jobwatcher: https://github.com/aws/aws-parallelcluster-node/tree/develop/src/jobwatcher -.. _TerminateInstanceInAutoScalingGroup: - http://docs.aws.amazon.com/AutoScaling/latest/APIReference/API_TerminateInstanceInAutoScalingGroup.html diff --git a/docs/aws_services.rst b/docs/aws_services.rst deleted file mode 100644 index 45c13d2e5c..0000000000 --- a/docs/aws_services.rst +++ /dev/null @@ -1,162 +0,0 @@ -.. _aws_services: - -AWS Services used in AWS ParallelCluster -======================================== - -The following Amazon Web Services (AWS) services are used in AWS ParallelCluster. - -* AWS CloudFormation -* AWS Identity and Access Management (IAM) -* Amazon SNS -* Amazon SQS -* Amazon EC2 -* Auto Scaling -* Amazon EBS -* Amazon S3 -* Amazon DynamoDB - -.. _aws_services_cloudformation: - -AWS CloudFormation ------------------- - -AWS CloudFormation is the core service used by AWS ParallelCluster. Each cluster is represented as a stack. All -resources required by the cluster are defined within the AWS ParallelCluster CloudFormation template. AWS -ParallelCluster CLI commands typically map to CloudFormation stack commands, such as create, update and delete. -Instances launched within a cluster make HTTPS calls to the CloudFormation Endpoint for the region the cluster is -launched in. - -For more details about AWS CloudFormation, see http://aws.amazon.com/cloudformation/ - -AWS Identity and Access Management (IAM) ----------------------------------------- - -AWS IAM is used within AWS ParallelCluster to provide an Amazon EC2 IAM Role for the instances. This role is a least -privileged role specifically created for each cluster. AWS ParallelCluster instances are given access only to the -specific API calls that are required to deploy and manage the cluster. - -With AWS Batch clusters, IAM Roles are also created for the components involved with the Docker image building process -at cluster creation time. -These components include the Lambda functions allowed to add and delete Docker images to/from the ECR repository and to -delete the S3 bucket created for the cluster and CodeBuild project. Then there are roles for the AWS Batch resources, -instance, job. - -For more details about AWS Identity and Access Management, see http://aws.amazon.com/iam/ - -Amazon Simple Notification Service (SNS) ----------------------------------------- - -Amazon SNS is used to receive notifications from Auto Scaling. These events are called life cycle events, and are -generated when an instance launches or terminates in an Autoscaling Group. Within AWS ParallelCluster, the Amazon SNS -topic for the Autoscaling Group is subscribed to an Amazon SQS queue. - -The service is not used with AWS Batch clusters. 
- -For more details about Amazon SNS, see http://aws.amazon.com/sns/ - -Amazon Simple Queuing Service (SQS) ------------------------------------ - -Amazon SQS is used to hold notifications(messages) from Auto Scaling, sent through Amazon SNS and notifications from -the ComputeFleet instances. This decouples the sending of notifications from the receiving and allows the Master to -handle them through polling. The MasterServer runs Amazon SQSwatcher and polls the queue. AutoScaling and the -ComputeFleet instances post messages to the queue. - -The service is not used with AWS Batch clusters. - -For more details about Amazon SQS, see http://aws.amazon.com/sqs/ - -Amazon EC2 ----------- - -Amazon EC2 provides the compute for AWS ParallelCluster. The MasterServer and ComputeFleet are EC2 instances. Any -instance type that support HVM can be selected. The MasterServer and ComputeFleet can be different instance types and -the ComputeFleet can also be launched as Spot instances. Instance store volumes found on the instances are mounted as a -striped LVM volume. - -For more details about Amazon EC2, see http://aws.amazon.com/ec2/ - -AWS Auto Scaling ----------------- - -AWS Auto Scaling is used to manage the ComputeFleet instances. These instances are managed as an AutoScaling Group and -can either be elastically driven by workload or static and driven by the config. - -The service is not used with AWS Batch clusters. - -For more details about Auto Scaling, see http://aws.amazon.com/autoscaling/ - -Amazon Elastic Block Store (EBS) --------------------------------- - -Amazon EBS provides the persistent storage for the shared volumes. Any EBS settings can be passed through the config. -EBS volumes can either be initialized empty or from an existing EBS snapshot. - -For more details about Amazon EBS, see http://aws.amazon.com/ebs/ - -Amazon S3 ---------- - -Amazon S3 is used to store the AWS ParallelCluster templates. Each region has a bucket with all templates. AWS -ParallelCluster can be configured to allow allow CLI/SDK tools to use S3. - -With an AWS Batch cluster, an S3 bucket in the customer's account is created to store artifacts used by the Docker -image creation and the jobs scripts when submitting jobs from the user's machine. - -For more details, see http://aws.amazon.com/s3/ - -Amazon DynamoDB ---------------- - -Amazon DynamoDB is used to store minimal state of the cluster. The MasterServer tracks provisioned instances in a -DynamoDB table. - -The service is not used with AWS Batch clusters. - -For more details, see http://aws.amazon.com/dynamodb/ - -AWS Batch ---------- -AWS Batch is the AWS managed job scheduler that dynamically provisions the optimal quantity and type of compute -resources (e.g., CPU or memory optimized instances) based on the volume and specific resource requirements of the batch -jobs submitted. With AWS Batch, there is no need to install and manage batch computing software or server clusters that -you use to run your jobs. - -The service is only used with AWS Batch clusters. - -For more details, see https://aws.amazon.com/batch/ - -AWS CodeBuild -------------- -AWS CodeBuild is used to automatically and transparently build Docker images at cluster creation time. - -The service is only used with AWS Batch clusters. 
- -For more details, see https://aws.amazon.com/codebuild/ - -AWS Lambda ----------- -AWS Lambda service runs the functions that orchestrate the Docker image creation and manage custom cluster resources -cleanup, that are the created Docker images stored in the ECR repository and the S3 bucket for the cluster. - -The service is only used with AWS Batch clusters. - -For more details, see https://aws.amazon.com/lambda/ - -Amazon Elastic Container Registry (ECR) ---------------------------------------- - -Amazon ECR stores the Docker images built at cluster creation time. The Docker images are then used by AWS Batch to run -the containers for the submitted jobs. - -The service is only used with AWS Batch clusters. - -For more details, see https://aws.amazon.com/ecr/ - -Amazon CloudWatch ------------------ -Amazon CloudWatch is used to log Docker image build steps and the standard output and error of the AWS Batch jobs. - -The service is only used with AWS Batch clusters. - -For more details, see https://aws.amazon.com/cloudwatch/ diff --git a/docs/awsbatchcli.rst b/docs/awsbatchcli.rst deleted file mode 100644 index b6c36d6a5b..0000000000 --- a/docs/awsbatchcli.rst +++ /dev/null @@ -1,20 +0,0 @@ -.. _awsbatchcli: - -###################################### -AWS ParallelCluster Batch CLI Commands -###################################### - -The AWS ParallelCluster Batch CLI commands will be automatically installed in the AWS ParallelCluster Master Node -when the selected scheduler is awsbatch. -The CLI uses AWS Batch APIs and permits to submit and manage jobs -and to monitor jobs, queues, hosts, mirroring traditional schedulers commands. - -.. toctree:: - :maxdepth: 1 - - awsbatchcli/awsbsub - awsbatchcli/awsbstat - awsbatchcli/awsbout - awsbatchcli/awsbkill - awsbatchcli/awsbqueues - awsbatchcli/awsbhosts diff --git a/docs/awsbatchcli/awsbhosts.rst b/docs/awsbatchcli/awsbhosts.rst deleted file mode 100644 index fa2a4b2d4a..0000000000 --- a/docs/awsbatchcli/awsbhosts.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _awsbhosts: - -awsbhosts -========= - -.. argparse:: - :filename: ../cli/awsbatch/awsbhosts.py - :func: _get_parser - :prog: awsbhosts diff --git a/docs/awsbatchcli/awsbkill.rst b/docs/awsbatchcli/awsbkill.rst deleted file mode 100644 index 9624671f0c..0000000000 --- a/docs/awsbatchcli/awsbkill.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _awsbkill: - -awsbkill -======== - -.. argparse:: - :filename: ../cli/awsbatch/awsbkill.py - :func: _get_parser - :prog: awsbkill diff --git a/docs/awsbatchcli/awsbout.rst b/docs/awsbatchcli/awsbout.rst deleted file mode 100644 index c33c348e89..0000000000 --- a/docs/awsbatchcli/awsbout.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _awsbout: - -awsbout -======= - -.. argparse:: - :filename: ../cli/awsbatch/awsbout.py - :func: _get_parser - :prog: awsbout diff --git a/docs/awsbatchcli/awsbqueues.rst b/docs/awsbatchcli/awsbqueues.rst deleted file mode 100644 index 0be0ac19fa..0000000000 --- a/docs/awsbatchcli/awsbqueues.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _awsbqueues: - -awsbqueues -========== - -.. argparse:: - :filename: ../cli/awsbatch/awsbqueues.py - :func: _get_parser - :prog: awsbqueues diff --git a/docs/awsbatchcli/awsbstat.rst b/docs/awsbatchcli/awsbstat.rst deleted file mode 100644 index 9f1233b6c9..0000000000 --- a/docs/awsbatchcli/awsbstat.rst +++ /dev/null @@ -1,9 +0,0 @@ -.. _awsbstat: - -awsbstat -======== - -.. 
argparse:: - :filename: ../cli/awsbatch/awsbstat.py - :func: _get_parser - :prog: awsbstat diff --git a/docs/awsbatchcli/awsbsub.rst b/docs/awsbatchcli/awsbsub.rst deleted file mode 100644 index feb30eb66c..0000000000 --- a/docs/awsbatchcli/awsbsub.rst +++ /dev/null @@ -1,19 +0,0 @@ -.. _awsbsub: - -awsbsub -======= - -.. argparse:: - :filename: ../cli/awsbatch/awsbsub.py - :func: _get_parser - :prog: awsbsub - -.. spelling:: - MiB - TextIOWrapper - env - io - jobId - startedAt - subfolder - utf diff --git a/docs/commands.rst b/docs/commands.rst deleted file mode 100644 index eafeb81998..0000000000 --- a/docs/commands.rst +++ /dev/null @@ -1,23 +0,0 @@ -.. _commands: - -.. toctree:: - :maxdepth: 2 - -################################ -AWS ParallelCluster CLI commands -################################ - -.. argparse:: - :filename: ../cli/pcluster/cli.py - :func: _get_parser - :prog: pcluster - -.. spelling:: - alinux - ami - centos - Ctrl - mycluster - pre - rsa - ubuntu diff --git a/docs/conf.py b/docs/conf.py deleted file mode 100644 index 268636c27c..0000000000 --- a/docs/conf.py +++ /dev/null @@ -1,369 +0,0 @@ -# -*- coding: utf-8 -*- -# -# AWS ParallelCluster documentation build configuration file, created by -# sphinx-quickstart on Wed Nov 5 07:56:13 2014. -# -# This file is execfile()d with the current directory set to its -# containing dir. -# -# Note that not all possible configuration values are present in this -# autogenerated file. -# -# All configuration values have a default; values that are commented out -# serve to show the default. - -import sys, os - -# If extensions (or modules to document with autodoc) are in another directory, -# add these directories to sys.path here. If the directory is relative to the -# documentation root, use os.path.abspath to make it absolute, like shown here. -sys.path.insert(0, os.path.abspath('.')) -sys.path.append(os.path.abspath('../cli')) -sys.path.append(os.path.abspath('../cli/pcluster')) - -# -- General configuration ------------------------------------------------ -# If your documentation needs a minimal Sphinx version, state it here. -#needs_sphinx = '1.0' - -# Add any Sphinx extension module names here, as strings. They can be -# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom -# ones. -extensions = ['sphinxarg.ext', 'sphinxcontrib.spelling'] - -# Add any paths that contain templates here, relative to this directory. -templates_path = ['_templates'] - -# The suffix of source filenames. -source_suffix = '.rst' - -# The encoding of source files. -#source_encoding = 'utf-8-sig' - -# The master toctree document. -master_doc = 'index' - -# General information about the project. -project = u'AWS ParallelCluster' -copyright = u'2014-2018, Amazon Web Services' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# The short X.Y version. -version = '2.4' -# The full version, including alpha/beta/rc tags. -release = '2.4.1' - -# The language for content autogenerated by Sphinx. Refer to documentation -# for a list of supported languages. -#language = None - -# There are two options for replacing |today|: either, you set today to some -# non-false value, then it is used: -#today = '' -# Else, today_fmt is used as the format for a strftime call. -#today_fmt = '%B %d, %Y' - -# List of patterns, relative to source directory, that match files and -# directories to ignore when looking for source files. 
-exclude_patterns = ['staging', 'README.rst', 'build', 'utils'] - -# The reST default role (used for this markup: `text`) to use for all -# documents. -#default_role = None - -# If true, '()' will be appended to :func: etc. cross-reference text. -#add_function_parentheses = True - -# If true, the current module name will be prepended to all description -# unit titles (such as .. function::). -#add_module_names = True - -# If true, sectionauthor and moduleauthor directives will be shown in the -# output. They are ignored by default. -#show_authors = False - -# The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'guzzle_sphinx_theme.GuzzleStyle' -#pygments_style = 'sphinx' - -# A list of ignored prefixes for module index sorting. -#modindex_common_prefix = [] - -# If true, keep warnings as "system message" paragraphs in the built documents. -#keep_warnings = False - - -# -- Options for HTML output ---------------------------------------------- -#import sphinx_rtd_theme -#html_theme = "sphinx_rtd_theme" -#html_theme_path = [sphinx_rtd_theme.get_html_theme_path()] - -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -#html_theme = 'default' - -# Theme options are theme-specific and customize the look and feel of a theme -# further. For a list of options available for each theme, see the -# documentation. -#html_theme_options = {} - -# Add any paths that contain custom themes here, relative to this directory. -#html_theme_path = [] - -# The name for this set of Sphinx documents. If None, it defaults to -# " v documentation". -html_title = "AWS ParallelCluster %s" % release - -# A shorter title for the navigation bar. Default is the same as html_title. -#html_short_title = None - -# The name of an image file (relative to this directory) to place at the top -# of the sidebar. -#html_logo = None - -# The name of an image file (within the static path) to use as favicon of the -# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 -# pixels large. -#html_favicon = None - -# Add any paths that contain custom static files (such as style sheets) here, -# relative to this directory. They are copied after the builtin static files, -# so a file named "default.css" will overwrite the builtin "default.css". -html_static_path = ['_static'] - -# Add any extra paths that contain custom files (such as robots.txt or -# .htaccess) here, relative to this directory. These files are copied -# directly to the root of the documentation. -#html_extra_path = [] - -# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, -# using the given strftime format. -#html_last_updated_fmt = '%b %d, %Y' - -# If true, SmartyPants will be used to convert quotes and dashes to -# typographically correct entities. -#html_use_smartypants = True - -# Custom sidebar templates, maps document names to template names. -html_sidebars = { - '**': ['sidebarlogo.html', - 'localtoc.html', - 'searchbox.html', - 'feedback.html', - 'userguide.html'] -} - -# Additional templates that should be rendered to pages, maps page names to -# template names. -#html_additional_pages = {} - -# If false, no module index is generated. -#html_domain_indices = True - -# If false, no index is generated. -#html_use_index = True - -# If true, the index is split into individual pages for each letter. -#html_split_index = False - -# If true, links to the reST sources are added to the pages. 
-#html_show_sourcelink = True - -# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. -#html_show_sphinx = True - -# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. -#html_show_copyright = True - -# If true, an OpenSearch description file will be output, and all pages will -# contain a tag referring to it. The value of this option must be the -# base URL from which the finished HTML is served. -#html_use_opensearch = '' - -# This is the file name suffix for HTML files (e.g. ".xhtml"). -#html_file_suffix = None - -# Output file base name for HTML help builder. -htmlhelp_basename = 'aws-parallelcluster-doc' - -html_translator_class = 'guzzle_sphinx_theme.HTMLTranslator' -p = os.path.abspath('.') -p = os.path.join(p, 'guzzle_sphinx_theme') -html_theme_path = [p] -html_theme = 'guzzle_sphinx_theme' -# Register the theme as an extension to generate a sitemap.xml -extensions.append("guzzle_sphinx_theme") - -html_theme_options = { - # Set the name of the project to appear in the nav menu - "project_nav_name": "AWS ParallelCluster", - # Set your GitHub user and repo to enable GitHub stars links - "github_user": "aws", - "github_repo": "aws-parallelcluster", - # Set to true to bind left and right key events to turn the page - "bind_key_events": False, - # Specify a base_url used to generate sitemap.xml links. If not - # specified, then no sitemap will be built. - "base_url": "https://aws-parallelcluster.readthedocs.io/latest/" -} - - -def setup(app): - app.add_css_file('theme_overrides.css') - app.add_javascript('custom.js') - - -# -- Options for LaTeX output --------------------------------------------- -latex_elements = { -# The paper size ('letterpaper' or 'a4paper'). -#'papersize': 'letterpaper', - -# The font size ('10pt', '11pt' or '12pt'). -#'pointsize': '10pt', - -# Additional stuff for the LaTeX preamble. -#'preamble': '', -} - -# Grouping the document tree into LaTeX files. List of tuples -# (source start file, target name, title, -# author, documentclass [howto, manual, or own class]). -latex_documents = [ - ('index', 'aws-parallelcluster.tex', u'AWS ParallelCluster Documentation', - u'Amazon Web Services', u'Amazon Web Services', 'manual'), -] - -# The name of an image file (relative to this directory) to place at the top of -# the title page. -#latex_logo = None - -# For "manual" documents, if this is true, then toplevel headings are parts, -# not chapters. -#latex_use_parts = False - -# If true, show page references after internal links. -#latex_show_pagerefs = False - -# If true, show URL addresses after external links. -#latex_show_urls = False - -# Documents to append as an appendix to all manuals. -#latex_appendices = [] - -# If false, no module index is generated. -#latex_domain_indices = True - - -# -- Options for manual page output --------------------------------------- - -# One entry per manual page. List of tuples -# (source start file, name, description, authors, manual section). -man_pages = [ - ('index', 'aws-parallelcluster', u'AWS ParallelCluster Documentation', - [u'Amazon Web Services'], 1) -] - -# If true, show URL addresses after external links. -#man_show_urls = False - - -# -- Options for Texinfo output ------------------------------------------- - -# Grouping the document tree into Texinfo files. 
List of tuples -# (source start file, target name, title, author, -# dir menu entry, description, category) -texinfo_documents = [ - ('index', 'aws-parallelcluster', u'AWS ParallelCluster Documentation', - u'Amazon Web Services', 'aws-parallelcluster', 'One line description of project.', - 'Miscellaneous'), -] - -# Documents to append as an appendix to all manuals. -#texinfo_appendices = [] - -# If false, no module index is generated. -#texinfo_domain_indices = True - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#texinfo_show_urls = 'footnote' - -# If true, do not generate a @detailmenu in the "Top" node's menu. -#texinfo_no_detailmenu = False - - -# -- Options for Epub output ---------------------------------------------- - -# Bibliographic Dublin Core info. -epub_title = u'AWS ParallelCluster' -epub_author = u'Amazon Web Services' -epub_publisher = u'Amazon Web Services' -epub_copyright = u'2014-2018, Amazon Web Services' - -# The basename for the epub file. It defaults to the project name. -#epub_basename = u'aws-parallelcluster' - -# The HTML theme for the epub output. Since the default themes are not optimized -# for small screen space, using the same theme for HTML and epub output is -# usually not wise. This defaults to 'epub', a theme designed to save visual -# space. -#epub_theme = 'epub' - -# The language of the text. It defaults to the language option -# or en if the language is not set. -#epub_language = '' - -# The scheme of the identifier. Typical schemes are ISBN or URL. -#epub_scheme = '' - -# The unique identifier of the text. This can be a ISBN number -# or the project homepage. -#epub_identifier = '' - -# A unique identification for the text. -#epub_uid = '' - -# A tuple containing the cover image and cover page html template filenames. -#epub_cover = () - -# A sequence of (type, uri, title) tuples for the guide element of content.opf. -#epub_guide = () - -# HTML files that should be inserted before the pages created by sphinx. -# The format is a list of tuples containing the path and title. -#epub_pre_files = [] - -# HTML files shat should be inserted after the pages created by sphinx. -# The format is a list of tuples containing the path and title. -#epub_post_files = [] - -# A list of files that should not be packed into the epub file. -epub_exclude_files = ['search.html'] - -# The depth of the table of contents in toc.ncx. -#epub_tocdepth = 3 - -# Allow duplicate toc entries. -#epub_tocdup = True - -# Choose between 'default' and 'includehidden'. -#epub_tocscope = 'default' - -# Fix unsupported image types using the PIL. -#epub_fix_images = False - -# Scale large images. -#epub_max_image_width = 0 - -# How to display URL addresses: 'footnote', 'no', or 'inline'. -#epub_show_urls = 'inline' - -# If false, no index is generated. -#epub_use_index = True - -# linkcheck config -# A list of regular expressions that match URIs that should not be checked when doing a linkcheck build. -linkcheck_ignore = [r'http://x\.x\.x\.x:\d+', r'http://hostname/'] -linkcheck_retries = 3 -linkcheck_workers = 1 diff --git a/docs/configuration.rst b/docs/configuration.rst deleted file mode 100644 index 3ed1f24f34..0000000000 --- a/docs/configuration.rst +++ /dev/null @@ -1,1091 +0,0 @@ -Configuration -============= -.. toctree:: - -ParallelCluster uses the file ``~/.parallelcluster/config`` by default for all configuration parameters. 
-You can change the location of the config file via the ``--config`` command option or by setting the -``AWS_PCLUSTER_CONFIG_FILE`` environment variable. - -An example configuration file can be found at ``site-packages/aws-parallelcluster/examples/config``. - - -Layout ------- - -Configuration is defined in multiple sections. - -Required sections are "global" and "aws". - -At least one "cluster" and one "subnet" section must be included. - -A section starts with the section name in brackets, followed by parameters and configuration. :: - - [global] - cluster_template = default - update_check = true - sanity_check = true - - -Configuration Options ---------------------- - -global -^^^^^^ -Global configuration options related to pcluster. :: - - [global] - -cluster_template -"""""""""""""""" -Defines the name of the cluster section used for the cluster. - -See the :ref:`Cluster Definition `. :: - - cluster_template = default - -update_check -"""""""""""" -Check for updates to pcluster. :: - - update_check = true - -sanity_check -"""""""""""" -Attempt to validate the existence of the resources defined in parameters. :: - - sanity_check = true - -aws -^^^ -AWS Region section. - -To store credentials, you can use environment variables, IAM roles, or the preferred way, the -`AWS CLI `_ :: - - # Defaults to us-east-1 if not defined in environment or below - aws_region_name = #region - -aliases -^^^^^^^ -Aliases section. - -Customize the `ssh` command here. - -`CFN_USER` is set to the default username for the OS. -`MASTER_IP` is set to the IP address of the master instance. -`ARGS` is set to whatever arguments the user provides after `pcluster ssh cluster_name`. :: - - [aliases] - # This is the aliases section, you can configure - # ssh alias here - ssh = ssh {CFN_USER}@{MASTER_IP} {ARGS} - -.. _cluster_definition: - -cluster -^^^^^^^ -Defines one or more clusters for different job types or workloads. - -Each cluster can have its own individual configuration. - -The format is [cluster ]. :: - - [cluster default] - -key_name -"""""""" -Name of an existing EC2 KeyPair to enable SSH access to the instances. :: - - key_name = mykey - -template_url -"""""""""""" -Defines the path to the CloudFormation template used to create the cluster. - -Updates use the template the stack was created with. - -Defaults to -``https://s3.amazonaws.com/-aws-parallelcluster/templates/aws-parallelcluster-.cfn.json``. :: - - template_url = https://s3.amazonaws.com/us-east-1-aws-parallelcluster/templates/aws-parallelcluster.cfn.json - -compute_instance_type -""""""""""""""""""""" -Defines the EC2 instance type used for the cluster compute nodes. - -If the scheduler is awsbatch, please refer to the Compute Environments creation in the -AWS Batch UI for the list of supported instance types. - -Defaults to t2.micro, ``optimal`` when scheduler is awsbatch :: - - compute_instance_type = t2.micro - -master_instance_type -"""""""""""""""""""" -Defines the EC2 instance type used for the master node. - -Defaults to t2.micro. :: - - master_instance_type = t2.micro - -.. _configuration_initial_queue_size: - -initial_queue_size -"""""""""""""""""" -Set the initial number of EC2 instances to launch as compute nodes in the cluster. - -This setting is applicable only for traditional schedulers (sge, slurm, and torque). - -If the scheduler is awsbatch, use :ref:`min_vcpus `. - -Defaults to 2. :: - - initial_queue_size = 2 - -.. 
_configuration_max_queue_size: - -max_queue_size -"""""""""""""" -Set the maximum number of EC2 instances that can be launched in the cluster. - -This setting is applicable only for traditional schedulers (sge, slurm, and torque). - -If the scheduler is awsbatch, use :ref:`max_vcpus `. - -Defaults to 10. :: - - max_queue_size = 10 - -maintain_initial_size -""""""""""""""""""""" -Boolean flag to maintain initial size of the Auto Scaling group for traditional schedulers. - -If the scheduler is awsbatch, use :ref:`desired_vcpus `. - -If set to true, the Auto Scaling group will never have fewer members than the value -of initial_queue_size. The cluster can still scale up to the value of max_queue_size. - -If set to false, the Auto Scaling group can scale down to 0 members to prevent resources -from sitting idle when they are not needed. - -Defaults to false. :: - - maintain_initial_size = false - -.. _min_vcpus: - -min_vcpus -""""""""" -If the scheduler is awsbatch, the compute environment will never have fewer than min_vcpus. - -Defaults to 0. :: - - min_vcpus = 0 - -.. _desired_vcpus: - -desired_vcpus -""""""""""""" -If the scheduler is awsbatch, the compute environment will initially have desired_vcpus. - -Defaults to 4. :: - - desired_vcpus = 4 - -.. _max_vcpus: - -max_vcpus -""""""""" -If the scheduler is awsbatch, the compute environment will at most have max_vcpus. - -Defaults to 20. :: - - max_vcpus = 20 - -scheduler -""""""""" -Defines the cluster scheduler. - -Valid options are sge, torque, slurm, or awsbatch. - -If the scheduler is awsbatch, please take a look at the :ref:`networking setup `. - -Defaults to sge. :: - - scheduler = sge - -cluster_type -"""""""""""" -Defines the type of cluster to launch. - -Valid options are ondemand or spot. - -Defaults to ondemand. :: - - cluster_type = ondemand - -spot_price -"""""""""" -If cluster_type is set to spot, you can optionally set the maximum spot price for the -ComputeFleet on traditional schedulers. If you do not specify a value, you are charged the -Spot price, capped at the On-Demand price. - -If the scheduler is awsbatch, use :ref:`spot_bid_percentage `. - -See the `Spot Bid Advisor `_ for assistance finding a bid price that meets your needs. :: - - spot_price = 1.50 - -.. _spot_bid_percentage: - -spot_bid_percentage -""""""""""""""""""" -If awsbatch is the scheduler, this optional parameter is the on-demand bid percentage. - -If unspecified, the current spot market price will be selected, capped at the on-demand price. :: - - spot_bid_percentage = 85 - -.. _custom_ami_section: - -custom_ami -"""""""""" -ID of a Custom AMI to use instead of the default `published AMIs `_. :: - - custom_ami = NONE - -s3_read_resource -"""""""""""""""" -Specify an S3 resource to which AWS ParallelCluster nodes will be granted read-only access. - -For example, 'arn:aws:s3:::my_corporate_bucket/\*' would provide read-only access to all -objects in the my_corporate_bucket bucket. - -See :doc:`working with S3 ` for details on format. - -Defaults to NONE. :: - - s3_read_resource = NONE - -s3_read_write_resource -"""""""""""""""""""""" -Specify an S3 resource to which AWS ParallelCluster nodes will be granted read-write access. - -For example, 'arn:aws:s3:::my_corporate_bucket/Development/\*' would provide read-write -access to all objects in the Development folder of the my_corporate_bucket bucket. - -See :doc:`working with S3 ` for details on format. - -Defaults to NONE. 
:: - - s3_read_write_resource = NONE - -pre_install -""""""""""" -URL to a preinstall script that is executed before any of the boot_as_* scripts are run. - -When using awsbatch as the scheduler, the preinstall script is only executed on the master node. - -The parameter format can be specified as "http://hostname/path/to/script.sh" or "s3://bucketname/path/to/script.sh". - -Defaults to NONE. :: - - pre_install = NONE - -pre_install_args -"""""""""""""""" -Quoted list of arguments to be passed to the preinstall script. - -Defaults to NONE. :: - - pre_install_args = NONE - -post_install -"""""""""""" -URL to a postinstall script that is executed after all of the boot_as_* scripts are run. - -When using awsbatch as the scheduler, the postinstall script is only executed on the master node. - -Can be specified in "http://hostname/path/to/script.sh" or "s3://bucketname/path/to/script.sh" format. - -Defaults to NONE. :: - - post_install = NONE - -post_install_args -""""""""""""""""" -Arguments to be passed to the postinstall script. - -Defaults to NONE. :: - - post_install_args = NONE - -proxy_server -"""""""""""" -Defines an HTTP(S) proxy server, typically http://x.x.x.x:8080. - -Defaults to NONE. :: - - proxy_server = NONE - -placement_group -""""""""""""""" -Defines the cluster placement group. - -Valid options are NONE, DYNAMIC or an existing EC2 placement group name. - -When DYNAMIC is set, a unique placement group will be created and deleted as part -of the cluster stack. - -This parameter does not apply to awsbatch. - -More information on placement groups can be found `here `_ - -Defaults to NONE. :: - - placement_group = NONE - -placement -""""""""" -Defines the cluster placement group logic. - -This enables the whole cluster or only the compute instances to use the placement group. - -Valid options are ``cluster`` or ``compute``. - -This parameter does not apply to awsbatch. - -Defaults to ``compute``. :: - - placement = compute - -ephemeral_dir -""""""""""""" -If instance store volumes exist, define the path where they will be mounted. - -Defaults to /scratch. :: - - ephemeral_dir = /scratch - -shared_dir -"""""""""" -Defines the path where the shared EBS volume will be mounted. - -Do not use this option with multiple EBS volumes. Provide shared_dir under each EBS section instead. - -See :ref:`EBS Section ` for details on working with multiple EBS volumes. - -Defaults to /shared. - -The example below mounts the shared EBS volume at /myshared. :: - - shared_dir = myshared - -encrypted_ephemeral -""""""""""""""""""" -Encrypt the ephemeral instance store volumes with non-recoverable in-memory keys -using LUKS (Linux Unified Key Setup). - -Please visit https://guardianproject.info/code/luks/ for more information. - -Defaults to false. :: - - encrypted_ephemeral = false - -master_root_volume_size -""""""""""""""""""""""" -MasterServer root volume size in GB. The AMI must support growroot. - -Defaults to 17, min value 17. :: - - master_root_volume_size = 17 - -compute_root_volume_size -"""""""""""""""""""""""" -ComputeFleet root volume size in GB. The AMI must support growroot. - -Defaults to 17, min value 17. :: - - compute_root_volume_size = 17 - -base_os -""""""" -OS type used in the cluster. - -Available options are: alinux, centos6, centos7, ubuntu1404 and ubuntu1604. - -Supported operating systems by region are listed in the table below. 
Please note -that commercial entails all supported regions including us-east-1, us-west-2, etc.:: - - ============== ====== ============ ============ ============= ============ - region alinux centos6 centos7 ubuntu1404 ubuntu1604 - ============== ====== ============ ============ ============= ============ - commercial True True True True True - govcloud True False False True True - china True False False True True - ============== ====== ============ ============ ============= ============ - -Note: The base_os determines the username used to log into the cluster. - -* CentOS 6 & 7: ``centos`` -* Ubuntu 14.04 LTS & 16.04 LTS: ``ubuntu`` -* Amazon Linux: ``ec2-user`` - -Defaults to alinux. :: - - base_os = alinux - -ec2_iam_role -"""""""""""" -Defines the name of an existing EC2 IAM Role that will be attached to all instances in -the cluster. Note that the given name of a role and its Amazon Resource Name (ARN) are -different, and the latter may not be used as an argument to ec2_iam_role. - -Defaults to NONE. :: - - ec2_iam_role = NONE - -extra_json -"""""""""" -Extra JSON that will be merged into the dna.json used by Chef. - -Defaults to {}. :: - - extra_json = {} - -additional_cfn_template -""""""""""""""""""""""" -Defines an additional CloudFormation template to launch along with the cluster. This -allows for the creation of resources that exist outside of the cluster but are part -of the cluster's life cycle. - -This value must be a HTTP URL to a public template with all parameters provided. - -Defaults to NONE. :: - - additional_cfn_template = NONE - -vpc_settings -"""""""""""" -Settings section for the VPC where the cluster will be deployed. - -See :ref:`VPC Section `. :: - - vpc_settings = public - -ebs_settings -"""""""""""" -Settings section related to the EBS volume mounted on the master instance. When using -multiple EBS volumes, enter these parameters as a comma separated list. - -Up to five (5) additional EBS volumes are supported. - -See :ref:`EBS Section `. :: - - ebs_settings = custom1, custom2, ... - -scaling_settings -"""""""""""""""" -Settings section relating to autoscaling configuration. - -See :ref:`Scaling Section `. :: - - scaling_settings = custom - -efs_settings -"""""""""""" -Settings section relating to EFS filesystem. - -See :ref:`EFS Section `. :: - - efs_settings = customfs - -raid_settings -""""""""""""" -Settings section relating to EBS volume RAID configuration. - -See :ref:`RAID Section `. :: - - raid_settings = rs - -fsx_settings -"""""""""""" -Settings section relating to FSx Lustre configuration. - -See :ref:`FSx Section `. :: - - fsx_settings = fs - -tags -"""" -Defines tags to be used by CloudFormation. - -If command line tags are specified via `--tags`, they will be merged with config tags. - -Command line tags overwrite config tags that have the same key. - -Tags are JSON formatted and should never have quotes outside the curly braces. - -See `AWS CloudFormation Resource Tags Type `_. :: - - tags = {"key" : "value", "key2" : "value2"} - -.. _vpc_section: - -vpc -^^^ -VPC Configuration Settings:: - - [vpc public] - vpc_id = vpc-xxxxxx - master_subnet_id = subnet-xxxxxx - -vpc_id -"""""" -ID of the VPC to provision cluster into. :: - - vpc_id = vpc-xxxxxx - -master_subnet_id -"""""""""""""""" -ID of an existing subnet to provision the Master server into. :: - - master_subnet_id = subnet-xxxxxx - -ssh_from -"""""""" -CIDR-formatted IP range to allow SSH access from. - -This parameter is only used when AWS ParallelCluster creates the security group. 
- -Defaults to 0.0.0.0/0. :: - - ssh_from = 0.0.0.0/0 - -additional_sg -""""""""""""" -Additional VPC security group Id for all instances. - -Defaults to NONE. :: - - additional_sg = sg-xxxxxx - -compute_subnet_id -""""""""""""""""" -ID of an existing subnet to provision the compute nodes into. - -If the subnet is private, you will need to setup NAT for web access. :: - - compute_subnet_id = subnet-xxxxxx - -compute_subnet_cidr -""""""""""""""""""" -If you want AWS ParallelCluster to create a compute subnet, designate the CIDR block here. :: - - compute_subnet_cidr = 10.0.100.0/24 - -use_public_ips -"""""""""""""" -Defines whether or not to assign public IP addresses to compute instances. - -If true, an Elastic IP will be associated to the Master instance. - -If false, the Master instance will have a Public IP (or not) according to the value -of the "Auto-assign Public IP" subnet configuration parameter. - -.. note:: - This parameter can't be set to false if :code:`compute_subnet_cidr` is specified. - -See :ref:`networking configuration ` for some examples. - -Defaults to true. :: - - use_public_ips = true - -vpc_security_group_id -""""""""""""""""""""" -Use an existing security group for all instances. - -Defaults to NONE. :: - - vpc_security_group_id = sg-xxxxxx - -.. _ebs_section: - -ebs -^^^ -EBS volume configuration settings for the volumes mounted on the master instance and -shared via NFS to the compute nodes. :: - - [ebs custom1] - shared_dir = vol1 - ebs_snapshot_id = snap-xxxxx - volume_type = io1 - volume_iops = 200 - ... - - [ebs custom2] - shared_dir = vol2 - ... - - ... - -shared_dir -"""""""""" -Path where the shared EBS volume will be mounted. - -This parameter is required when using multiple EBS volumes. - -When using one (1) EBS volume, this option will overwrite the shared_dir specified -under the cluster section. The example below mounts to /vol1 :: - - shared_dir = vol1 - -ebs_snapshot_id -""""""""""""""" -Defines the EBS snapshot Id if using a snapshot as the source for the volume. - -Defaults to NONE. :: - - ebs_snapshot_id = snap-xxxxx - -volume_type -""""""""""" -The `EBS volume type `_ of the volume you wish to launch. - -Defaults to gp2. :: - - volume_type = io1 - -volume_size -""""""""""" -Size of volume to be created (if not using a snapshot). - -Defaults to 20GB. :: - - volume_size = 20 - -volume_iops -""""""""""" -Defines the number of IOPS for io1 type volumes. :: - - volume_iops = 200 - -encrypted -""""""""" -Controls if the EBS volume should be encrypted (note: this should *not* be used with snapshots). - -Defaults to false. :: - - encrypted = false - -ebs_kms_key_id -"""""""""""""" -Use a custom KMS Key for encryption. - -This parameter must be used in conjunction with ``encrypted = true`` and needs to -have a custom ``ec2_iam_role``. - -See :ref:`Disk Encryption with a Custom KMS Key `. :: - - ebs_kms_key_id = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - -ebs_volume_id -""""""""""""" -Defines the volume Id of an existing EBS volume that will be attached to the master instance. - -Defaults to NONE. :: - - ebs_volume_id = vol-xxxxxx - -.. _scaling_section: - -scaling -^^^^^^^ -Settings which define how the compute nodes scale. :: - - [scaling custom] - scaledown_idletime = 10 - -scaledown_idletime -"""""""""""""""""" -Amount of time in minutes without a job after which the compute node will terminate. - -This does not apply to awsbatch. - -Defaults to 10. 
:: - - scaledown_idletime = 10 - -examples -^^^^^^^^ - -Suppose you want to launch a cluster with the awsbatch scheduler and let batch pick -the optimal instance type, based on your jobs resource needs. - -The following configuration allows a maximum of 40 concurrent vCPUs and scales down -to zero when no jobs have run for 10 minutes. :: - - [global] - update_check = true - sanity_check = true - cluster_template = awsbatch - - [aws] - aws_region_name = [your_aws_region] - - [cluster awsbatch] - scheduler = awsbatch - compute_instance_type = optimal # optional, defaults to optimal - min_vcpus = 0 # optional, defaults to 0 - desired_vcpus = 0 # optional, defaults to 4 - max_vcpus = 40 # optional, defaults to 20 - base_os = alinux # optional, defaults to alinux, controls the base_os of the master instance and the docker image for the compute fleet - key_name = [your_ec2_keypair] - vpc_settings = public - - [vpc public] - master_subnet_id = [your_subnet] - vpc_id = [your_vpc] - -.. spelling:: - alinux - ami - arn - aws - bucketname - centos - cfn - cidr - cli - clustername - dna - ebs - ec - gp - iam - idletime - io - iops - ip - ips - mountpoint - myshared - ondemand - os - postinstall - pre - preinstall - scaledown - sg - sge - slurm - ubuntu - url - vcpus - vpc - -.. _efs_section: - -EFS -^^^ -Defines configuration settings for the EFS mounted on the master and compute instances. :: - - [efs customfs] - shared_dir = efs - encrypted = false - performance_mode = generalPurpose - -shared_dir -"""""""""" -Defines the EFS mount point on the master and compute nodes. - -This parameter is REQUIRED! The EFS section will only be used if shared_dir is specified. - -The example below will mount at /efs. - -Do not use NONE or /NONE as the shared directory.:: - - shared_dir = efs - -encrypted -""""""""" -Defines if the file system will be encrypted. - -Defaults to false. :: - - encrypted = false - -performance_mode -"""""""""""""""" -Defines the Performance Mode of the file system. - -Valid choices are generalPurpose or maxIO (these are case-sensitive). - -We recommend generalPurpose performance mode for most file systems. - -File systems using the maxIO performance mode can scale to higher levels of aggregate -throughput and operations per second with a trade-off of slightly higher latencies for -most file operations. - -This parameter cannot be changed after the file system has been created. - -Defaults to generalPurpose.:: - - performance_mode = generalPurpose - -throughput_mode -""""""""""""""" -Defines the Throughput Mode of the file system. - -Valid options are bursting and provisioned.:: - - throughput_mode = provisioned - -provisioned_throughput -"""""""""""""""""""""" -Defines the provisioned throughput measured in MiB/s. - -This parameter requires setting throughput_mode to provisioned. - -The limit on throughput is 1024 MiB/s. Please contact AWS Support to request a limit increase. - -Valid Range: Min of 0.0.:: - - provisioned_throughput = 1024 - -efs_fs_id -""""""""" -Defines the EFS file system ID for an existing file system. - -Specifying this option will void all other EFS options except for shared_dir. - -config_sanity will only support file systems without a mount target in the stack's -availability zone *or* file systems that have an existing mount target in the stack's -availability zone with inbound and outbound NFS traffic allowed from 0.0.0.0/0. 
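The check described above can be approximated with a minimal boto3 sketch, assuming placeholder values for the file system ID, subnet ID, and region; the actual config_sanity logic also inspects the mount target security groups. ::

    import boto3

    fs_id = "fs-12345678"               # placeholder: existing EFS file system ID
    master_subnet_id = "subnet-xxxxxx"  # placeholder: master subnet from the vpc section
    region = "us-east-1"                # placeholder: cluster region

    efs = boto3.client("efs", region_name=region)
    ec2 = boto3.client("ec2", region_name=region)

    # Availability zone the stack (master subnet) lives in.
    stack_az = ec2.describe_subnets(SubnetIds=[master_subnet_id])["Subnets"][0]["AvailabilityZone"]

    # Availability zones that already have a mount target for this file system.
    mount_targets = efs.describe_mount_targets(FileSystemId=fs_id)["MountTargets"]
    mt_subnet_ids = [mt["SubnetId"] for mt in mount_targets]
    mt_azs = set()
    if mt_subnet_ids:
        subnets = ec2.describe_subnets(SubnetIds=mt_subnet_ids)["Subnets"]
        mt_azs = {subnet["AvailabilityZone"] for subnet in subnets}

    if stack_az in mt_azs:
        print("Existing mount target in %s: its security group must allow NFS (TCP 2049)." % stack_az)
    else:
        print("No mount target in %s: AWS ParallelCluster can create one for you." % stack_az)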
- -The sanity check for validating efs_fs_id requires the IAM role to have the following permissions: - -efs:DescribeMountTargets -efs:DescribeMountTargetSecurityGroups -ec2:DescribeSubnets -ec2:DescribeSecurityGroups - -Please add these permissions to your IAM role or set `sanity_check = false` to avoid errors. - -CAUTION: Having mount target with inbound and outbound NFS traffic allowed from 0.0.0.0/0 -will expose the file system to NFS mounting request from anywhere in the mount target's -availability zone. AWS recommends *not* creating a mount target in the stack's availability -zone and letting us handle this step. If you must have a mount target in the stack's -availability zone, please consider using a custom security group by providing a vpc_security_group_id -option under the vpc section, adding that security group to the mount target, and turning -off config sanity to create the cluster. - -Defaults to NONE.:: - - efs_fs_id = fs-12345 - -.. _raid_section: - -RAID -^^^^ -Defines configuration settings for a RAID array built from a number of identical -EBS volumes. -The RAID drive is mounted on the master node and exported to compute nodes via NFS. :: - - [raid rs] - shared_dir = raid - raid_type = 1 - num_of_raid_volumes = 2 - encrypted = true - -shared_dir -"""""""""" -Defines the mount point for the RAID array on the master and compute nodes. - -The RAID drive will only be created if this parameter is specified. - -The example below will mount the array at /raid. - -Do not use NONE or /NONE as the shared directory.:: - - shared_dir = raid - -raid_type -""""""""" -Defines the RAID type for the RAID array. - -Valid options are RAID 0 or RAID 1. - -For more information on RAID types, see: `RAID info -`_ - -The RAID drive will only be created if this parameter is specified. - -The example below will create a RAID 0 array:: - - raid_type = 0 - -num_of_raid_volumes -""""""""""""""""""" -Defines the number of EBS volumes to assemble the RAID array from. - -Minimum number of volumes = 2. - -Maximum number of volumes = 5. - -Defaults to 2. :: - - num_of_raid_volumes = 2 - -volume_type -""""""""""" -Defines the type of volume to build. - -See: `Volume type `_ for more detail. - -Defaults to gp2. :: - - volume_type = io1 - -volume_size -""""""""""" -Defines the size of volume to be created. - -Defaults to 20GB. :: - - volume_size = 20 - -volume_iops -""""""""""" -Defines the number of IOPS for io1 type volumes. :: - - volume_iops = 500 - -encrypted -""""""""" -Determines if the file system will be encrypted. - -Defaults to false. :: - - encrypted = false - -ebs_kms_key_id -"""""""""""""" -Use a custom KMS Key for encryption. - -This must be used in conjunction with ``encrypted = true`` and must have a custom ``ec2_iam_role``. - -See :ref:`Disk Encryption with a Custom KMS Key `. :: - - ebs_kms_key_id = xxxxxxxx-xxxx-xxxx-xxxx-xxxxxxxxxxxx - - -.. _fsx_section: - -FSx -^^^ -Configuration for an attached FSx Lustre file system. See `FSx CreateFileSystem -`_ for more information. - -FSx Lustre is supported when ``base_os = centos7 | alinux``. - -When using an Amazon Linux ``custom_ami``, the kernel must be >= ``4.14.104-78.84.amzn1.x86_64``. -See `Installing the Lustre Client `_ -for instructions. - -Note FSx is not currently supported when using ``awsbatch`` as a scheduler. - -If using an existing file system, it must be associated to a security group that allows inbound and outbound -TCP traffic from ``0.0.0.0/0`` through port ``988``. 
This is done by automatically when not using -``vpc_security_group_id``. - -Use an existing FSx file system by specifying ``fsx_fs_id``. :: - - [fsx fs] - shared_dir = /fsx - fsx_fs_id = fs-073c3803dca3e28a6 - -Or create and configure a new file system, with the following parameters :: - - [fsx fs] - shared_dir = /fsx - storage_capacity = 3600 - import_path = s3://bucket - imported_file_chunk_size = 1024 - export_path = s3://bucket/folder - weekly_maintenance_start_time = 1:00:00 - -shared_dir -"""""""""" -**Required** Defines the mount point for the Lustre File system on the master and compute nodes. - -The example below will mount the filesystem at /fsx. - -Do not use NONE or /NONE as the shared directory.:: - - shared_dir = /fsx - -fsx_fs_id -""""""""" -**Optional** Attach an existing FSx File System. - -If this option is specified, all following FSx parameters, such as ``storage_capacity`` are ignored. :: - - fsx_fs_id = fs-073c3803dca3e28a6 - -storage_capacity -"""""""""""""""" -**Optional** The storage capacity of the file system in GiB. - -The storage capacity has a minimum of 3,600 GiB and is provisioned in increments of 3,600 GiB. - -Defaults to 3,600 GiB. :: - - storage_capacity = 3600 - -import_path -""""""""""" -**Optional** S3 Bucket to load data from into the file system. Also serves as the export bucket. See ``export_path``. - -Import occurs on cluster creation, see `Importing Data from your Amazon S3 Bucket -`_ - -If not provided, file system will be empty. :: - - import_path = s3://bucket - -imported_file_chunk_size -"""""""""""""""""""""""" -**Optional** For files imported from a data repository (using ``import_path``), this value determines the stripe count -and maximum amount of data per file (in MiB) stored on a single physical disk. The maximum number of disks that a single -file can be striped across is limited by the total number of disks that make up the file system. - -The chunk size default is 1,024 MiB (1 GiB) and can go as high as 512,000 MiB (500 GiB). -Amazon S3 objects have a maximum size of 5 TB. - -Valid only when using ``import_path``. :: - - imported_file_chunk_size = 1024 - -export_path -""""""""""" -**Optional** The S3 path where the root of your file system is exported. The path **must** be in the same S3 bucket as -the ``import_path`` parameter. - -Defaults to ``s3://import-bucket/FSxLustre[creation-timestamp]`` where ``import-bucket`` is the bucket provided in -``import_path`` parameter. - -Valid only when using ``import_path``. :: - - export_path = s3://bucket/folder - -weekly_maintenance_start_time -""""""""""""""""""""""""""""" -***Optional** Preferred time to perform weekly maintenance, in UTC time zone. - -Format is [day of week]:[hour of day]:[minute of hour]. For example, Monday at Midnight is: :: - - weekly_maintenance_start_time = 1:00:00 - - diff --git a/docs/custom_cookbook.rst b/docs/custom_cookbook.rst deleted file mode 100644 index d17968e31f..0000000000 --- a/docs/custom_cookbook.rst +++ /dev/null @@ -1,32 +0,0 @@ -.. _custom_cookbook: - -################################################ -Setting Up a Custom AWS ParallelCluster Cookbook -################################################ - -.. warning:: - The following are instructions for use a custom version of the AWS ParallelCluster cookbook recipes. - This is an advanced method of customizing AWS ParallelCluster, with many hard to debug pitfalls. 
- The AWS ParallelCluster team highly recommends using :doc:`pre_post_install` scripts for customization, - as post install hooks are generally easier to debug and more portable across releases of AWS ParallelCluster. - -Steps -===== - -#. Clone the cookbook and make changes :: - - $ git clone https://github.com/aws/aws-parallelcluster-cookbook.git - ... - # Make changes to cookbook - -#. Upload the cookbook, changing ``[your_bucket]`` to a bucket you own :: - - $ cd aws-parallelcluster-cookbook - $ /bin/bash util/uploadCookbook.sh --bucket [your_bucket] --srcdir . - -#. From the output above, add the following variable to the AWS ParallelCluster config file, under the ``[cluster ...]`` section :: - - custom_chef_cookbook = https://s3.amazonaws.com/your_bucket/cookbooks/aws-parallelcluster-cookbook-2.2.1.tgz - -.. spelling:: - md diff --git a/docs/custom_node_package.rst b/docs/custom_node_package.rst deleted file mode 100644 index 87a41cd67c..0000000000 --- a/docs/custom_node_package.rst +++ /dev/null @@ -1,40 +0,0 @@ -.. _custom_node_package: - -#################################################### -Setting Up a Custom AWS ParallelCluster Node Package -#################################################### - -.. warning:: - The following are instructions for use a custom version of the AWS ParallelCluster Node package. - This is an advanced method of customizing AWS ParallelCluster, with many hard to debug pitfalls. - The AWS ParallelCluster team highly recommends using :doc:`pre_post_install` scripts for customization, as post - install hooks are generally easier to debug and more portable across releases of AWS ParallelCluster. - -Steps -===== - -#. Identify the AWS ParallelCluster Node working directory where you have cloned the AWS ParallelCluster Node code :: - - _nodeDir= - -#. Detect the current version of the AWS ParallelCluster Node :: - - _version=$(grep "version = \"" ${_nodeDir}/setup.py |awk '{print $3}' | tr -d \") - -#. Create an archive of the AWS ParallelCluster Node :: - - cd "${_nodeDir}" - _stashName=$(git stash create) - git archive --format tar --prefix="aws-parallelcluster-node-${_version}/" "${_stashName:-HEAD}" | gzip > "aws-parallelcluster-node-${_version}.tgz" - -#. Create an S3 bucket and upload the archive into the bucket, giving public readable permission through a public-read - ACL :: - - _bucket= - aws s3 cp --acl public-read aws-parallelcluster-node-${_version}.tgz s3://${_bucket}/node/aws-parallelcluster-node-${_version}.tgz - - -#. Add the following variable to the AWS ParallelCluster config file, under the `[cluster ...]` section" :: - - extra_json = { "cluster" : { "custom_node_package" : "https://s3..amazonaws.com/${_bucket}/node/aws-parallelcluster-node-${_version}.tgz" } } - diff --git a/docs/development.rst b/docs/development.rst deleted file mode 100644 index 09e8b374dc..0000000000 --- a/docs/development.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. _development: - -Development -########### - -Here you can find guides for getting started with the development of AWS ParallelCluster. - -.. warning:: - The following guides are instructions for use a custom version of the cookbook recipes or a custom AWS - ParallelCluster Node package. - These are advanced method of customizing AWS ParallelCluster, with many hard to debug pitfalls. - The AWS ParallelCluster team highly recommends using :doc:`pre_post_install` scripts for customization, as post - install hooks are generally easier to debug and more portable across releases of AWS ParallelCluster. - -.. 
toctree:: - - custom_cookbook - custom_node_package diff --git a/docs/functional.rst b/docs/functional.rst deleted file mode 100644 index c2a25a9f70..0000000000 --- a/docs/functional.rst +++ /dev/null @@ -1,13 +0,0 @@ -.. _functional: - -How AWS ParallelCluster Works -############################# - -AWS ParallelCluster was built not only as a way to manage clusters, but as a reference on how to use AWS services to -build your HPC environment - -.. toctree:: - - processes - aws_services - autoscaling diff --git a/docs/getting_started.rst b/docs/getting_started.rst deleted file mode 100644 index 1346dada2d..0000000000 --- a/docs/getting_started.rst +++ /dev/null @@ -1,241 +0,0 @@ -.. _getting_started: - -.. toctree:: - :maxdepth: 2 - -######################################## -Getting started with AWS ParallelCluster -######################################## - -AWS ParallelCluster is an AWS supported Open Source cluster management tool that makes it easy for you to deploy and -manage High Performance Computing (HPC) clusters in the AWS cloud. -Built on the Open Source CfnCluster project, AWS ParallelCluster enables you to quickly build an HPC compute -environment in AWS. -It automatically sets up the required compute resources and a shared filesystem and offers a variety of batch -schedulers such as AWS Batch, SGE, Torque, and Slurm. -AWS ParallelCluster facilitates both quick start proof of concepts (POCs) and production deployments. -You can build higher level workflows, such as a Genomics portal that automates the entire DNA sequencing workflow, on -top of AWS ParallelCluster. - -Installing AWS ParallelCluster -============================== - -The current working version is aws-parallelcluster-|version|. The CLI is written in Python and uses BOTO for AWS -actions. -You can install the CLI with the following commands, depending on your OS. - -Linux/OSX ---------- -:: - - $ sudo pip install aws-parallelcluster - -Windows -------- -Windows support is experimental!! - -Install the following packages: - -* Python3.6 - https://www.python.org/download/ -* pip - https://pip.pypa.io/en/stable/installing/ - -Once installed, you should update the Environment Variables to have the Python install directory and Python Scripts -directory in the PATH, for example: ``C:\Python36-32;C:\Python36-32\Scripts`` - -Now it should be possible to run the following within a command prompt window: - -:: - - C:\> pip install aws-parallelcluster - -Upgrading ---------- - -To upgrade an older version of AWS ParallelCluster, you can use either of the following commands, depending on how it -was originally installed: - -:: - - $ sudo pip install --upgrade aws-parallelcluster - -**Remember when upgrading to check that the existing config is compatible with the latest version installed.** - -.. _getting_started_configuring_parallelcluster: - -Configuring AWS ParallelCluster -=============================== - -First you'll need to setup your IAM credentials, see `AWS CLI `_ -for more information. - -:: - - $ aws configure - AWS Access Key ID [None]: AKIAIOSFODNN7EXAMPLE - AWS Secret Access Key [None]: wJalrXUtnFEMI/K7MDENG/bPxRfiCYEXAMPLEKEY - Default region name [us-east-1]: us-east-1 - Default output format [None]: - -Once installed you will need to setup some initial config. The easiest way to do this is below: - -:: - - $ pcluster configure - -This configure wizard will prompt you for everything you need to create your cluster. 
-You will first be prompted for your cluster template name, which is the logical name of the template you will create a -cluster from. - -:: - - Cluster Template [mycluster]: - -Now, you will be presented with a list of valid AWS region identifiers. Choose the region in which you'd like your -cluster to run. - -:: - - Acceptable Values for AWS Region ID: - us-east-1 - cn-north-1 - ap-northeast-1 - eu-west-1 - ap-southeast-1 - ap-southeast-2 - us-west-2 - us-gov-west-1 - us-gov-east-1 - us-west-1 - eu-central-1 - sa-east-1 - AWS Region ID []: - -Choose a descriptive name for your VPC. Typically, this will be something like :code:`production` or :code:`test`. - -:: - - VPC Name [myvpc]: - -Next, you will need to choose a key pair that already exists in EC2 in order to log into your master instance. -If you do not already have a key pair, refer to the EC2 documentation on `EC2 Key Pairs -`_. - -:: - - Acceptable Values for Key Name: - keypair1 - keypair-test - production-key - Key Name []: - -Choose the VPC ID into which you'd like your cluster launched. - -:: - - Acceptable Values for VPC ID: - vpc-1kd24879 - vpc-blk4982d - VPC ID []: - -Finally, choose the subnet in which you'd like your master server to run. - -:: - - Acceptable Values for Master Subnet ID: - subnet-9k284a6f - subnet-1k01g357 - subnet-b921nv04 - Master Subnet ID []: - - -Next, a simple cluster launches into a VPC and uses an existing subnet which supports public IP's i.e. the route table -for the subnet is :code:`0.0.0.0/0 => igw-xxxxxx`. -The VPC must have :code:`DNS Resolution = yes` and :code:`DNS Hostnames = yes`. -It should also have DHCP options with the correct :code:`domain-name` for the region, as defined in the docs: `VPC DHCP -Options `_. - -Once all of those settings contain valid values, you can launch the cluster by running the create command: - -:: - - $ pcluster create mycluster - -Once the cluster reaches the "CREATE_COMPLETE" status, you can connect using your normal SSH client/settings. -For more details on connecting to EC2 instances, check the `EC2 User Guide -`_. - - -Moving from CfnCluster to AWS ParallelCluster -============================================= - -AWS ParallelCluster is an enhanced and productized version of CfnCluster. - -If you are a previous CfnCluster user, we encourage you to start using and creating new clusters only with AWS -ParallelCluster. -Although you can still use CfnCluster, it will no longer be developed. - -The main differences between CfnCluster and AWS ParallelCluster are listed below. - -| - -**AWS ParallelCluster CLI manages a different set of clusters** - -Clusters created by :code:`cfncluster` CLI cannot be managed with :code:`pcluster` CLI. -The following commands will no longer work on clusters created by CfnCluster:: - - pcluster list - pcluster update cluster_name - pcluster start cluster_name - pcluster status cluster_name - -You need to use the :code:`cfncluster` CLI to manage your old clusters. - -If you need an old CfnCluster package to manage your old clusters, we recommend you install and use it -from a `Python Virtual Environment `_. - -| - -**Distinct IAM Custom Policies** - -Custom IAM Policies, previously used for CfnCluster cluster creation, cannot be used with AWS ParallelCluster. -If you require custom policies you need to create the new ones by following :ref:`IAM in AWS ParallelCluster ` -guide. 
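The VPC requirements above (DNS Resolution and DNS Hostnames both enabled) can be spot-checked before running ``pcluster create`` with a minimal boto3 sketch, assuming a placeholder VPC ID and region. ::

    import boto3

    vpc_id = "vpc-xxxxxx"  # placeholder: the VPC ID chosen in the configure wizard
    ec2 = boto3.client("ec2", region_name="us-east-1")  # placeholder region

    for attribute, response_key in (("enableDnsSupport", "EnableDnsSupport"),
                                    ("enableDnsHostnames", "EnableDnsHostnames")):
        value = ec2.describe_vpc_attribute(VpcId=vpc_id, Attribute=attribute)[response_key]["Value"]
        # Both attributes must report True for the cluster to work as described above.
        print("%s = %s" % (attribute, value))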
- -| - -**Different configuration files** - -The AWS ParallelCluster configuration file resides in the :code:`~/.parallelcluster` folder, unlike the CfnCluster one -that was created in the :code:`~/.cfncluster` folder. - -You can still use your existing configuration file but this needs to be moved from :code:`~/.cfncluster/config` to -:code:`~/.parallelcluster/config`. - -If you use the :code:`extra_json` configuration parameter, it must be changed as described below: - -:code:`extra_json = { "cfncluster" : { } }` - -has been changed to - -:code:`extra_json = { "cluster" : { } }` - -| - -**Ganglia disabled by default** - -Ganglia is disabled by default. -You can enable it by setting the :code:`extra_json` parameter as described below: - -:code:`extra_json = { "cluster" : { "ganglia_enabled" : "yes" } }` - -and changing the Master SG to allow connections to port 80. -The :code:`parallelcluster--MasterSecurityGroup-` Security Group has to be modified by -`adding a new Security Group Rule -`_ -to allow Inbound connection to the port 80 from your Public IP. - -.. spelling:: - aws - wJalrXUtnFEMI - MDENG - bPxRfiCYEXAMPLEKEY diff --git a/docs/guzzle_sphinx_theme/LICENSE b/docs/guzzle_sphinx_theme/LICENSE deleted file mode 100644 index b711d95871..0000000000 --- a/docs/guzzle_sphinx_theme/LICENSE +++ /dev/null @@ -1,19 +0,0 @@ -Copyright (c) 2013 Michael Dowling - -Permission is hereby granted, free of charge, to any person obtaining a copy -of this software and associated documentation files (the "Software"), to deal -in the Software without restriction, including without limitation the rights -to use, copy, modify, merge, publish, distribute, sublicense, and/or sell -copies of the Software, and to permit persons to whom the Software is -furnished to do so, subject to the following conditions: - -The above copyright notice and this permission notice shall be included in -all copies or substantial portions of the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN -THE SOFTWARE. 
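The Ganglia note above, which calls for an inbound rule on port 80 of the Master security group, can be scripted with a minimal boto3 sketch, assuming placeholder values for the security group ID, source CIDR, and region. ::

    import boto3

    master_sg_id = "sg-xxxxxx"          # placeholder: the MasterSecurityGroup of your cluster
    my_public_cidr = "203.0.113.10/32"  # placeholder: your public IP in CIDR form

    ec2 = boto3.client("ec2", region_name="us-east-1")  # placeholder region
    # Allow inbound HTTP (the Ganglia web interface) only from the given address.
    ec2.authorize_security_group_ingress(
        GroupId=master_sg_id,
        IpProtocol="tcp",
        FromPort=80,
        ToPort=80,
        CidrIp=my_public_cidr,
    )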
diff --git a/docs/guzzle_sphinx_theme/__init__.py b/docs/guzzle_sphinx_theme/__init__.py deleted file mode 100644 index a5bdd2109f..0000000000 --- a/docs/guzzle_sphinx_theme/__init__.py +++ /dev/null @@ -1,157 +0,0 @@ -"""Sphinx Guzzle theme.""" - -import os -import xml.etree.ElementTree as ET - -from docutils import nodes -from sphinx.locale import admonitionlabels -from sphinx.writers.html import HTMLTranslator as SphinxHTMLTranslator - -from pygments.style import Style -from pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic, Whitespace, Punctuation, Other, Literal - - -def setup(app): - """Setup conntects events to the sitemap builder""" - app.connect('html-page-context', add_html_link) - app.connect('build-finished', create_sitemap) - app.sitemap_links = [] - - -def add_html_link(app, pagename, templatename, context, doctree): - """As each page is built, collect page names for the sitemap""" - base_url = app.config['html_theme_options'].get('base_url', '') - if base_url: - app.sitemap_links.append(base_url + pagename + ".html") - - -def create_sitemap(app, exception): - """Generates the sitemap.xml from the collected HTML page links""" - if (not app.config['html_theme_options'].get('base_url', '') or - exception is not None or - not app.sitemap_links): - return - - filename = app.outdir + "/sitemap.xml" - print("Generating sitemap.xml in %s" % filename) - - root = ET.Element("urlset") - root.set("xmlns", "http://www.sitemaps.org/schemas/sitemap/0.9") - - for link in app.sitemap_links: - url = ET.SubElement(root, "url") - ET.SubElement(url, "loc").text = link - - ET.ElementTree(root).write(filename) - - -def html_theme_path(): - return [os.path.dirname(os.path.abspath(__file__))] - - -class HTMLTranslator(SphinxHTMLTranslator): - """ - Handle translating to bootstrap structure. - """ - def visit_table(self, node, name=''): - """ - Override docutils default table formatter to not include a border - and to use Bootstrap CSS - See: http://sourceforge.net/p/docutils/code/HEAD/tree/trunk/docutils/docutils/writers/html4css1/__init__.py#l1550 - """ - self.context.append(self.compact_p) - self.compact_p = True - classes = ' '.join(['table', 'table-bordered', - self.settings.table_style]).strip() - self.body.append( - self.starttag(node, 'table', CLASS=classes)) - - def depart_table(self, node): - """ - This needs overridin' too - """ - self.compact_p = self.context.pop() - self.body.append('\n') - - -class GuzzleStyle(Style): - background_color = "#f8f8f8" - default_style = "" - - styles = { - # No corresponding class for the following: - #Text: "", # class: '' - Whitespace: "underline #f8f8f8", # class: 'w' - Error: "#a40000 border:#ef2929", # class: 'err' - Other: "#000000", # class 'x' - - Comment: "italic #8f5902", # class: 'c' - Comment.Preproc: "noitalic", # class: 'cp' - - Keyword: "bold #004461", # class: 'k' - Keyword.Constant: "bold #004461", # class: 'kc' - Keyword.Declaration: "bold #004461", # class: 'kd' - Keyword.Namespace: "bold #004461", # class: 'kn' - Keyword.Pseudo: "bold #004461", # class: 'kp' - Keyword.Reserved: "bold #004461", # class: 'kr' - Keyword.Type: "bold #004461", # class: 'kt' - - Operator: "#582800", # class: 'o' - Operator.Word: "bold #004461", # class: 'ow' - like keywords - - Punctuation: "bold #000000", # class: 'p' - - # because special names such as Name.Class, Name.Function, etc. - # are not recognized as such later in the parsing, we choose them - # to look the same as ordinary variables. 
- Name: "#000000", # class: 'n' - Name.Attribute: "#006EC4", # class: 'na' - to be revised - Name.Builtin: "#004461", # class: 'nb' - Name.Builtin.Pseudo: "#3465a4", # class: 'bp' - Name.Class: "#000000", # class: 'nc' - to be revised - Name.Constant: "#000000", # class: 'no' - to be revised - Name.Decorator: "#888", # class: 'nd' - to be revised - Name.Entity: "#ce5c00", # class: 'ni' - Name.Exception: "bold #cc0000", # class: 'ne' - Name.Function: "#000000", # class: 'nf' - Name.Property: "#000000", # class: 'py' - Name.Label: "#f57900", # class: 'nl' - Name.Namespace: "#000000", # class: 'nn' - to be revised - Name.Other: "#000000", # class: 'nx' - Name.Tag: "bold #004461", # class: 'nt' - like a keyword - Name.Variable: "#000000", # class: 'nv' - to be revised - Name.Variable.Class: "#000000", # class: 'vc' - to be revised - Name.Variable.Global: "#000000", # class: 'vg' - to be revised - Name.Variable.Instance: "#000000", # class: 'vi' - to be revised - - Number: "#990000", # class: 'm' - - Literal: "#000000", # class: 'l' - Literal.Date: "#000000", # class: 'ld' - - String: "#4e9a06", # class: 's' - String.Backtick: "#4e9a06", # class: 'sb' - String.Char: "#4e9a06", # class: 'sc' - String.Doc: "italic #8f5902", # class: 'sd' - like a comment - String.Double: "#4e9a06", # class: 's2' - String.Escape: "#4e9a06", # class: 'se' - String.Heredoc: "#4e9a06", # class: 'sh' - String.Interpol: "#4e9a06", # class: 'si' - String.Other: "#4e9a06", # class: 'sx' - String.Regex: "#4e9a06", # class: 'sr' - String.Single: "#4e9a06", # class: 's1' - String.Symbol: "#4e9a06", # class: 'ss' - - Generic: "#000000", # class: 'g' - Generic.Deleted: "#a40000", # class: 'gd' - Generic.Emph: "italic #000000", # class: 'ge' - Generic.Error: "#ef2929", # class: 'gr' - Generic.Heading: "bold #000080", # class: 'gh' - Generic.Inserted: "#00A000", # class: 'gi' - Generic.Output: "#888", # class: 'go' - Generic.Prompt: "#745334", # class: 'gp' - Generic.Strong: "bold #000000", # class: 'gs' - Generic.Subheading: "bold #800080", # class: 'gu' - Generic.Traceback: "bold #a40000", # class: 'gt' - } diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/comments.html b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/comments.html deleted file mode 100644 index 42a95fb912..0000000000 --- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/comments.html +++ /dev/null @@ -1,16 +0,0 @@ -{% if theme_disqus_comments_shortname %} -
    - comments powered by Disqus
    -{% endif %} diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/layout.html b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/layout.html deleted file mode 100644 index 7625d50fdb..0000000000 --- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/layout.html +++ /dev/null @@ -1,156 +0,0 @@ -{%- extends "basic/layout.html" %} - -{# Do this so that bootstrap is included before the main css file #} -{%- block htmltitle %} - {{ super() }} - - -{%- endblock %} - -{# Displays the next and previous links both before and after content #} -{% macro render_relations(class='footer-links', add_spacer=0) -%} - {% if prev or next %} -
-
-
- {% else %}
- {% if add_spacer %}
-
- {% endif %}
- {% endif %}
-{%- endmacro %}
-
-{%- block extrahead %}
-
-
- {% if theme_touch_icon %}
-
- {% endif %}
- {{ super() }}
-{% endblock %}
-
-{% block header %}
-
- {{ super() }}
-
-{% endblock %}
-
-{%- block content %}
-
- {%- if pagename == 'index' and theme_index_template %}
- {% include theme_index_template %}
- {%- else %}
-
-
- {%- block top_rel_links %}
- {{ render_relations('top-links', 1) }}
- {%- endblock %}
-
- {%- block document_wrapper %}
-
- {%- block sidebar1 %}{{ sidebar() }}{% endblock %}
- {%- block document %}
-
-
-
- Important
-
- New Docs are available at https://docs.aws.amazon.com/parallelcluster
-
- All new features, starting with 2.4.0, will be documented there.
-
- {% block body %} {% endblock %}
-
- {%- endblock %}
- {%- block sidebar2 %}{# Place holder for other sidebar #}{% endblock %}
-
-
- {%- endblock %}
-
- {%- block bottom_rel_links %}
- {{ render_relations('footer-links') }}
- {%- endblock %}
-
- {%- block comments -%}
- {% if theme_disqus_comments_shortname %}
-
- {% include "comments.html" %}
-
- {% endif %}
- {%- endblock %}
-
-
    - {%- endif %} -{%- endblock %} - -{%- block footer %} - - {%- block footer_wrapper %} - - {%- endblock %} - - {%- block ga %} - {%- if theme_google_analytics_account %} - - {%- endif %} - {%- endblock %} - - {% block key_events %} - {% if theme_bind_key_events %} - - {% endif %} - {% endblock %} - -{%- endblock %} - diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/searchbox.html b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/searchbox.html deleted file mode 100644 index 115b88e190..0000000000 --- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/searchbox.html +++ /dev/null @@ -1,12 +0,0 @@ -{%- if pagename != "search" %} - - -{%- endif %} diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap-responsive.min.css b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap-responsive.min.css deleted file mode 100644 index d1b7f4b0b8..0000000000 --- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap-responsive.min.css +++ /dev/null @@ -1,9 +0,0 @@ -/*! - * Bootstrap Responsive v2.3.1 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;line-height:0;content:""}.clearfix:after{clear:both}.hide-text{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.input-block-level{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}@-ms-viewport{width:device-width}.hidden{display:none;visibility:hidden}.visible-phone{display:none!important}.visible-tablet{display:none!important}.hidden-desktop{display:none!important}.visible-desktop{display:inherit!important}@media(min-width:768px) and (max-width:979px){.hidden-desktop{display:inherit!important}.visible-desktop{display:none!important}.visible-tablet{display:inherit!important}.hidden-tablet{display:none!important}}@media(max-width:767px){.hidden-desktop{display:inherit!important}.visible-desktop{display:none!important}.visible-phone{display:inherit!important}.hidden-phone{display:none!important}}.visible-print{display:none!important}@media print{.visible-print{display:inherit!important}.hidden-print{display:none!important}}@media(min-width:1200px){.row{margin-left:-30px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;min-height:1px;margin-left:30px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:1170px}.span12{width:1170px}.span11{width:1070px}.span10{width:970px}.span9{width:870px}.span8{width:770px}.span7{width:670px}.span6{width:570px}.span5{width:470px}.span4{width:370px}.span3{width:270px}.span2{width:170px}.span1{width:70px}.offset12{margin-left:1230px}.offset11{margin-left:1130px}.offset10{margin-left:1030px}.offset9{margin-left:930px}.offset8{margin-left:830px}.offset7{margin-left:730px}.offset6{margin-left:630px}.offset5{margin-left:530px}.offset4{margin-left:430px}.offset3{margin-left:330px}.offset2{margin-left:230px}.offset1{margin-left:130px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid 
[class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.564102564102564%;*margin-left:2.5109110747408616%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .controls-row [class*="span"]+[class*="span"]{margin-left:2.564102564102564%}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.45299145299145%;*width:91.39979996362975%}.row-fluid .span10{width:82.90598290598291%;*width:82.8527914166212%}.row-fluid .span9{width:74.35897435897436%;*width:74.30578286961266%}.row-fluid .span8{width:65.81196581196582%;*width:65.75877432260411%}.row-fluid .span7{width:57.26495726495726%;*width:57.21176577559556%}.row-fluid .span6{width:48.717948717948715%;*width:48.664757228587014%}.row-fluid .span5{width:40.17094017094017%;*width:40.11774868157847%}.row-fluid .span4{width:31.623931623931625%;*width:31.570740134569924%}.row-fluid .span3{width:23.076923076923077%;*width:23.023731587561375%}.row-fluid .span2{width:14.52991452991453%;*width:14.476723040552828%}.row-fluid .span1{width:5.982905982905983%;*width:5.929714493544281%}.row-fluid .offset12{margin-left:105.12820512820512%;*margin-left:105.02182214948171%}.row-fluid .offset12:first-child{margin-left:102.56410256410257%;*margin-left:102.45771958537915%}.row-fluid .offset11{margin-left:96.58119658119658%;*margin-left:96.47481360247316%}.row-fluid .offset11:first-child{margin-left:94.01709401709402%;*margin-left:93.91071103837061%}.row-fluid .offset10{margin-left:88.03418803418803%;*margin-left:87.92780505546462%}.row-fluid .offset10:first-child{margin-left:85.47008547008548%;*margin-left:85.36370249136206%}.row-fluid .offset9{margin-left:79.48717948717949%;*margin-left:79.38079650845607%}.row-fluid .offset9:first-child{margin-left:76.92307692307693%;*margin-left:76.81669394435352%}.row-fluid .offset8{margin-left:70.94017094017094%;*margin-left:70.83378796144753%}.row-fluid .offset8:first-child{margin-left:68.37606837606839%;*margin-left:68.26968539734497%}.row-fluid .offset7{margin-left:62.393162393162385%;*margin-left:62.28677941443899%}.row-fluid .offset7:first-child{margin-left:59.82905982905982%;*margin-left:59.72267685033642%}.row-fluid .offset6{margin-left:53.84615384615384%;*margin-left:53.739770867430444%}.row-fluid .offset6:first-child{margin-left:51.28205128205128%;*margin-left:51.175668303327875%}.row-fluid .offset5{margin-left:45.299145299145295%;*margin-left:45.1927623204219%}.row-fluid .offset5:first-child{margin-left:42.73504273504273%;*margin-left:42.62865975631933%}.row-fluid .offset4{margin-left:36.75213675213675%;*margin-left:36.645753773413354%}.row-fluid .offset4:first-child{margin-left:34.18803418803419%;*margin-left:34.081651209310785%}.row-fluid .offset3{margin-left:28.205128205128204%;*margin-left:28.0987452264048%}.row-fluid .offset3:first-child{margin-left:25.641025641025642%;*margin-left:25.53464266230224%}.row-fluid .offset2{margin-left:19.65811965811966%;*margin-left:19.551736679396257%}.row-fluid .offset2:first-child{margin-left:17.094017094017094%;*margin-left:16.98763411529369%}.row-fluid .offset1{margin-left:11.11111111111111%;*margin-left:11.004728132387708%}.row-fluid .offset1:first-child{margin-left:8.547008547008547%;*margin-left:8.440625568285142%}input,textarea,.uneditable-input{margin-left:0}.controls-row 
[class*="span"]+[class*="span"]{margin-left:30px}input.span12,textarea.span12,.uneditable-input.span12{width:1156px}input.span11,textarea.span11,.uneditable-input.span11{width:1056px}input.span10,textarea.span10,.uneditable-input.span10{width:956px}input.span9,textarea.span9,.uneditable-input.span9{width:856px}input.span8,textarea.span8,.uneditable-input.span8{width:756px}input.span7,textarea.span7,.uneditable-input.span7{width:656px}input.span6,textarea.span6,.uneditable-input.span6{width:556px}input.span5,textarea.span5,.uneditable-input.span5{width:456px}input.span4,textarea.span4,.uneditable-input.span4{width:356px}input.span3,textarea.span3,.uneditable-input.span3{width:256px}input.span2,textarea.span2,.uneditable-input.span2{width:156px}input.span1,textarea.span1,.uneditable-input.span1{width:56px}.thumbnails{margin-left:-30px}.thumbnails>li{margin-left:30px}.row-fluid .thumbnails{margin-left:0}}@media(min-width:768px) and (max-width:979px){.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;min-height:1px;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:724px}.span12{width:724px}.span11{width:662px}.span10{width:600px}.span9{width:538px}.span8{width:476px}.span7{width:414px}.span6{width:352px}.span5{width:290px}.span4{width:228px}.span3{width:166px}.span2{width:104px}.span1{width:42px}.offset12{margin-left:764px}.offset11{margin-left:702px}.offset10{margin-left:640px}.offset9{margin-left:578px}.offset8{margin-left:516px}.offset7{margin-left:454px}.offset6{margin-left:392px}.offset5{margin-left:330px}.offset4{margin-left:268px}.offset3{margin-left:206px}.offset2{margin-left:144px}.offset1{margin-left:82px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.7624309392265194%;*margin-left:2.709239449864817%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .controls-row [class*="span"]+[class*="span"]{margin-left:2.7624309392265194%}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.43646408839778%;*width:91.38327259903608%}.row-fluid .span10{width:82.87292817679558%;*width:82.81973668743387%}.row-fluid .span9{width:74.30939226519337%;*width:74.25620077583166%}.row-fluid .span8{width:65.74585635359117%;*width:65.69266486422946%}.row-fluid .span7{width:57.18232044198895%;*width:57.12912895262725%}.row-fluid .span6{width:48.61878453038674%;*width:48.56559304102504%}.row-fluid .span5{width:40.05524861878453%;*width:40.00205712942283%}.row-fluid .span4{width:31.491712707182323%;*width:31.43852121782062%}.row-fluid .span3{width:22.92817679558011%;*width:22.87498530621841%}.row-fluid .span2{width:14.3646408839779%;*width:14.311449394616199%}.row-fluid .span1{width:5.801104972375691%;*width:5.747913483013988%}.row-fluid .offset12{margin-left:105.52486187845304%;*margin-left:105.41847889972962%}.row-fluid .offset12:first-child{margin-left:102.76243093922652%;*margin-left:102.6560479605031%}.row-fluid .offset11{margin-left:96.96132596685082%;*margin-left:96.8549429881274%}.row-fluid .offset11:first-child{margin-left:94.1988950276243%;*margin-left:94.09251204890089%}.row-fluid 
.offset10{margin-left:88.39779005524862%;*margin-left:88.2914070765252%}.row-fluid .offset10:first-child{margin-left:85.6353591160221%;*margin-left:85.52897613729868%}.row-fluid .offset9{margin-left:79.8342541436464%;*margin-left:79.72787116492299%}.row-fluid .offset9:first-child{margin-left:77.07182320441989%;*margin-left:76.96544022569647%}.row-fluid .offset8{margin-left:71.2707182320442%;*margin-left:71.16433525332079%}.row-fluid .offset8:first-child{margin-left:68.50828729281768%;*margin-left:68.40190431409427%}.row-fluid .offset7{margin-left:62.70718232044199%;*margin-left:62.600799341718584%}.row-fluid .offset7:first-child{margin-left:59.94475138121547%;*margin-left:59.838368402492065%}.row-fluid .offset6{margin-left:54.14364640883978%;*margin-left:54.037263430116376%}.row-fluid .offset6:first-child{margin-left:51.38121546961326%;*margin-left:51.27483249088986%}.row-fluid .offset5{margin-left:45.58011049723757%;*margin-left:45.47372751851417%}.row-fluid .offset5:first-child{margin-left:42.81767955801105%;*margin-left:42.71129657928765%}.row-fluid .offset4{margin-left:37.01657458563536%;*margin-left:36.91019160691196%}.row-fluid .offset4:first-child{margin-left:34.25414364640884%;*margin-left:34.14776066768544%}.row-fluid .offset3{margin-left:28.45303867403315%;*margin-left:28.346655695309746%}.row-fluid .offset3:first-child{margin-left:25.69060773480663%;*margin-left:25.584224756083227%}.row-fluid .offset2{margin-left:19.88950276243094%;*margin-left:19.783119783707537%}.row-fluid .offset2:first-child{margin-left:17.12707182320442%;*margin-left:17.02068884448102%}.row-fluid .offset1{margin-left:11.32596685082873%;*margin-left:11.219583872105325%}.row-fluid .offset1:first-child{margin-left:8.56353591160221%;*margin-left:8.457152932878806%}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:710px}input.span11,textarea.span11,.uneditable-input.span11{width:648px}input.span10,textarea.span10,.uneditable-input.span10{width:586px}input.span9,textarea.span9,.uneditable-input.span9{width:524px}input.span8,textarea.span8,.uneditable-input.span8{width:462px}input.span7,textarea.span7,.uneditable-input.span7{width:400px}input.span6,textarea.span6,.uneditable-input.span6{width:338px}input.span5,textarea.span5,.uneditable-input.span5{width:276px}input.span4,textarea.span4,.uneditable-input.span4{width:214px}input.span3,textarea.span3,.uneditable-input.span3{width:152px}input.span2,textarea.span2,.uneditable-input.span2{width:90px}input.span1,textarea.span1,.uneditable-input.span1{width:28px}}@media(max-width:767px){body{padding-right:20px;padding-left:20px}.navbar-fixed-top,.navbar-fixed-bottom,.navbar-static-top{margin-right:-20px;margin-left:-20px}.container-fluid{padding:0}.dl-horizontal dt{float:none;width:auto;clear:none;text-align:left}.dl-horizontal dd{margin-left:0}.container{width:auto}.row-fluid{width:100%}.row,.thumbnails{margin-left:0}.thumbnails>li{float:none;margin-left:0}[class*="span"],.uneditable-input[class*="span"],.row-fluid [class*="span"]{display:block;float:none;width:100%;margin-left:0;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.span12,.row-fluid .span12{width:100%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid 
[class*="offset"]:first-child{margin-left:0}.input-large,.input-xlarge,.input-xxlarge,input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.input-prepend input,.input-append input,.input-prepend input[class*="span"],.input-append input[class*="span"]{display:inline-block;width:auto}.controls-row [class*="span"]+[class*="span"]{margin-left:0}.modal{position:fixed;top:20px;right:20px;left:20px;width:auto;margin:0}.modal.fade{top:-100px}.modal.fade.in{top:20px}}@media(max-width:480px){.nav-collapse{-webkit-transform:translate3d(0,0,0)}.page-header h1 small{display:block;line-height:20px}input[type="checkbox"],input[type="radio"]{border:1px solid #ccc}.form-horizontal .control-label{float:none;width:auto;padding-top:0;text-align:left}.form-horizontal .controls{margin-left:0}.form-horizontal .control-list{padding-top:0}.form-horizontal .form-actions{padding-right:10px;padding-left:10px}.media .pull-left,.media .pull-right{display:block;float:none;margin-bottom:10px}.media-object{margin-right:0;margin-left:0}.modal{top:10px;right:10px;left:10px}.modal-header .close{padding:10px;margin:-10px}.carousel-caption{position:static}}@media(max-width:979px){body{padding-top:0}.navbar-fixed-top,.navbar-fixed-bottom{position:static}.navbar-fixed-top{margin-bottom:20px}.navbar-fixed-bottom{margin-top:20px}.navbar-fixed-top .navbar-inner,.navbar-fixed-bottom .navbar-inner{padding:5px}.navbar .container{width:auto;padding:0}.navbar .brand{padding-right:10px;padding-left:10px;margin:0 0 0 -5px}.nav-collapse{clear:both}.nav-collapse .nav{float:none;margin:0 0 10px}.nav-collapse .nav>li{float:none}.nav-collapse .nav>li>a{margin-bottom:2px}.nav-collapse .nav>.divider-vertical{display:none}.nav-collapse .nav .nav-header{color:#777;text-shadow:none}.nav-collapse .nav>li>a,.nav-collapse .dropdown-menu a{padding:9px 15px;font-weight:bold;color:#777;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}.nav-collapse .btn{padding:4px 10px 4px;font-weight:normal;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.nav-collapse .dropdown-menu li+li a{margin-bottom:2px}.nav-collapse .nav>li>a:hover,.nav-collapse .nav>li>a:focus,.nav-collapse .dropdown-menu a:hover,.nav-collapse .dropdown-menu a:focus{background-color:#f2f2f2}.navbar-inverse .nav-collapse .nav>li>a,.navbar-inverse .nav-collapse .dropdown-menu a{color:#999}.navbar-inverse .nav-collapse .nav>li>a:hover,.navbar-inverse .nav-collapse .nav>li>a:focus,.navbar-inverse .nav-collapse .dropdown-menu a:hover,.navbar-inverse .nav-collapse .dropdown-menu a:focus{background-color:#111}.nav-collapse.in .btn-group{padding:0;margin-top:5px}.nav-collapse .dropdown-menu{position:static;top:auto;left:auto;display:none;float:none;max-width:none;padding:0;margin:0 15px;background-color:transparent;border:0;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0;-webkit-box-shadow:none;-moz-box-shadow:none;box-shadow:none}.nav-collapse .open>.dropdown-menu{display:block}.nav-collapse .dropdown-menu:before,.nav-collapse .dropdown-menu:after{display:none}.nav-collapse .dropdown-menu .divider{display:none}.nav-collapse .nav>li>.dropdown-menu:before,.nav-collapse .nav>li>.dropdown-menu:after{display:none}.nav-collapse .navbar-form,.nav-collapse .navbar-search{float:none;padding:10px 15px;margin:10px 0;border-top:1px solid #f2f2f2;border-bottom:1px solid #f2f2f2;-webkit-box-shadow:inset 0 1px 0 
rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);-moz-box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1);box-shadow:inset 0 1px 0 rgba(255,255,255,0.1),0 1px 0 rgba(255,255,255,0.1)}.navbar-inverse .nav-collapse .navbar-form,.navbar-inverse .nav-collapse .navbar-search{border-top-color:#111;border-bottom-color:#111}.navbar .nav-collapse .nav.pull-right{float:none;margin-left:0}.nav-collapse,.nav-collapse.collapse{height:0;overflow:hidden}.navbar .btn-navbar{display:block}.navbar-static .navbar-inner{padding-right:10px;padding-left:10px}}@media(min-width:980px){.nav-collapse.collapse{height:auto!important;overflow:visible!important}} diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.css b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.css deleted file mode 100644 index 3fd3b1a78d..0000000000 --- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.css +++ /dev/null @@ -1,9 +0,0 @@ -/*! - * Bootstrap v2.3.1 - * - * Copyright 2012 Twitter, Inc - * Licensed under the Apache License v2.0 - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Designed and built with all the love in the world @twitter by @mdo and @fat. - */.clearfix{*zoom:1}.clearfix:before,.clearfix:after{display:table;line-height:0;content:""}.clearfix:after{clear:both}.hide-text{font:0/0 a;color:transparent;text-shadow:none;background-color:transparent;border:0}.input-block-level{display:block;width:100%;min-height:30px;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}article,aside,details,figcaption,figure,footer,header,hgroup,nav,section{display:block}audio,canvas,video{display:inline-block;*display:inline;*zoom:1}audio:not([controls]){display:none}html{font-size:100%;-webkit-text-size-adjust:100%;-ms-text-size-adjust:100%}a:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}a:hover,a:active{outline:0}sub,sup{position:relative;font-size:75%;line-height:0;vertical-align:baseline}sup{top:-0.5em}sub{bottom:-0.25em}img{width:auto\9;height:auto;max-width:100%;vertical-align:middle;border:0;-ms-interpolation-mode:bicubic}#map_canvas img,.google-maps img{max-width:none}button,input,select,textarea{margin:0;font-size:100%;vertical-align:middle}button,input{*overflow:visible;line-height:normal}button::-moz-focus-inner,input::-moz-focus-inner{padding:0;border:0}button,html input[type="button"],input[type="reset"],input[type="submit"]{cursor:pointer;-webkit-appearance:button}label,select,button,input[type="button"],input[type="reset"],input[type="submit"],input[type="radio"],input[type="checkbox"]{cursor:pointer}input[type="search"]{-webkit-box-sizing:content-box;-moz-box-sizing:content-box;box-sizing:content-box;-webkit-appearance:textfield}input[type="search"]::-webkit-search-decoration,input[type="search"]::-webkit-search-cancel-button{-webkit-appearance:none}textarea{overflow:auto;vertical-align:top}@media print{*{color:#000!important;text-shadow:none!important;background:transparent!important;box-shadow:none!important}a,a:visited{text-decoration:underline}a[href]:after{content:" (" attr(href) ")"}abbr[title]:after{content:" (" attr(title) ")"}.ir a:after,a[href^="javascript:"]:after,a[href^="#"]:after{content:""}pre,blockquote{border:1px solid 
#999;page-break-inside:avoid}thead{display:table-header-group}tr,img{page-break-inside:avoid}img{max-width:100%!important}@page{margin:.5cm}p,h2,h3{orphans:3;widows:3}h2,h3{page-break-after:avoid}}body{margin:0;font-family:"Helvetica Neue",Helvetica,Arial,sans-serif;font-size:14px;line-height:20px;color:#333;background-color:#fff}a{color:#08c;text-decoration:none}a:hover,a:focus{color:#005580;text-decoration:underline}.img-rounded{-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.img-polaroid{padding:4px;background-color:#fff;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.2);-webkit-box-shadow:0 1px 3px rgba(0,0,0,0.1);-moz-box-shadow:0 1px 3px rgba(0,0,0,0.1);box-shadow:0 1px 3px rgba(0,0,0,0.1)}.img-circle{-webkit-border-radius:500px;-moz-border-radius:500px;border-radius:500px}.row{margin-left:-20px;*zoom:1}.row:before,.row:after{display:table;line-height:0;content:""}.row:after{clear:both}[class*="span"]{float:left;min-height:1px;margin-left:20px}.container,.navbar-static-top .container,.navbar-fixed-top .container,.navbar-fixed-bottom .container{width:940px}.span12{width:940px}.span11{width:860px}.span10{width:780px}.span9{width:700px}.span8{width:620px}.span7{width:540px}.span6{width:460px}.span5{width:380px}.span4{width:300px}.span3{width:220px}.span2{width:140px}.span1{width:60px}.offset12{margin-left:980px}.offset11{margin-left:900px}.offset10{margin-left:820px}.offset9{margin-left:740px}.offset8{margin-left:660px}.offset7{margin-left:580px}.offset6{margin-left:500px}.offset5{margin-left:420px}.offset4{margin-left:340px}.offset3{margin-left:260px}.offset2{margin-left:180px}.offset1{margin-left:100px}.row-fluid{width:100%;*zoom:1}.row-fluid:before,.row-fluid:after{display:table;line-height:0;content:""}.row-fluid:after{clear:both}.row-fluid [class*="span"]{display:block;float:left;width:100%;min-height:30px;margin-left:2.127659574468085%;*margin-left:2.074468085106383%;-webkit-box-sizing:border-box;-moz-box-sizing:border-box;box-sizing:border-box}.row-fluid [class*="span"]:first-child{margin-left:0}.row-fluid .controls-row [class*="span"]+[class*="span"]{margin-left:2.127659574468085%}.row-fluid .span12{width:100%;*width:99.94680851063829%}.row-fluid .span11{width:91.48936170212765%;*width:91.43617021276594%}.row-fluid .span10{width:82.97872340425532%;*width:82.92553191489361%}.row-fluid .span9{width:74.46808510638297%;*width:74.41489361702126%}.row-fluid .span8{width:65.95744680851064%;*width:65.90425531914893%}.row-fluid .span7{width:57.44680851063829%;*width:57.39361702127659%}.row-fluid .span6{width:48.93617021276595%;*width:48.88297872340425%}.row-fluid .span5{width:40.42553191489362%;*width:40.37234042553192%}.row-fluid .span4{width:31.914893617021278%;*width:31.861702127659576%}.row-fluid .span3{width:23.404255319148934%;*width:23.351063829787233%}.row-fluid .span2{width:14.893617021276595%;*width:14.840425531914894%}.row-fluid .span1{width:6.382978723404255%;*width:6.329787234042553%}.row-fluid .offset12{margin-left:104.25531914893617%;*margin-left:104.14893617021275%}.row-fluid .offset12:first-child{margin-left:102.12765957446808%;*margin-left:102.02127659574467%}.row-fluid .offset11{margin-left:95.74468085106382%;*margin-left:95.6382978723404%}.row-fluid .offset11:first-child{margin-left:93.61702127659574%;*margin-left:93.51063829787232%}.row-fluid .offset10{margin-left:87.23404255319149%;*margin-left:87.12765957446807%}.row-fluid .offset10:first-child{margin-left:85.1063829787234%;*margin-left:84.99999999999999%}.row-fluid 
.offset9{margin-left:78.72340425531914%;*margin-left:78.61702127659572%}.row-fluid .offset9:first-child{margin-left:76.59574468085106%;*margin-left:76.48936170212764%}.row-fluid .offset8{margin-left:70.2127659574468%;*margin-left:70.10638297872339%}.row-fluid .offset8:first-child{margin-left:68.08510638297872%;*margin-left:67.9787234042553%}.row-fluid .offset7{margin-left:61.70212765957446%;*margin-left:61.59574468085106%}.row-fluid .offset7:first-child{margin-left:59.574468085106375%;*margin-left:59.46808510638297%}.row-fluid .offset6{margin-left:53.191489361702125%;*margin-left:53.085106382978715%}.row-fluid .offset6:first-child{margin-left:51.063829787234035%;*margin-left:50.95744680851063%}.row-fluid .offset5{margin-left:44.68085106382979%;*margin-left:44.57446808510638%}.row-fluid .offset5:first-child{margin-left:42.5531914893617%;*margin-left:42.4468085106383%}.row-fluid .offset4{margin-left:36.170212765957444%;*margin-left:36.06382978723405%}.row-fluid .offset4:first-child{margin-left:34.04255319148936%;*margin-left:33.93617021276596%}.row-fluid .offset3{margin-left:27.659574468085104%;*margin-left:27.5531914893617%}.row-fluid .offset3:first-child{margin-left:25.53191489361702%;*margin-left:25.425531914893618%}.row-fluid .offset2{margin-left:19.148936170212764%;*margin-left:19.04255319148936%}.row-fluid .offset2:first-child{margin-left:17.02127659574468%;*margin-left:16.914893617021278%}.row-fluid .offset1{margin-left:10.638297872340425%;*margin-left:10.53191489361702%}.row-fluid .offset1:first-child{margin-left:8.51063829787234%;*margin-left:8.404255319148938%}[class*="span"].hide,.row-fluid [class*="span"].hide{display:none}[class*="span"].pull-right,.row-fluid [class*="span"].pull-right{float:right}.container{margin-right:auto;margin-left:auto;*zoom:1}.container:before,.container:after{display:table;line-height:0;content:""}.container:after{clear:both}.container-fluid{padding-right:20px;padding-left:20px;*zoom:1}.container-fluid:before,.container-fluid:after{display:table;line-height:0;content:""}.container-fluid:after{clear:both}p{margin:0 0 10px}.lead{margin-bottom:20px;font-size:21px;font-weight:200;line-height:30px}small{font-size:85%}strong{font-weight:bold}em{font-style:italic}cite{font-style:normal}.muted{color:#999}a.muted:hover,a.muted:focus{color:#808080}.text-warning{color:#c09853}a.text-warning:hover,a.text-warning:focus{color:#a47e3c}.text-error{color:#b94a48}a.text-error:hover,a.text-error:focus{color:#953b39}.text-info{color:#3a87ad}a.text-info:hover,a.text-info:focus{color:#2d6987}.text-success{color:#468847}a.text-success:hover,a.text-success:focus{color:#356635}.text-left{text-align:left}.text-right{text-align:right}.text-center{text-align:center}h1,h2,h3,h4,h5,h6{margin:10px 0;font-family:inherit;font-weight:bold;line-height:20px;color:inherit;text-rendering:optimizelegibility}h1 small,h2 small,h3 small,h4 small,h5 small,h6 small{font-weight:normal;line-height:1;color:#999}h1,h2,h3{line-height:40px}h1{font-size:38.5px}h2{font-size:31.5px}h3{font-size:24.5px}h4{font-size:17.5px}h5{font-size:14px}h6{font-size:11.9px}h1 small{font-size:24.5px}h2 small{font-size:17.5px}h3 small{font-size:14px}h4 small{font-size:14px}.page-header{padding-bottom:9px;margin:20px 0 30px;border-bottom:1px solid #eee}ul,ol{padding:0;margin:0 0 10px 25px}ul ul,ul ol,ol ol,ol 
ul{margin-bottom:0}li{line-height:20px}ul.unstyled,ol.unstyled{margin-left:0;list-style:none}ul.inline,ol.inline{margin-left:0;list-style:none}ul.inline>li,ol.inline>li{display:inline-block;*display:inline;padding-right:5px;padding-left:5px;*zoom:1}dl{margin-bottom:20px}dt,dd{line-height:20px}dt{font-weight:bold}dd{margin-left:10px}.dl-horizontal{*zoom:1}.dl-horizontal:before,.dl-horizontal:after{display:table;line-height:0;content:""}.dl-horizontal:after{clear:both}.dl-horizontal dt{float:left;width:160px;overflow:hidden;clear:left;text-align:right;text-overflow:ellipsis;white-space:nowrap}.dl-horizontal dd{margin-left:180px}hr{margin:20px 0;border:0;border-top:1px solid #eee;border-bottom:1px solid #fff}abbr[title],abbr[data-original-title]{cursor:help;border-bottom:1px dotted #999}abbr.initialism{font-size:90%;text-transform:uppercase}blockquote{padding:0 0 0 15px;margin:0 0 20px;border-left:5px solid #eee}blockquote p{margin-bottom:0;font-size:15px;line-height:1.25}blockquote small{display:block;line-height:20px;color:#999}blockquote small:before{content:'\2014 \00A0'}blockquote.pull-right{float:right;padding-right:15px;padding-left:0;border-right:5px solid #eee;border-left:0}blockquote.pull-right p,blockquote.pull-right small{text-align:right}blockquote.pull-right small:before{content:''}blockquote.pull-right small:after{content:'\00A0 \2014'}q:before,q:after,blockquote:before,blockquote:after{content:""}address{display:block;margin-bottom:20px;font-style:normal;line-height:20px}code,pre{padding:0 3px 2px;font-family:Monaco,Menlo,Consolas,"Courier New",monospace;font-size:12px;color:#333;-webkit-border-radius:3px;-moz-border-radius:3px;border-radius:3px}code{padding:2px 4px;color:#d14;white-space:nowrap;background-color:#f7f7f9;border:1px solid #e1e1e8}pre{display:block;padding:9.5px;margin:0 0 10px;font-size:13px;line-height:20px;word-break:break-all;word-wrap:break-word;white-space:pre;white-space:pre-wrap;background-color:#f5f5f5;border:1px solid #ccc;border:1px solid rgba(0,0,0,0.15);-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}pre.prettyprint{margin-bottom:20px}pre code{padding:0;color:inherit;white-space:pre;white-space:pre-wrap;background-color:transparent;border:0}.pre-scrollable{max-height:340px;overflow-y:scroll}form{margin:0 0 20px}fieldset{padding:0;margin:0;border:0}legend{display:block;width:100%;padding:0;margin-bottom:20px;font-size:21px;line-height:40px;color:#333;border:0;border-bottom:1px solid #e5e5e5}legend small{font-size:15px;color:#999}label,input,button,select,textarea{font-size:14px;font-weight:normal;line-height:20px}input,button,select,textarea{font-family:"Helvetica Neue",Helvetica,Arial,sans-serif}label{display:block;margin-bottom:5px}select,textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{display:inline-block;height:20px;padding:4px 
6px;margin-bottom:10px;font-size:14px;line-height:20px;color:#555;vertical-align:middle;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}input,textarea,.uneditable-input{width:206px}textarea{height:auto}textarea,input[type="text"],input[type="password"],input[type="datetime"],input[type="datetime-local"],input[type="date"],input[type="month"],input[type="time"],input[type="week"],input[type="number"],input[type="email"],input[type="url"],input[type="search"],input[type="tel"],input[type="color"],.uneditable-input{background-color:#fff;border:1px solid #ccc;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-webkit-transition:border linear .2s,box-shadow linear .2s;-moz-transition:border linear .2s,box-shadow linear .2s;-o-transition:border linear .2s,box-shadow linear .2s;transition:border linear .2s,box-shadow linear .2s}textarea:focus,input[type="text"]:focus,input[type="password"]:focus,input[type="datetime"]:focus,input[type="datetime-local"]:focus,input[type="date"]:focus,input[type="month"]:focus,input[type="time"]:focus,input[type="week"]:focus,input[type="number"]:focus,input[type="email"]:focus,input[type="url"]:focus,input[type="search"]:focus,input[type="tel"]:focus,input[type="color"]:focus,.uneditable-input:focus{border-color:rgba(82,168,236,0.8);outline:0;outline:thin dotted \9;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 8px rgba(82,168,236,0.6)}input[type="radio"],input[type="checkbox"]{margin:4px 0 0;margin-top:1px \9;*margin-top:0;line-height:normal}input[type="file"],input[type="image"],input[type="submit"],input[type="reset"],input[type="button"],input[type="radio"],input[type="checkbox"]{width:auto}select,input[type="file"]{height:30px;*margin-top:4px;line-height:30px}select{width:220px;background-color:#fff;border:1px solid #ccc}select[multiple],select[size]{height:auto}select:focus,input[type="file"]:focus,input[type="radio"]:focus,input[type="checkbox"]:focus{outline:thin dotted #333;outline:5px auto -webkit-focus-ring-color;outline-offset:-2px}.uneditable-input,.uneditable-textarea{color:#999;cursor:not-allowed;background-color:#fcfcfc;border-color:#ccc;-webkit-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);-moz-box-shadow:inset 0 1px 2px rgba(0,0,0,0.025);box-shadow:inset 0 1px 2px rgba(0,0,0,0.025)}.uneditable-input{overflow:hidden;white-space:nowrap}.uneditable-textarea{width:auto;height:auto}input:-moz-placeholder,textarea:-moz-placeholder{color:#999}input:-ms-input-placeholder,textarea:-ms-input-placeholder{color:#999}input::-webkit-input-placeholder,textarea::-webkit-input-placeholder{color:#999}.radio,.checkbox{min-height:20px;padding-left:20px}.radio input[type="radio"],.checkbox input[type="checkbox"]{float:left;margin-left:-20px}.controls>.radio:first-child,.controls>.checkbox:first-child{padding-top:5px}.radio.inline,.checkbox.inline{display:inline-block;padding-top:5px;margin-bottom:0;vertical-align:middle}.radio.inline+.radio.inline,.checkbox.inline+.checkbox.inline{margin-left:10px}.input-mini{width:60px}.input-small{width:90px}.input-medium{width:150px}.input-large{width:210px}.input-xlarge{width:270px}.input-xxlarge{width:530px}input[class*="span"],select[class*="span"],textarea[class*="span"],.uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid 
select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"]{float:none;margin-left:0}.input-append input[class*="span"],.input-append .uneditable-input[class*="span"],.input-prepend input[class*="span"],.input-prepend .uneditable-input[class*="span"],.row-fluid input[class*="span"],.row-fluid select[class*="span"],.row-fluid textarea[class*="span"],.row-fluid .uneditable-input[class*="span"],.row-fluid .input-prepend [class*="span"],.row-fluid .input-append [class*="span"]{display:inline-block}input,textarea,.uneditable-input{margin-left:0}.controls-row [class*="span"]+[class*="span"]{margin-left:20px}input.span12,textarea.span12,.uneditable-input.span12{width:926px}input.span11,textarea.span11,.uneditable-input.span11{width:846px}input.span10,textarea.span10,.uneditable-input.span10{width:766px}input.span9,textarea.span9,.uneditable-input.span9{width:686px}input.span8,textarea.span8,.uneditable-input.span8{width:606px}input.span7,textarea.span7,.uneditable-input.span7{width:526px}input.span6,textarea.span6,.uneditable-input.span6{width:446px}input.span5,textarea.span5,.uneditable-input.span5{width:366px}input.span4,textarea.span4,.uneditable-input.span4{width:286px}input.span3,textarea.span3,.uneditable-input.span3{width:206px}input.span2,textarea.span2,.uneditable-input.span2{width:126px}input.span1,textarea.span1,.uneditable-input.span1{width:46px}.controls-row{*zoom:1}.controls-row:before,.controls-row:after{display:table;line-height:0;content:""}.controls-row:after{clear:both}.controls-row [class*="span"],.row-fluid .controls-row [class*="span"]{float:left}.controls-row .checkbox[class*="span"],.controls-row .radio[class*="span"]{padding-top:5px}input[disabled],select[disabled],textarea[disabled],input[readonly],select[readonly],textarea[readonly]{cursor:not-allowed;background-color:#eee}input[type="radio"][disabled],input[type="checkbox"][disabled],input[type="radio"][readonly],input[type="checkbox"][readonly]{background-color:transparent}.control-group.warning .control-label,.control-group.warning .help-block,.control-group.warning .help-inline{color:#c09853}.control-group.warning .checkbox,.control-group.warning .radio,.control-group.warning input,.control-group.warning select,.control-group.warning textarea{color:#c09853}.control-group.warning input,.control-group.warning select,.control-group.warning textarea{border-color:#c09853;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.warning input:focus,.control-group.warning select:focus,.control-group.warning textarea:focus{border-color:#a47e3c;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #dbc59e}.control-group.warning .input-prepend .add-on,.control-group.warning .input-append .add-on{color:#c09853;background-color:#fcf8e3;border-color:#c09853}.control-group.error .control-label,.control-group.error .help-block,.control-group.error .help-inline{color:#b94a48}.control-group.error .checkbox,.control-group.error .radio,.control-group.error input,.control-group.error select,.control-group.error textarea{color:#b94a48}.control-group.error input,.control-group.error select,.control-group.error textarea{border-color:#b94a48;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 
1px rgba(0,0,0,0.075)}.control-group.error input:focus,.control-group.error select:focus,.control-group.error textarea:focus{border-color:#953b39;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #d59392}.control-group.error .input-prepend .add-on,.control-group.error .input-append .add-on{color:#b94a48;background-color:#f2dede;border-color:#b94a48}.control-group.success .control-label,.control-group.success .help-block,.control-group.success .help-inline{color:#468847}.control-group.success .checkbox,.control-group.success .radio,.control-group.success input,.control-group.success select,.control-group.success textarea{color:#468847}.control-group.success input,.control-group.success select,.control-group.success textarea{border-color:#468847;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.success input:focus,.control-group.success select:focus,.control-group.success textarea:focus{border-color:#356635;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7aba7b}.control-group.success .input-prepend .add-on,.control-group.success .input-append .add-on{color:#468847;background-color:#dff0d8;border-color:#468847}.control-group.info .control-label,.control-group.info .help-block,.control-group.info .help-inline{color:#3a87ad}.control-group.info .checkbox,.control-group.info .radio,.control-group.info input,.control-group.info select,.control-group.info textarea{color:#3a87ad}.control-group.info input,.control-group.info select,.control-group.info textarea{border-color:#3a87ad;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075);box-shadow:inset 0 1px 1px rgba(0,0,0,0.075)}.control-group.info input:focus,.control-group.info select:focus,.control-group.info textarea:focus{border-color:#2d6987;-webkit-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3;-moz-box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3;box-shadow:inset 0 1px 1px rgba(0,0,0,0.075),0 0 6px #7ab5d3}.control-group.info .input-prepend .add-on,.control-group.info .input-append .add-on{color:#3a87ad;background-color:#d9edf7;border-color:#3a87ad}input:focus:invalid,textarea:focus:invalid,select:focus:invalid{color:#b94a48;border-color:#ee5f5b}input:focus:invalid:focus,textarea:focus:invalid:focus,select:focus:invalid:focus{border-color:#e9322d;-webkit-box-shadow:0 0 6px #f8b9b7;-moz-box-shadow:0 0 6px #f8b9b7;box-shadow:0 0 6px #f8b9b7}.form-actions{padding:19px 20px 20px;margin-top:20px;margin-bottom:20px;background-color:#f5f5f5;border-top:1px solid #e5e5e5;*zoom:1}.form-actions:before,.form-actions:after{display:table;line-height:0;content:""}.form-actions:after{clear:both}.help-block,.help-inline{color:#595959}.help-block{display:block;margin-bottom:10px}.help-inline{display:inline-block;*display:inline;padding-left:5px;vertical-align:middle;*zoom:1}.input-append,.input-prepend{display:inline-block;margin-bottom:10px;font-size:0;white-space:nowrap;vertical-align:middle}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input,.input-append .dropdown-menu,.input-prepend 
.dropdown-menu,.input-append .popover,.input-prepend .popover{font-size:14px}.input-append input,.input-prepend input,.input-append select,.input-prepend select,.input-append .uneditable-input,.input-prepend .uneditable-input{position:relative;margin-bottom:0;*margin-left:0;vertical-align:top;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-append input:focus,.input-prepend input:focus,.input-append select:focus,.input-prepend select:focus,.input-append .uneditable-input:focus,.input-prepend .uneditable-input:focus{z-index:2}.input-append .add-on,.input-prepend .add-on{display:inline-block;width:auto;height:20px;min-width:16px;padding:4px 5px;font-size:14px;font-weight:normal;line-height:20px;text-align:center;text-shadow:0 1px 0 #fff;background-color:#eee;border:1px solid #ccc}.input-append .add-on,.input-prepend .add-on,.input-append .btn,.input-prepend .btn,.input-append .btn-group>.dropdown-toggle,.input-prepend .btn-group>.dropdown-toggle{vertical-align:top;-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-append .active,.input-prepend .active{background-color:#a9dba9;border-color:#46a546}.input-prepend .add-on,.input-prepend .btn{margin-right:-1px}.input-prepend .add-on:first-child,.input-prepend .btn:first-child{-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-append input,.input-append select,.input-append .uneditable-input{-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-append input+.btn-group .btn:last-child,.input-append select+.btn-group .btn:last-child,.input-append .uneditable-input+.btn-group .btn:last-child{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-append .add-on,.input-append .btn,.input-append .btn-group{margin-left:-1px}.input-append .add-on:last-child,.input-append .btn:last-child,.input-append .btn-group:last-child>.dropdown-toggle{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append input,.input-prepend.input-append select,.input-prepend.input-append .uneditable-input{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.input-prepend.input-append input+.btn-group .btn,.input-prepend.input-append select+.btn-group .btn,.input-prepend.input-append .uneditable-input+.btn-group .btn{-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append .add-on:first-child,.input-prepend.input-append .btn:first-child{margin-right:-1px;-webkit-border-radius:4px 0 0 4px;-moz-border-radius:4px 0 0 4px;border-radius:4px 0 0 4px}.input-prepend.input-append .add-on:last-child,.input-prepend.input-append .btn:last-child{margin-left:-1px;-webkit-border-radius:0 4px 4px 0;-moz-border-radius:0 4px 4px 0;border-radius:0 4px 4px 0}.input-prepend.input-append .btn-group:first-child{margin-left:0}input.search-query{padding-right:14px;padding-right:4px \9;padding-left:14px;padding-left:4px \9;margin-bottom:0;-webkit-border-radius:15px;-moz-border-radius:15px;border-radius:15px}.form-search .input-append .search-query,.form-search .input-prepend .search-query{-webkit-border-radius:0;-moz-border-radius:0;border-radius:0}.form-search .input-append .search-query{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search .input-append .btn{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 
14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .search-query{-webkit-border-radius:0 14px 14px 0;-moz-border-radius:0 14px 14px 0;border-radius:0 14px 14px 0}.form-search .input-prepend .btn{-webkit-border-radius:14px 0 0 14px;-moz-border-radius:14px 0 0 14px;border-radius:14px 0 0 14px}.form-search input,.form-inline input,.form-horizontal input,.form-search textarea,.form-inline textarea,.form-horizontal textarea,.form-search select,.form-inline select,.form-horizontal select,.form-search .help-inline,.form-inline .help-inline,.form-horizontal .help-inline,.form-search .uneditable-input,.form-inline .uneditable-input,.form-horizontal .uneditable-input,.form-search .input-prepend,.form-inline .input-prepend,.form-horizontal .input-prepend,.form-search .input-append,.form-inline .input-append,.form-horizontal .input-append{display:inline-block;*display:inline;margin-bottom:0;vertical-align:middle;*zoom:1}.form-search .hide,.form-inline .hide,.form-horizontal .hide{display:none}.form-search label,.form-inline label,.form-search .btn-group,.form-inline .btn-group{display:inline-block}.form-search .input-append,.form-inline .input-append,.form-search .input-prepend,.form-inline .input-prepend{margin-bottom:0}.form-search .radio,.form-search .checkbox,.form-inline .radio,.form-inline .checkbox{padding-left:0;margin-bottom:0;vertical-align:middle}.form-search .radio input[type="radio"],.form-search .checkbox input[type="checkbox"],.form-inline .radio input[type="radio"],.form-inline .checkbox input[type="checkbox"]{float:left;margin-right:3px;margin-left:0}.control-group{margin-bottom:10px}legend+.control-group{margin-top:20px;-webkit-margin-top-collapse:separate}.form-horizontal .control-group{margin-bottom:20px;*zoom:1}.form-horizontal .control-group:before,.form-horizontal .control-group:after{display:table;line-height:0;content:""}.form-horizontal .control-group:after{clear:both}.form-horizontal .control-label{float:left;width:160px;padding-top:5px;text-align:right}.form-horizontal .controls{*display:inline-block;*padding-left:20px;margin-left:180px;*margin-left:0}.form-horizontal .controls:first-child{*padding-left:180px}.form-horizontal .help-block{margin-bottom:0}.form-horizontal input+.help-block,.form-horizontal select+.help-block,.form-horizontal textarea+.help-block,.form-horizontal .uneditable-input+.help-block,.form-horizontal .input-prepend+.help-block,.form-horizontal .input-append+.help-block{margin-top:10px}.form-horizontal .form-actions{padding-left:180px}table{max-width:100%;background-color:transparent;border-collapse:collapse;border-spacing:0}.table{width:100%;margin-bottom:20px}.table th,.table td{padding:8px;line-height:20px;text-align:left;vertical-align:top;border-top:1px solid #ddd}.table th{font-weight:bold}.table thead th{vertical-align:bottom}.table caption+thead tr:first-child th,.table caption+thead tr:first-child td,.table colgroup+thead tr:first-child th,.table colgroup+thead tr:first-child td,.table thead:first-child tr:first-child th,.table thead:first-child tr:first-child td{border-top:0}.table tbody+tbody{border-top:2px solid #ddd}.table .table{background-color:#fff}.table-condensed th,.table-condensed td{padding:4px 5px}.table-bordered{border:1px solid #ddd;border-collapse:separate;*border-collapse:collapse;border-left:0;-webkit-border-radius:4px;-moz-border-radius:4px;border-radius:4px}.table-bordered th,.table-bordered td{border-left:1px solid #ddd}.table-bordered caption+thead tr:first-child th,.table-bordered caption+tbody tr:first-child 
th,.table-bordered caption+tbody tr:first-child td,.table-bordered colgroup+thead tr:first-child th,.table-bordered colgroup+tbody tr:first-child th,.table-bordered colgroup+tbody tr:first-child td,.table-bordered thead:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child th,.table-bordered tbody:first-child tr:first-child td{border-top:0}.table-bordered thead:first-child tr:first-child>th:first-child,.table-bordered tbody:first-child tr:first-child>td:first-child,.table-bordered tbody:first-child tr:first-child>th:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered thead:first-child tr:first-child>th:last-child,.table-bordered tbody:first-child tr:first-child>td:last-child,.table-bordered tbody:first-child tr:first-child>th:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-bordered thead:last-child tr:last-child>th:first-child,.table-bordered tbody:last-child tr:last-child>td:first-child,.table-bordered tbody:last-child tr:last-child>th:first-child,.table-bordered tfoot:last-child tr:last-child>td:first-child,.table-bordered tfoot:last-child tr:last-child>th:first-child{-webkit-border-bottom-left-radius:4px;border-bottom-left-radius:4px;-moz-border-radius-bottomleft:4px}.table-bordered thead:last-child tr:last-child>th:last-child,.table-bordered tbody:last-child tr:last-child>td:last-child,.table-bordered tbody:last-child tr:last-child>th:last-child,.table-bordered tfoot:last-child tr:last-child>td:last-child,.table-bordered tfoot:last-child tr:last-child>th:last-child{-webkit-border-bottom-right-radius:4px;border-bottom-right-radius:4px;-moz-border-radius-bottomright:4px}.table-bordered tfoot+tbody:last-child tr:last-child td:first-child{-webkit-border-bottom-left-radius:0;border-bottom-left-radius:0;-moz-border-radius-bottomleft:0}.table-bordered tfoot+tbody:last-child tr:last-child td:last-child{-webkit-border-bottom-right-radius:0;border-bottom-right-radius:0;-moz-border-radius-bottomright:0}.table-bordered caption+thead tr:first-child th:first-child,.table-bordered caption+tbody tr:first-child td:first-child,.table-bordered colgroup+thead tr:first-child th:first-child,.table-bordered colgroup+tbody tr:first-child td:first-child{-webkit-border-top-left-radius:4px;border-top-left-radius:4px;-moz-border-radius-topleft:4px}.table-bordered caption+thead tr:first-child th:last-child,.table-bordered caption+tbody tr:first-child td:last-child,.table-bordered colgroup+thead tr:first-child th:last-child,.table-bordered colgroup+tbody tr:first-child td:last-child{-webkit-border-top-right-radius:4px;border-top-right-radius:4px;-moz-border-radius-topright:4px}.table-striped tbody>tr:nth-child(odd)>td,.table-striped tbody>tr:nth-child(odd)>th{background-color:#f9f9f9}.table-hover tbody tr:hover>td,.table-hover tbody tr:hover>th{background-color:#f5f5f5}table td[class*="span"],table th[class*="span"],.row-fluid table td[class*="span"],.row-fluid table th[class*="span"]{display:table-cell;float:none;margin-left:0}.table td.span1,.table th.span1{float:none;width:44px;margin-left:0}.table td.span2,.table th.span2{float:none;width:124px;margin-left:0}.table td.span3,.table th.span3{float:none;width:204px;margin-left:0}.table td.span4,.table th.span4{float:none;width:284px;margin-left:0}.table td.span5,.table th.span5{float:none;width:364px;margin-left:0}.table td.span6,.table th.span6{float:none;width:444px;margin-left:0}.table td.span7,.table 
.active{background-color:#fff}.carousel-caption{position:absolute;right:0;bottom:0;left:0;padding:15px;background:#333;background:rgba(0,0,0,0.75)}.carousel-caption h4,.carousel-caption p{line-height:20px;color:#fff}.carousel-caption h4{margin:0 0 5px}.carousel-caption p{margin-bottom:0}.hero-unit{padding:60px;margin-bottom:30px;font-size:18px;font-weight:200;line-height:30px;color:inherit;background-color:#eee;-webkit-border-radius:6px;-moz-border-radius:6px;border-radius:6px}.hero-unit h1{margin-bottom:0;font-size:60px;line-height:1;letter-spacing:-1px;color:inherit}.hero-unit li{line-height:30px}.pull-right{float:right}.pull-left{float:left}.hide{display:none}.show{display:block}.invisible{visibility:hidden}.affix{position:fixed} diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.js b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.js deleted file mode 100644 index 95c5ac5ee6..0000000000 --- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/bootstrap.min.js +++ /dev/null @@ -1,6 +0,0 @@ -/*! -* Bootstrap.js by @fat & @mdo -* Copyright 2012 Twitter, Inc. -* http://www.apache.org/licenses/LICENSE-2.0.txt -*/ -!function(e){"use strict";e(function(){e.support.transition=function(){var e=function(){var e=document.createElement("bootstrap"),t={WebkitTransition:"webkitTransitionEnd",MozTransition:"transitionend",OTransition:"oTransitionEnd otransitionend",transition:"transitionend"},n;for(n in t)if(e.style[n]!==undefined)return t[n]}();return e&&{end:e}}()})}(window.jQuery),!function(e){"use strict";var t='[data-dismiss="alert"]',n=function(n){e(n).on("click",t,this.close)};n.prototype.close=function(t){function s(){i.trigger("closed").remove()}var n=e(this),r=n.attr("data-target"),i;r||(r=n.attr("href"),r=r&&r.replace(/.*(?=#[^\s]*$)/,"")),i=e(r),t&&t.preventDefault(),i.length||(i=n.hasClass("alert")?n:n.parent()),i.trigger(t=e.Event("close"));if(t.isDefaultPrevented())return;i.removeClass("in"),e.support.transition&&i.hasClass("fade")?i.on(e.support.transition.end,s):s()};var r=e.fn.alert;e.fn.alert=function(t){return this.each(function(){var r=e(this),i=r.data("alert");i||r.data("alert",i=new n(this)),typeof t=="string"&&i[t].call(r)})},e.fn.alert.Constructor=n,e.fn.alert.noConflict=function(){return e.fn.alert=r,this},e(document).on("click.alert.data-api",t,n.prototype.close)}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.$element=e(t),this.options=e.extend({},e.fn.button.defaults,n)};t.prototype.setState=function(e){var t="disabled",n=this.$element,r=n.data(),i=n.is("input")?"val":"html";e+="Text",r.resetText||n.data("resetText",n[i]()),n[i](r[e]||this.options[e]),setTimeout(function(){e=="loadingText"?n.addClass(t).attr(t,t):n.removeClass(t).removeAttr(t)},0)},t.prototype.toggle=function(){var e=this.$element.closest('[data-toggle="buttons-radio"]');e&&e.find(".active").removeClass("active"),this.$element.toggleClass("active")};var n=e.fn.button;e.fn.button=function(n){return this.each(function(){var r=e(this),i=r.data("button"),s=typeof n=="object"&&n;i||r.data("button",i=new t(this,s)),n=="toggle"?i.toggle():n&&i.setState(n)})},e.fn.button.defaults={loadingText:"loading..."},e.fn.button.Constructor=t,e.fn.button.noConflict=function(){return e.fn.button=n,this},e(document).on("click.button.data-api","[data-toggle^=button]",function(t){var n=e(t.target);n.hasClass("btn")||(n=n.closest(".btn")),n.button("toggle")})}(window.jQuery),!function(e){"use strict";var 
t=function(t,n){this.$element=e(t),this.$indicators=this.$element.find(".carousel-indicators"),this.options=n,this.options.pause=="hover"&&this.$element.on("mouseenter",e.proxy(this.pause,this)).on("mouseleave",e.proxy(this.cycle,this))};t.prototype={cycle:function(t){return t||(this.paused=!1),this.interval&&clearInterval(this.interval),this.options.interval&&!this.paused&&(this.interval=setInterval(e.proxy(this.next,this),this.options.interval)),this},getActiveIndex:function(){return this.$active=this.$element.find(".item.active"),this.$items=this.$active.parent().children(),this.$items.index(this.$active)},to:function(t){var n=this.getActiveIndex(),r=this;if(t>this.$items.length-1||t<0)return;return this.sliding?this.$element.one("slid",function(){r.to(t)}):n==t?this.pause().cycle():this.slide(t>n?"next":"prev",e(this.$items[t]))},pause:function(t){return t||(this.paused=!0),this.$element.find(".next, .prev").length&&e.support.transition.end&&(this.$element.trigger(e.support.transition.end),this.cycle(!0)),clearInterval(this.interval),this.interval=null,this},next:function(){if(this.sliding)return;return this.slide("next")},prev:function(){if(this.sliding)return;return this.slide("prev")},slide:function(t,n){var r=this.$element.find(".item.active"),i=n||r[t](),s=this.interval,o=t=="next"?"left":"right",u=t=="next"?"first":"last",a=this,f;this.sliding=!0,s&&this.pause(),i=i.length?i:this.$element.find(".item")[u](),f=e.Event("slide",{relatedTarget:i[0],direction:o});if(i.hasClass("active"))return;this.$indicators.length&&(this.$indicators.find(".active").removeClass("active"),this.$element.one("slid",function(){var t=e(a.$indicators.children()[a.getActiveIndex()]);t&&t.addClass("active")}));if(e.support.transition&&this.$element.hasClass("slide")){this.$element.trigger(f);if(f.isDefaultPrevented())return;i.addClass(t),i[0].offsetWidth,r.addClass(o),i.addClass(o),this.$element.one(e.support.transition.end,function(){i.removeClass([t,o].join(" ")).addClass("active"),r.removeClass(["active",o].join(" ")),a.sliding=!1,setTimeout(function(){a.$element.trigger("slid")},0)})}else{this.$element.trigger(f);if(f.isDefaultPrevented())return;r.removeClass("active"),i.addClass("active"),this.sliding=!1,this.$element.trigger("slid")}return s&&this.cycle(),this}};var n=e.fn.carousel;e.fn.carousel=function(n){return this.each(function(){var r=e(this),i=r.data("carousel"),s=e.extend({},e.fn.carousel.defaults,typeof n=="object"&&n),o=typeof n=="string"?n:s.slide;i||r.data("carousel",i=new t(this,s)),typeof n=="number"?i.to(n):o?i[o]():s.interval&&i.pause().cycle()})},e.fn.carousel.defaults={interval:5e3,pause:"hover"},e.fn.carousel.Constructor=t,e.fn.carousel.noConflict=function(){return e.fn.carousel=n,this},e(document).on("click.carousel.data-api","[data-slide], [data-slide-to]",function(t){var n=e(this),r,i=e(n.attr("data-target")||(r=n.attr("href"))&&r.replace(/.*(?=#[^\s]+$)/,"")),s=e.extend({},i.data(),n.data()),o;i.carousel(s),(o=n.attr("data-slide-to"))&&i.data("carousel").pause().to(o).cycle(),t.preventDefault()})}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.$element=e(t),this.options=e.extend({},e.fn.collapse.defaults,n),this.options.parent&&(this.$parent=e(this.options.parent)),this.options.toggle&&this.toggle()};t.prototype={constructor:t,dimension:function(){var e=this.$element.hasClass("width");return e?"width":"height"},show:function(){var 
t,n,r,i;if(this.transitioning||this.$element.hasClass("in"))return;t=this.dimension(),n=e.camelCase(["scroll",t].join("-")),r=this.$parent&&this.$parent.find("> .accordion-group > .in");if(r&&r.length){i=r.data("collapse");if(i&&i.transitioning)return;r.collapse("hide"),i||r.data("collapse",null)}this.$element[t](0),this.transition("addClass",e.Event("show"),"shown"),e.support.transition&&this.$element[t](this.$element[0][n])},hide:function(){var t;if(this.transitioning||!this.$element.hasClass("in"))return;t=this.dimension(),this.reset(this.$element[t]()),this.transition("removeClass",e.Event("hide"),"hidden"),this.$element[t](0)},reset:function(e){var t=this.dimension();return this.$element.removeClass("collapse")[t](e||"auto")[0].offsetWidth,this.$element[e!==null?"addClass":"removeClass"]("collapse"),this},transition:function(t,n,r){var i=this,s=function(){n.type=="show"&&i.reset(),i.transitioning=0,i.$element.trigger(r)};this.$element.trigger(n);if(n.isDefaultPrevented())return;this.transitioning=1,this.$element[t]("in"),e.support.transition&&this.$element.hasClass("collapse")?this.$element.one(e.support.transition.end,s):s()},toggle:function(){this[this.$element.hasClass("in")?"hide":"show"]()}};var n=e.fn.collapse;e.fn.collapse=function(n){return this.each(function(){var r=e(this),i=r.data("collapse"),s=e.extend({},e.fn.collapse.defaults,r.data(),typeof n=="object"&&n);i||r.data("collapse",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.collapse.defaults={toggle:!0},e.fn.collapse.Constructor=t,e.fn.collapse.noConflict=function(){return e.fn.collapse=n,this},e(document).on("click.collapse.data-api","[data-toggle=collapse]",function(t){var n=e(this),r,i=n.attr("data-target")||t.preventDefault()||(r=n.attr("href"))&&r.replace(/.*(?=#[^\s]+$)/,""),s=e(i).data("collapse")?"toggle":n.data();n[e(i).hasClass("in")?"addClass":"removeClass"]("collapsed"),e(i).collapse(s)})}(window.jQuery),!function(e){"use strict";function r(){e(t).each(function(){i(e(this)).removeClass("open")})}function i(t){var n=t.attr("data-target"),r;n||(n=t.attr("href"),n=n&&/#/.test(n)&&n.replace(/.*(?=#[^\s]*$)/,"")),r=n&&e(n);if(!r||!r.length)r=t.parent();return r}var t="[data-toggle=dropdown]",n=function(t){var n=e(t).on("click.dropdown.data-api",this.toggle);e("html").on("click.dropdown.data-api",function(){n.parent().removeClass("open")})};n.prototype={constructor:n,toggle:function(t){var n=e(this),s,o;if(n.is(".disabled, :disabled"))return;return s=i(n),o=s.hasClass("open"),r(),o||s.toggleClass("open"),n.focus(),!1},keydown:function(n){var r,s,o,u,a,f;if(!/(38|40|27)/.test(n.keyCode))return;r=e(this),n.preventDefault(),n.stopPropagation();if(r.is(".disabled, :disabled"))return;u=i(r),a=u.hasClass("open");if(!a||a&&n.keyCode==27)return n.which==27&&u.find(t).focus(),r.click();s=e("[role=menu] li:not(.divider):visible a",u);if(!s.length)return;f=s.index(s.filter(":focus")),n.keyCode==38&&f>0&&f--,n.keyCode==40&&f').appendTo(document.body),this.$backdrop.click(this.options.backdrop=="static"?e.proxy(this.$element[0].focus,this.$element[0]):e.proxy(this.hide,this)),i&&this.$backdrop[0].offsetWidth,this.$backdrop.addClass("in");if(!t)return;i?this.$backdrop.one(e.support.transition.end,t):t()}else!this.isShown&&this.$backdrop?(this.$backdrop.removeClass("in"),e.support.transition&&this.$element.hasClass("fade")?this.$backdrop.one(e.support.transition.end,t):t()):t&&t()}};var n=e.fn.modal;e.fn.modal=function(n){return this.each(function(){var 
r=e(this),i=r.data("modal"),s=e.extend({},e.fn.modal.defaults,r.data(),typeof n=="object"&&n);i||r.data("modal",i=new t(this,s)),typeof n=="string"?i[n]():s.show&&i.show()})},e.fn.modal.defaults={backdrop:!0,keyboard:!0,show:!0},e.fn.modal.Constructor=t,e.fn.modal.noConflict=function(){return e.fn.modal=n,this},e(document).on("click.modal.data-api",'[data-toggle="modal"]',function(t){var n=e(this),r=n.attr("href"),i=e(n.attr("data-target")||r&&r.replace(/.*(?=#[^\s]+$)/,"")),s=i.data("modal")?"toggle":e.extend({remote:!/#/.test(r)&&r},i.data(),n.data());t.preventDefault(),i.modal(s).one("hide",function(){n.focus()})})}(window.jQuery),!function(e){"use strict";var t=function(e,t){this.init("tooltip",e,t)};t.prototype={constructor:t,init:function(t,n,r){var i,s,o,u,a;this.type=t,this.$element=e(n),this.options=this.getOptions(r),this.enabled=!0,o=this.options.trigger.split(" ");for(a=o.length;a--;)u=o[a],u=="click"?this.$element.on("click."+this.type,this.options.selector,e.proxy(this.toggle,this)):u!="manual"&&(i=u=="hover"?"mouseenter":"focus",s=u=="hover"?"mouseleave":"blur",this.$element.on(i+"."+this.type,this.options.selector,e.proxy(this.enter,this)),this.$element.on(s+"."+this.type,this.options.selector,e.proxy(this.leave,this)));this.options.selector?this._options=e.extend({},this.options,{trigger:"manual",selector:""}):this.fixTitle()},getOptions:function(t){return t=e.extend({},e.fn[this.type].defaults,this.$element.data(),t),t.delay&&typeof t.delay=="number"&&(t.delay={show:t.delay,hide:t.delay}),t},enter:function(t){var n=e.fn[this.type].defaults,r={},i;this._options&&e.each(this._options,function(e,t){n[e]!=t&&(r[e]=t)},this),i=e(t.currentTarget)[this.type](r).data(this.type);if(!i.options.delay||!i.options.delay.show)return i.show();clearTimeout(this.timeout),i.hoverState="in",this.timeout=setTimeout(function(){i.hoverState=="in"&&i.show()},i.options.delay.show)},leave:function(t){var n=e(t.currentTarget)[this.type](this._options).data(this.type);this.timeout&&clearTimeout(this.timeout);if(!n.options.delay||!n.options.delay.hide)return n.hide();n.hoverState="out",this.timeout=setTimeout(function(){n.hoverState=="out"&&n.hide()},n.options.delay.hide)},show:function(){var t,n,r,i,s,o,u=e.Event("show");if(this.hasContent()&&this.enabled){this.$element.trigger(u);if(u.isDefaultPrevented())return;t=this.tip(),this.setContent(),this.options.animation&&t.addClass("fade"),s=typeof this.options.placement=="function"?this.options.placement.call(this,t[0],this.$element[0]):this.options.placement,t.detach().css({top:0,left:0,display:"block"}),this.options.container?t.appendTo(this.options.container):t.insertAfter(this.$element),n=this.getPosition(),r=t[0].offsetWidth,i=t[0].offsetHeight;switch(s){case"bottom":o={top:n.top+n.height,left:n.left+n.width/2-r/2};break;case"top":o={top:n.top-i,left:n.left+n.width/2-r/2};break;case"left":o={top:n.top+n.height/2-i/2,left:n.left-r};break;case"right":o={top:n.top+n.height/2-i/2,left:n.left+n.width}}this.applyPlacement(o,s),this.$element.trigger("shown")}},applyPlacement:function(e,t){var n=this.tip(),r=n[0].offsetWidth,i=n[0].offsetHeight,s,o,u,a;n.offset(e).addClass(t).addClass("in"),s=n[0].offsetWidth,o=n[0].offsetHeight,t=="top"&&o!=i&&(e.top=e.top+i-o,a=!0),t=="bottom"||t=="top"?(u=0,e.left<0&&(u=e.left*-2,e.left=0,n.offset(e),s=n[0].offsetWidth,o=n[0].offsetHeight),this.replaceArrow(u-r+s,s,"left")):this.replaceArrow(o-i,o,"top"),a&&n.offset(e)},replaceArrow:function(e,t,n){this.arrow().css(n,e?50*(1-e/t)+"%":"")},setContent:function(){var 
e=this.tip(),t=this.getTitle();e.find(".tooltip-inner")[this.options.html?"html":"text"](t),e.removeClass("fade in top bottom left right")},hide:function(){function i(){var t=setTimeout(function(){n.off(e.support.transition.end).detach()},500);n.one(e.support.transition.end,function(){clearTimeout(t),n.detach()})}var t=this,n=this.tip(),r=e.Event("hide");this.$element.trigger(r);if(r.isDefaultPrevented())return;return n.removeClass("in"),e.support.transition&&this.$tip.hasClass("fade")?i():n.detach(),this.$element.trigger("hidden"),this},fixTitle:function(){var e=this.$element;(e.attr("title")||typeof e.attr("data-original-title")!="string")&&e.attr("data-original-title",e.attr("title")||"").attr("title","")},hasContent:function(){return this.getTitle()},getPosition:function(){var t=this.$element[0];return e.extend({},typeof t.getBoundingClientRect=="function"?t.getBoundingClientRect():{width:t.offsetWidth,height:t.offsetHeight},this.$element.offset())},getTitle:function(){var e,t=this.$element,n=this.options;return e=t.attr("data-original-title")||(typeof n.title=="function"?n.title.call(t[0]):n.title),e},tip:function(){return this.$tip=this.$tip||e(this.options.template)},arrow:function(){return this.$arrow=this.$arrow||this.tip().find(".tooltip-arrow")},validate:function(){this.$element[0].parentNode||(this.hide(),this.$element=null,this.options=null)},enable:function(){this.enabled=!0},disable:function(){this.enabled=!1},toggleEnabled:function(){this.enabled=!this.enabled},toggle:function(t){var n=t?e(t.currentTarget)[this.type](this._options).data(this.type):this;n.tip().hasClass("in")?n.hide():n.show()},destroy:function(){this.hide().$element.off("."+this.type).removeData(this.type)}};var n=e.fn.tooltip;e.fn.tooltip=function(n){return this.each(function(){var r=e(this),i=r.data("tooltip"),s=typeof n=="object"&&n;i||r.data("tooltip",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.tooltip.Constructor=t,e.fn.tooltip.defaults={animation:!0,placement:"top",selector:!1,template:'
    ',trigger:"hover focus",title:"",delay:0,html:!1,container:!1},e.fn.tooltip.noConflict=function(){return e.fn.tooltip=n,this}}(window.jQuery),!function(e){"use strict";var t=function(e,t){this.init("popover",e,t)};t.prototype=e.extend({},e.fn.tooltip.Constructor.prototype,{constructor:t,setContent:function(){var e=this.tip(),t=this.getTitle(),n=this.getContent();e.find(".popover-title")[this.options.html?"html":"text"](t),e.find(".popover-content")[this.options.html?"html":"text"](n),e.removeClass("fade top bottom left right in")},hasContent:function(){return this.getTitle()||this.getContent()},getContent:function(){var e,t=this.$element,n=this.options;return e=(typeof n.content=="function"?n.content.call(t[0]):n.content)||t.attr("data-content"),e},tip:function(){return this.$tip||(this.$tip=e(this.options.template)),this.$tip},destroy:function(){this.hide().$element.off("."+this.type).removeData(this.type)}});var n=e.fn.popover;e.fn.popover=function(n){return this.each(function(){var r=e(this),i=r.data("popover"),s=typeof n=="object"&&n;i||r.data("popover",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.popover.Constructor=t,e.fn.popover.defaults=e.extend({},e.fn.tooltip.defaults,{placement:"right",trigger:"click",content:"",template:'

    '}),e.fn.popover.noConflict=function(){return e.fn.popover=n,this}}(window.jQuery),!function(e){"use strict";function t(t,n){var r=e.proxy(this.process,this),i=e(t).is("body")?e(window):e(t),s;this.options=e.extend({},e.fn.scrollspy.defaults,n),this.$scrollElement=i.on("scroll.scroll-spy.data-api",r),this.selector=(this.options.target||(s=e(t).attr("href"))&&s.replace(/.*(?=#[^\s]+$)/,"")||"")+" .nav li > a",this.$body=e("body"),this.refresh(),this.process()}t.prototype={constructor:t,refresh:function(){var t=this,n;this.offsets=e([]),this.targets=e([]),n=this.$body.find(this.selector).map(function(){var n=e(this),r=n.data("target")||n.attr("href"),i=/^#\w/.test(r)&&e(r);return i&&i.length&&[[i.position().top+(!e.isWindow(t.$scrollElement.get(0))&&t.$scrollElement.scrollTop()),r]]||null}).sort(function(e,t){return e[0]-t[0]}).each(function(){t.offsets.push(this[0]),t.targets.push(this[1])})},process:function(){var e=this.$scrollElement.scrollTop()+this.options.offset,t=this.$scrollElement[0].scrollHeight||this.$body[0].scrollHeight,n=t-this.$scrollElement.height(),r=this.offsets,i=this.targets,s=this.activeTarget,o;if(e>=n)return s!=(o=i.last()[0])&&this.activate(o);for(o=r.length;o--;)s!=i[o]&&e>=r[o]&&(!r[o+1]||e<=r[o+1])&&this.activate(i[o])},activate:function(t){var n,r;this.activeTarget=t,e(this.selector).parent(".active").removeClass("active"),r=this.selector+'[data-target="'+t+'"],'+this.selector+'[href="'+t+'"]',n=e(r).parent("li").addClass("active"),n.parent(".dropdown-menu").length&&(n=n.closest("li.dropdown").addClass("active")),n.trigger("activate")}};var n=e.fn.scrollspy;e.fn.scrollspy=function(n){return this.each(function(){var r=e(this),i=r.data("scrollspy"),s=typeof n=="object"&&n;i||r.data("scrollspy",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.scrollspy.Constructor=t,e.fn.scrollspy.defaults={offset:10},e.fn.scrollspy.noConflict=function(){return e.fn.scrollspy=n,this},e(window).on("load",function(){e('[data-spy="scroll"]').each(function(){var t=e(this);t.scrollspy(t.data())})})}(window.jQuery),!function(e){"use strict";var t=function(t){this.element=e(t)};t.prototype={constructor:t,show:function(){var t=this.element,n=t.closest("ul:not(.dropdown-menu)"),r=t.attr("data-target"),i,s,o;r||(r=t.attr("href"),r=r&&r.replace(/.*(?=#[^\s]*$)/,""));if(t.parent("li").hasClass("active"))return;i=n.find(".active:last a")[0],o=e.Event("show",{relatedTarget:i}),t.trigger(o);if(o.isDefaultPrevented())return;s=e(r),this.activate(t.parent("li"),n),this.activate(s,s.parent(),function(){t.trigger({type:"shown",relatedTarget:i})})},activate:function(t,n,r){function o(){i.removeClass("active").find("> .dropdown-menu > .active").removeClass("active"),t.addClass("active"),s?(t[0].offsetWidth,t.addClass("in")):t.removeClass("fade"),t.parent(".dropdown-menu")&&t.closest("li.dropdown").addClass("active"),r&&r()}var i=n.find("> .active"),s=r&&e.support.transition&&i.hasClass("fade");s?i.one(e.support.transition.end,o):o(),i.removeClass("in")}};var n=e.fn.tab;e.fn.tab=function(n){return this.each(function(){var r=e(this),i=r.data("tab");i||r.data("tab",i=new t(this)),typeof n=="string"&&i[n]()})},e.fn.tab.Constructor=t,e.fn.tab.noConflict=function(){return e.fn.tab=n,this},e(document).on("click.tab.data-api",'[data-toggle="tab"], [data-toggle="pill"]',function(t){t.preventDefault(),e(this).tab("show")})}(window.jQuery),!function(e){"use strict";var 
t=function(t,n){this.$element=e(t),this.options=e.extend({},e.fn.typeahead.defaults,n),this.matcher=this.options.matcher||this.matcher,this.sorter=this.options.sorter||this.sorter,this.highlighter=this.options.highlighter||this.highlighter,this.updater=this.options.updater||this.updater,this.source=this.options.source,this.$menu=e(this.options.menu),this.shown=!1,this.listen()};t.prototype={constructor:t,select:function(){var e=this.$menu.find(".active").attr("data-value");return this.$element.val(this.updater(e)).change(),this.hide()},updater:function(e){return e},show:function(){var t=e.extend({},this.$element.position(),{height:this.$element[0].offsetHeight});return this.$menu.insertAfter(this.$element).css({top:t.top+t.height,left:t.left}).show(),this.shown=!0,this},hide:function(){return this.$menu.hide(),this.shown=!1,this},lookup:function(t){var n;return this.query=this.$element.val(),!this.query||this.query.length"+t+""})},render:function(t){var n=this;return t=e(t).map(function(t,r){return t=e(n.options.item).attr("data-value",r),t.find("a").html(n.highlighter(r)),t[0]}),t.first().addClass("active"),this.$menu.html(t),this},next:function(t){var n=this.$menu.find(".active").removeClass("active"),r=n.next();r.length||(r=e(this.$menu.find("li")[0])),r.addClass("active")},prev:function(e){var t=this.$menu.find(".active").removeClass("active"),n=t.prev();n.length||(n=this.$menu.find("li").last()),n.addClass("active")},listen:function(){this.$element.on("focus",e.proxy(this.focus,this)).on("blur",e.proxy(this.blur,this)).on("keypress",e.proxy(this.keypress,this)).on("keyup",e.proxy(this.keyup,this)),this.eventSupported("keydown")&&this.$element.on("keydown",e.proxy(this.keydown,this)),this.$menu.on("click",e.proxy(this.click,this)).on("mouseenter","li",e.proxy(this.mouseenter,this)).on("mouseleave","li",e.proxy(this.mouseleave,this))},eventSupported:function(e){var t=e in this.$element;return t||(this.$element.setAttribute(e,"return;"),t=typeof this.$element[e]=="function"),t},move:function(e){if(!this.shown)return;switch(e.keyCode){case 9:case 13:case 27:e.preventDefault();break;case 38:e.preventDefault(),this.prev();break;case 40:e.preventDefault(),this.next()}e.stopPropagation()},keydown:function(t){this.suppressKeyPressRepeat=~e.inArray(t.keyCode,[40,38,9,13,27]),this.move(t)},keypress:function(e){if(this.suppressKeyPressRepeat)return;this.move(e)},keyup:function(e){switch(e.keyCode){case 40:case 38:case 16:case 17:case 18:break;case 9:case 13:if(!this.shown)return;this.select();break;case 27:if(!this.shown)return;this.hide();break;default:this.lookup()}e.stopPropagation(),e.preventDefault()},focus:function(e){this.focused=!0},blur:function(e){this.focused=!1,!this.mousedover&&this.shown&&this.hide()},click:function(e){e.stopPropagation(),e.preventDefault(),this.select(),this.$element.focus()},mouseenter:function(t){this.mousedover=!0,this.$menu.find(".active").removeClass("active"),e(t.currentTarget).addClass("active")},mouseleave:function(e){this.mousedover=!1,!this.focused&&this.shown&&this.hide()}};var n=e.fn.typeahead;e.fn.typeahead=function(n){return this.each(function(){var r=e(this),i=r.data("typeahead"),s=typeof n=="object"&&n;i||r.data("typeahead",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.typeahead.defaults={source:[],items:8,menu:'',item:'
  • ',minLength:1},e.fn.typeahead.Constructor=t,e.fn.typeahead.noConflict=function(){return e.fn.typeahead=n,this},e(document).on("focus.typeahead.data-api",'[data-provide="typeahead"]',function(t){var n=e(this);if(n.data("typeahead"))return;n.typeahead(n.data())})}(window.jQuery),!function(e){"use strict";var t=function(t,n){this.options=e.extend({},e.fn.affix.defaults,n),this.$window=e(window).on("scroll.affix.data-api",e.proxy(this.checkPosition,this)).on("click.affix.data-api",e.proxy(function(){setTimeout(e.proxy(this.checkPosition,this),1)},this)),this.$element=e(t),this.checkPosition()};t.prototype.checkPosition=function(){if(!this.$element.is(":visible"))return;var t=e(document).height(),n=this.$window.scrollTop(),r=this.$element.offset(),i=this.options.offset,s=i.bottom,o=i.top,u="affix affix-top affix-bottom",a;typeof i!="object"&&(s=o=i),typeof o=="function"&&(o=i.top()),typeof s=="function"&&(s=i.bottom()),a=this.unpin!=null&&n+this.unpin<=r.top?!1:s!=null&&r.top+this.$element.height()>=t-s?"bottom":o!=null&&n<=o?"top":!1;if(this.affixed===a)return;this.affixed=a,this.unpin=a=="bottom"?r.top-n:null,this.$element.removeClass(u).addClass("affix"+(a?"-"+a:""))};var n=e.fn.affix;e.fn.affix=function(n){return this.each(function(){var r=e(this),i=r.data("affix"),s=typeof n=="object"&&n;i||r.data("affix",i=new t(this,s)),typeof n=="string"&&i[n]()})},e.fn.affix.Constructor=t,e.fn.affix.defaults={offset:0},e.fn.affix.noConflict=function(){return e.fn.affix=n,this},e(window).on("load",function(){e('[data-spy="affix"]').each(function(){var t=e(this),n=t.data();n.offset=n.offset||{},n.offsetBottom&&(n.offset.bottom=n.offsetBottom),n.offsetTop&&(n.offset.top=n.offsetTop),t.affix(n)})})}(window.jQuery); \ No newline at end of file diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/glyphicons-halflings-white.png b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/glyphicons-halflings-white.png deleted file mode 100644 index 3bf6484a29d8da269f9bc874b25493a45fae3bae..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 8777 zcmZvC1yGz#v+m*$LXcp=A$ZWB0fL7wNbp_U*$~{_gL`my3oP#L!5tQYy99Ta`+g_q zKlj|KJ2f@c)ARJx{q*bbkhN_!|Wn*Vos8{TEhUT@5e;_WJsIMMcG5%>DiS&dv_N`4@J0cnAQ-#>RjZ z00W5t&tJ^l-QC*ST1-p~00u^9XJ=AUl7oW-;2a+x2k__T=grN{+1c4XK0ZL~^z^i$ zp&>vEhr@4fZWb380S18T&!0cQ3IKpHF)?v=b_NIm0Q>vwY7D0baZ)n z31Fa5sELUQARIVaU0nqf0XzT+fB_63aA;@<$l~wse|mcA;^G1TmX?-)e)jkGPfkuA z92@|!<>h5S_4f8QP-JRq>d&7)^Yin8l7K8gED$&_FaV?gY+wLjpoW%~7NDe=nHfMG z5DO3j{R9kv5GbssrUpO)OyvVrlx>u0UKD0i;Dpm5S5dY16(DL5l{ixz|mhJU@&-OWCTb7_%}8-fE(P~+XIRO zJU|wp1|S>|J3KrLcz^+v1f&BDpd>&MAaibR4#5A_4(MucZwG9E1h4@u0P@C8;oo+g zIVj7kfJi{oV~E(NZ*h(@^-(Q(C`Psb3KZ{N;^GB(a8NE*Vwc715!9 zr-H4Ao|T_c6+VT_JH9H+P3>iXSt!a$F`>s`jn`w9GZ_~B!{0soaiV|O_c^R2aWa%}O3jUE)WO=pa zs~_Wz08z|ieY5A%$@FcBF9^!1a}m5ks@7gjn;67N>}S~Hrm`4sM5Hh`q7&5-N{|31 z6x1{ol7BnskoViZ0GqbLa#kW`Z)VCjt1MysKg|rT zi!?s##Ck>8c zpi|>$lGlw#@yMNi&V4`6OBGJ(H&7lqLlcTQ&1zWriG_fL>BnFcr~?;E93{M-xIozQ zO=EHQ#+?<}%@wbWWv23#!V70h9MOuUVaU>3kpTvYfc|LBw?&b*89~Gc9i&8tlT#kF ztpbZoAzkdB+UTy=tx%L3Z4)I{zY(Kb)eg{InobSJmNwPZt$14aS-uc4eKuY8h$dtfyxu^a%zA)>fYI&)@ZXky?^{5>xSC?;w4r&td6vBdi%vHm4=XJH!3yL3?Ep+T5aU_>i;yr_XGq zxZfCzUU@GvnoIk+_Nd`aky>S&H!b*{A%L>?*XPAgWL(Vf(k7qUS}>Zn=U(ZfcOc{B z3*tOHH@t5Ub5D~#N7!Fxx}P2)sy{vE_l(R7$aW&CX>c|&HY+7};vUIietK%}!phrCuh+;C@1usp;XLU<8Gq8P!rEI3ieg#W$!= zQcZr{hp>8sF?k&Yl0?B84OneiQxef-4TEFrq3O~JAZR}yEJHA|Xkqd49tR&8oq{zP zY@>J^HBV*(gJvJZc_0VFN7Sx?H7#75E3#?N8Z!C+_f53YU}pyggxx1?wQi5Yb-_`I`_V*SMx5+*P^b=ec5RON-k1cIlsBLk}(HiaJyab0`CI 
[binary patch data for glyphicons-halflings-white.png omitted]
diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/glyphicons-halflings.png b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/glyphicons-halflings.png
deleted file mode 100644
index a9969993201f9cee63cf9f49217646347297b643..0000000000000000000000000000000000000000
GIT binary patch
literal 0
HcmV?d00001

literal 12799
zY;a)Tkr2P~0^2BeO?wnzF_Ul-ekY=-w26VnU%U3f19Z-pj&2 z4J_a|o4Dci+MO)mPQIM>kdPG1xydiR9@#8m zh27D7GF{p|a{8({Q-Pr-;#jV{2zHR>lGoFtIfIpoMo?exuQyX_A;;l0AP4!)JEM$EwMInZkj+8*IHP4vKRd zKx_l-i*>A*C@{u%ct`y~s6MWAfO{@FPIX&sg8H{GMDc{4M3%$@c8&RAlw0-R<4DO3 trJqdc$mBpWeznn?E0M$F`|3v=`3%T2A17h;rxP7$%JLd=6(2u;`(N3pt&so# diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/guzzle.css_t b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/guzzle.css_t deleted file mode 100644 index ffd2ec294c..0000000000 --- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/guzzle.css_t +++ /dev/null @@ -1,729 +0,0 @@ -/* Sphinx themes --------------------------------------------------- */ - -.container { - max-width: 960px; -} - -body { - position: relative; - padding: 40px 0 0 0; - font-family: Helvetica, arial, freesans, clean, sans-serif; -} - -div.clearer { - clear: both; -} - -div.document { - margin: 0 auto; - font-size: 15px; - line-height: 1.5em; -} - -.body { - width: 713px; - float: right; -} - -div.related { - display: none; -} - -a.headerlink { - visibility: hidden; - color: #ddd; - padding: 0 4px; - text-decoration: none; -} - -h1:hover > a.headerlink, -h2:hover > a.headerlink, -h3:hover > a.headerlink, -h4:hover > a.headerlink, -h5:hover > a.headerlink, -h6:hover > a.headerlink, -dt:hover > a.headerlink { - visibility: visible; -} - -a.headerlink:hover { - color: #444; - background: #eaeaea; - text-decoration: none; -} - -div.body h1, -div.body h2, -div.body h3, -div.body h4, -div.body h5, -div.body h6 { - font-family: Helvetica, arial, freesans, clean, sans-serif; - font-weight: normal; - margin: 30px 0px 10px 0px; - padding: 0; -} - -div.body h1 { margin-top: 0; padding-top: 0; font-size: 240%; font-weight: bold; } -div.body h2 { font-size: 180%; border-bottom: 1px solid #ccc; padding-bottom: 6px; font-weight: bold; } -div.body h3 { font-size: 150%; font-weight: bold; } -div.body h4 { font-size: 130%; } -div.body h5 { font-size: 110%; } -div.body h6 { font-size: 100%; } - -p { - margin: 1em 0; -} - -div.body p, div.body dd, div.body li { - line-height: 1.5em; -} - -tt { - font-family: Consolas, Monaco, Menlo, "Courier New", monospace; - font-size: 14px; - color: #333; -} - -code, pre { - font-family: Consolas, Monaco, Menlo, "Courier New", monospace; - font-size: 14px; - color: #333333; - line-height: 23px; - margin: 20px 0; -} - -div.highlight { - background-color: white; -} - -.navbar .brand { - font-weight: bold; -} - -[class^="icon-"], -[class*=" icon-"] { - background-image: url("../_static/glyphicons-halflings.png"); -} - -.icon-white, -.nav-pills > .active > a > [class^="icon-"], -.nav-pills > .active > a > [class*=" icon-"], -.nav-list > .active > a > [class^="icon-"], -.nav-list > .active > a > [class*=" icon-"], -.navbar-inverse .nav > .active > a > [class^="icon-"], -.navbar-inverse .nav > .active > a > [class*=" icon-"], -.dropdown-menu > li > a:hover > [class^="icon-"], -.dropdown-menu > li > a:focus > [class^="icon-"], -.dropdown-menu > li > a:hover > [class*=" icon-"], -.dropdown-menu > li > a:focus > [class*=" icon-"], -.dropdown-menu > .active > a > [class^="icon-"], -.dropdown-menu > .active > a > [class*=" icon-"], -.dropdown-submenu:hover > a > [class^="icon-"], -.dropdown-submenu:focus > a > [class^="icon-"], -.dropdown-submenu:hover > a > [class*=" icon-"], -.dropdown-submenu:focus > a > [class*=" icon-"] { - background-image: url("../_static/glyphicons-halflings-white.png"); -} - -a.internal em { - font-style: normal; -} - -/* Sphinx sidebar 
--------------------------------------------------- */ - -div.sphinxsidebar { - font-size: 14px; - line-height: 1.5; - float: left; - width: 200px; - padding: 0; - word-wrap: break-word; -} - -div.sphinxsidebarwrapper p.logo { - padding: 0 0 8px 0; - margin: 0; -} - -div.sphinxsidebar h3, -div.sphinxsidebar h4 { - font-family: Helvetica, arial, freesans, clean, sans-serif; - color: #444; - font-size: 24px; - font-weight: normal; - margin: 0 0 5px 0; - padding: 0; -} - -div.sphinxsidebar h4 { - font-size: 20px; -} - -div.sphinxsidebar h3 a { - color: #444; -} - -div.sphinxsidebar p.logo a, -div.sphinxsidebar h3 a, -div.sphinxsidebar p.logo a:hover, -div.sphinxsidebar h3 a:hover { - border: none; -} - -div.sphinxsidebarwrapper h3.logo { - margin: 0; -} - -div.sphinxsidebar p { - color: #555; - margin: 10px 0; -} - -div.sphinxsidebar ul { - margin: 0 0 12px 0; - list-style-type: none; - color: #000; -} - -div.sphinxsidebar ul ul, -div.sphinxsidebar ul.want-points { - margin-left: 20px; - list-style: square; -} - -div.sphinxsidebar ul ul { - margin-top: 0; - margin-bottom: 0; -} - -div.sphinxsidebar input { - border: 1px solid #ccc; - font-family: Helvetica, arial, freesans, clean, sans-serif; - font-size: 1em; -} - -.margin-top-1em { - margin-top: 1em; -} - -/* Two-pane table list --------------------------------------------------- */ - -table.two-column.table-bordered caption+thead tr:first-child th:first-child, -table.two-column.table-bordered caption+tbody tr:first-child td:first-child, -table.two-column.table-bordered colgroup+thead tr:first-child th:first-child, -table.two-column.table-bordered colgroup+tbody tr:first-child td:first-child, -table.two-column tbody td { - border: 0 0 1px 0 solid #eee; - border-left: none; - padding: 8px 4px; - font-size: 16px; -} - -table.two-column { - width: 100%; - border: 0px none; - box-shadow: none; -} - -/* GitHub stars in the menu --------------------------------------------------- */ - -#github-stars { - float: right; - margin: 10px 0 0 0; - padding: 0; -} - -#github-stars iframe { - margin: 0; - padding: 0; -} - -/* Disqus comments styles --------------------------------------------------- */ - -.comment-container { - margin: 24px auto; -} - -/* Next and previous links --------------------------------------------------- */ - -.top-links .breadcrumb { - margin: 5px 0 0 0; -} - -.top-links .breadcrumb, .footer-links .breadcrumb { - background-color: #fff; -} - -.top-links a { - opacity: 0.7; -} - -.footer-links { - border-top: 1px solid #ccc; - padding-top: 12px; - margin-top: 30px; -} - -.rel-spacer { - height: 40px; -} - -/* Footer styling --------------------------------------------------- */ - -div.footer { - margin: 20px auto 30px auto; - font-size: 14px; - color: #888; - text-align: right; -} - -div.footer a { - color: #888; -} - -/* -- relbar ---------------------------------------------------------------- */ - -div.related { - width: 100%; - font-size: 90%; -} - -div.related h3 { - display: none; -} - -div.related ul { - margin: 0; - padding: 0 0 0 10px; - list-style: none; -} - -div.related li { - display: inline; -} - -div.related li.right { - float: right; - margin-right: 5px; -} - -/* -- search page ----------------------------------------------------------- */ - -ul.search { - margin: 10px 0 0 20px; - padding: 0; -} - -ul.search li { - padding: 5px 0 5px 20px; - background: url(file.png) no-repeat 0 7px; -} - -ul.search li a { - font-weight: bold; -} - -ul.search li div.context { - color: #888; - margin: 2px 0 0 30px; - text-align: 
left;
-}
-
-ul.keywordmatches li.goodmatch a {
-    font-weight: bold;
-}
-
-/* -- general index --------------------------------------------------------- */
-
-table.indextable {
-    width: 100%;
-}
-
-table.indextable td {
-    text-align: left;
-    vertical-align: top;
-}
-
-table.indextable dl, table.indextable dd {
-    margin-top: 0;
-    margin-bottom: 0;
-}
-
-table.indextable tr.pcap {
-    height: 10px;
-}
-
-table.indextable tr.cap {
-    margin-top: 10px;
-    background-color: #f2f2f2;
-}
-
-img.toggler {
-    margin-right: 3px;
-    margin-top: 3px;
-    cursor: pointer;
-}
-
-div.modindex-jumpbox {
-    border-top: 1px solid #ddd;
-    border-bottom: 1px solid #ddd;
-    margin: 1em 0 1em 0;
-    padding: 0.4em;
-}
-
-div.genindex-jumpbox {
-    border-top: 1px solid #ddd;
-    border-bottom: 1px solid #ddd;
-    margin: 1em 0 1em 0;
-    padding: 0.4em;
-}
-
-/* -- general body styles --------------------------------------------------- */
-
-div.body p.caption {
-    text-align: inherit;
-}
-
-.field-list ul {
-    padding-left: 1em;
-}
-
-.first {
-    margin-top: 0 !important;
-}
-
-p.rubric {
-    margin-top: 30px;
-    font-weight: bold;
-}
-
-img.align-left, .figure.align-left, object.align-left {
-    clear: left;
-    float: left;
-    margin-right: 1em;
-}
-
-img.align-right, .figure.align-right, object.align-right {
-    clear: right;
-    float: right;
-    margin-left: 1em;
-}
-
-img.align-center, .figure.align-center, object.align-center {
-    display: block;
-    margin-left: auto;
-    margin-right: auto;
-}
-
-.align-left {
-    text-align: left;
-}
-
-.align-center {
-    text-align: center;
-}
-
-.align-right {
-    text-align: right;
-}
-
-/* -- topics ---------------------------------------------------------------- */
-
-div.topic {
-    border: 1px solid #ccc;
-    padding: 7px 7px 0 7px;
-    margin: 10px 0 10px 0;
-    background-color: #f8f8f8;
-}
-
-p.topic-title {
-    font-size: 1.1em;
-    font-weight: bold;
-    margin-top: 10px;
-}
-
-/* -- admonitions ----------------------------------------------------------- */
-
-div.admonition {
-    background: #fafafa;
-    margin: 20px 0;
-    padding: 10px 30px;
-    border-top: 1px solid #ccc;
-    border-bottom: 1px solid #ccc;
-}
-
-div.admonition dt {
-    font-weight: bold;
-}
-
-div.admonition dl {
-    margin-bottom: 0;
-}
-
-p.admonition-title {
-    margin: 0px 10px 5px 0px;
-    font-weight: bold;
-    display: inline;
-}
-
-p.admonition-title:after {
-    content: ":";
-}
-
-div.body p.centered {
-    text-align: center;
-    margin-top: 25px;
-}
-
-div.seealso {
-    background-color: #ffc;
-    border: 1px solid #ff6;
-}
-
-div.admonition tt.xref, div.admonition a tt {
-    border-bottom: 1px solid #fafafa;
-}
-
-dd div.admonition {
-    margin-left: -60px;
-    padding-left: 60px;
-}
-
-div.admonition p.admonition-title {
-    font-weight: normal;
-    font-size: 22px;
-    margin: 0 0 10px 0;
-    padding: 0;
-    line-height: 1;
-}
-
-div.admonition p.last {
-    margin-bottom: 0;
-}
-
-div.note {
-    background-color: #f8f8f8;
-    border: 1px solid #ccc;
-    border-radius: 3px;
-}
-
-/* -- other body styles ----------------------------------------------------- */
-
-ol.arabic {
-    list-style: decimal;
-}
-
-ol.loweralpha {
-    list-style: lower-alpha;
-}
-
-ol.upperalpha {
-    list-style: upper-alpha;
-}
-
-ol.lowerroman {
-    list-style: lower-roman;
-}
-
-ol.upperroman {
-    list-style: upper-roman;
-}
-
-.highlighted {
-    background-color: #fbe54e;
-}
-
-dl.glossary dt {
-    font-weight: bold;
-    font-size: 1.1em;
-}
-
-.field-list ul {
-    margin: 0;
-    padding-left: 1em;
-}
-
-.field-list p {
-    margin: 0;
-}
-
-.refcount {
-    color: #060;
-}
-
-.optional {
-    font-size: 1.3em;
-}
-
-.versionmodified {
-    font-style: italic;
-}
-
-.system-message {
-    background-color: #fda;
-    padding: 5px;
-    border: 3px solid red;
-}
-
-.footnote:target {
-    background-color: #ffa;
-}
-
-.line-block {
-    display: block;
-    margin-top: 1em;
-    margin-bottom: 1em;
-}
-
-.line-block .line-block {
-    margin-top: 0;
-    margin-bottom: 0;
-    margin-left: 1.5em;
-}
-
-.guilabel, .menuselection {
-    font-family: sans-serif;
-}
-
-.accelerator {
-    text-decoration: underline;
-}
-
-.classifier {
-    font-style: oblique;
-}
-
-abbr, acronym {
-    border-bottom: dotted 1px;
-    cursor: help;
-}
-
-dt:target, .highlight {
-    background: #FAF3E8;
-}
-
-/* -- code displays --------------------------------------------------------- */
-
-td.linenos pre {
-    padding: 5px 0px;
-    border: 0;
-    background-color: transparent;
-    color: #aaa;
-}
-
-table.highlighttable {
-    margin-left: 0.5em;
-}
-
-table.highlighttable td {
-    padding: 0 0.5em 0 0.5em;
-}
-
-tt.descname {
-    background-color: transparent;
-    font-weight: bold;
-    padding-right: 0.08em;
-}
-
-tt.descclassname {
-    background-color: transparent;
-}
-
-tt.descname, tt.descclassname {
-    font-size: 0.95em;
-}
-
-tt.xref, a tt {
-    background-color: transparent;
-    font-weight: bold;
-}
-
-h1 tt, h2 tt, h3 tt, h4 tt, h5 tt, h6 tt {
-    background-color: transparent;
-}
-
-.viewcode-link {
-    float: right;
-}
-
-.viewcode-back {
-    float: right;
-    font-family: sans-serif;
-}
-
-div.viewcode-block:target {
-    margin: -1px -10px;
-    padding: 0 10px;
-}
-
-/* -- math display ---------------------------------------------------------- */
-
-img.math {
-    vertical-align: middle;
-}
-
-div.body div.math p {
-    text-align: center;
-}
-
-span.eqno {
-    float: right;
-}
-
-/* -- Theme specific classes - */
-
-.overflow-height-500px {
-    overflow: auto;
-    height: 500px;
-}
-
-.overflow-height-250px {
-    overflow: auto;
-    height: 250px;
-}
-
-/* Small screen styles --------------------------------------------------- */
-
-@media screen and (max-device-width: 480px) {
-
-    body {
-        margin: 0;
-        padding-top: 0;
-    }
-
-    .body {
-        width: 100%;
-        float: none;
-        min-height: 0;
-        padding: 10px;
-        margin-top: 20px;
-    }
-
-    .navbar-fixed-top {
-        position: static;
-    }
-
-    div.sphinxsidebar {
-        float: none;
-        width: 100%;
-        margin: 0 0 24px 0;
-        padding: 10px 20px;
-    }
-
-    div.sphinxsidebar p.logo {
-        display: none;
-    }
-
-    #github-stars {
-        display: none;
-    }
-}
diff --git a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/jquery-1.9.1.min.js b/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/jquery-1.9.1.min.js
deleted file mode 100644
index c6a59aebb3..0000000000
--- a/docs/guzzle_sphinx_theme/guzzle_sphinx_theme/static/jquery-1.9.1.min.js
+++ /dev/null
@@ -1,5 +0,0 @@
-/*! jQuery v1.9.1 | (c) 2005, 2012 jQuery Foundation, Inc.
| jquery.org/license - //@ sourceMappingURL=jquery.min.map - */(function(e,t){var n,r,i=typeof t,o=e.document,a=e.location,s=e.jQuery,u=e.$,l={},c=[],p="1.9.1",f=c.concat,d=c.push,h=c.slice,g=c.indexOf,m=l.toString,y=l.hasOwnProperty,v=p.trim,b=function(e,t){return new b.fn.init(e,t,r)},x=/[+-]?(?:\d*\.|)\d+(?:[eE][+-]?\d+|)/.source,w=/\S+/g,T=/^[\s\uFEFF\xA0]+|[\s\uFEFF\xA0]+$/g,N=/^(?:(<[\w\W]+>)[^>]*|#([\w-]*))$/,C=/^<(\w+)\s*\/?>(?:<\/\1>|)$/,k=/^[\],:{}\s]*$/,E=/(?:^|:|,)(?:\s*\[)+/g,S=/\\(?:["\\\/bfnrt]|u[\da-fA-F]{4})/g,A=/"[^"\\\r\n]*"|true|false|null|-?(?:\d+\.|)\d+(?:[eE][+-]?\d+|)/g,j=/^-ms-/,D=/-([\da-z])/gi,L=function(e,t){return t.toUpperCase()},H=function(e){(o.addEventListener||"load"===e.type||"complete"===o.readyState)&&(q(),b.ready())},q=function(){o.addEventListener?(o.removeEventListener("DOMContentLoaded",H,!1),e.removeEventListener("load",H,!1)):(o.detachEvent("onreadystatechange",H),e.detachEvent("onload",H))};b.fn=b.prototype={jquery:p,constructor:b,init:function(e,n,r){var i,a;if(!e)return this;if("string"==typeof e){if(i="<"===e.charAt(0)&&">"===e.charAt(e.length-1)&&e.length>=3?[null,e,null]:N.exec(e),!i||!i[1]&&n)return!n||n.jquery?(n||r).find(e):this.constructor(n).find(e);if(i[1]){if(n=n instanceof b?n[0]:n,b.merge(this,b.parseHTML(i[1],n&&n.nodeType?n.ownerDocument||n:o,!0)),C.test(i[1])&&b.isPlainObject(n))for(i in n)b.isFunction(this[i])?this[i](n[i]):this.attr(i,n[i]);return this}if(a=o.getElementById(i[2]),a&&a.parentNode){if(a.id!==i[2])return r.find(e);this.length=1,this[0]=a}return this.context=o,this.selector=e,this}return e.nodeType?(this.context=this[0]=e,this.length=1,this):b.isFunction(e)?r.ready(e):(e.selector!==t&&(this.selector=e.selector,this.context=e.context),b.makeArray(e,this))},selector:"",length:0,size:function(){return this.length},toArray:function(){return h.call(this)},get:function(e){return null==e?this.toArray():0>e?this[this.length+e]:this[e]},pushStack:function(e){var t=b.merge(this.constructor(),e);return t.prevObject=this,t.context=this.context,t},each:function(e,t){return b.each(this,e,t)},ready:function(e){return b.ready.promise().done(e),this},slice:function(){return this.pushStack(h.apply(this,arguments))},first:function(){return this.eq(0)},last:function(){return this.eq(-1)},eq:function(e){var t=this.length,n=+e+(0>e?t:0);return this.pushStack(n>=0&&t>n?[this[n]]:[])},map:function(e){return this.pushStack(b.map(this,function(t,n){return e.call(t,n,t)}))},end:function(){return this.prevObject||this.constructor(null)},push:d,sort:[].sort,splice:[].splice},b.fn.init.prototype=b.fn,b.extend=b.fn.extend=function(){var e,n,r,i,o,a,s=arguments[0]||{},u=1,l=arguments.length,c=!1;for("boolean"==typeof s&&(c=s,s=arguments[1]||{},u=2),"object"==typeof s||b.isFunction(s)||(s={}),l===u&&(s=this,--u);l>u;u++)if(null!=(o=arguments[u]))for(i in o)e=s[i],r=o[i],s!==r&&(c&&r&&(b.isPlainObject(r)||(n=b.isArray(r)))?(n?(n=!1,a=e&&b.isArray(e)?e:[]):a=e&&b.isPlainObject(e)?e:{},s[i]=b.extend(c,a,r)):r!==t&&(s[i]=r));return s},b.extend({noConflict:function(t){return e.$===b&&(e.$=u),t&&e.jQuery===b&&(e.jQuery=s),b},isReady:!1,readyWait:1,holdReady:function(e){e?b.readyWait++:b.ready(!0)},ready:function(e){if(e===!0?!--b.readyWait:!b.isReady){if(!o.body)return setTimeout(b.ready);b.isReady=!0,e!==!0&&--b.readyWait>0||(n.resolveWith(o,[b]),b.fn.trigger&&b(o).trigger("ready").off("ready"))}},isFunction:function(e){return"function"===b.type(e)},isArray:Array.isArray||function(e){return"array"===b.type(e)},isWindow:function(e){return 
null!=e&&e==e.window},isNumeric:function(e){return!isNaN(parseFloat(e))&&isFinite(e)},type:function(e){return null==e?e+"":"object"==typeof e||"function"==typeof e?l[m.call(e)]||"object":typeof e},isPlainObject:function(e){if(!e||"object"!==b.type(e)||e.nodeType||b.isWindow(e))return!1;try{if(e.constructor&&!y.call(e,"constructor")&&!y.call(e.constructor.prototype,"isPrototypeOf"))return!1}catch(n){return!1}var r;for(r in e);return r===t||y.call(e,r)},isEmptyObject:function(e){var t;for(t in e)return!1;return!0},error:function(e){throw Error(e)},parseHTML:function(e,t,n){if(!e||"string"!=typeof e)return null;"boolean"==typeof t&&(n=t,t=!1),t=t||o;var r=C.exec(e),i=!n&&[];return r?[t.createElement(r[1])]:(r=b.buildFragment([e],t,i),i&&b(i).remove(),b.merge([],r.childNodes))},parseJSON:function(n){return e.JSON&&e.JSON.parse?e.JSON.parse(n):null===n?n:"string"==typeof n&&(n=b.trim(n),n&&k.test(n.replace(S,"@").replace(A,"]").replace(E,"")))?Function("return "+n)():(b.error("Invalid JSON: "+n),t)},parseXML:function(n){var r,i;if(!n||"string"!=typeof n)return null;try{e.DOMParser?(i=new DOMParser,r=i.parseFromString(n,"text/xml")):(r=new ActiveXObject("Microsoft.XMLDOM"),r.async="false",r.loadXML(n))}catch(o){r=t}return r&&r.documentElement&&!r.getElementsByTagName("parsererror").length||b.error("Invalid XML: "+n),r},noop:function(){},globalEval:function(t){t&&b.trim(t)&&(e.execScript||function(t){e.eval.call(e,t)})(t)},camelCase:function(e){return e.replace(j,"ms-").replace(D,L)},nodeName:function(e,t){return e.nodeName&&e.nodeName.toLowerCase()===t.toLowerCase()},each:function(e,t,n){var r,i=0,o=e.length,a=M(e);if(n){if(a){for(;o>i;i++)if(r=t.apply(e[i],n),r===!1)break}else for(i in e)if(r=t.apply(e[i],n),r===!1)break}else if(a){for(;o>i;i++)if(r=t.call(e[i],i,e[i]),r===!1)break}else for(i in e)if(r=t.call(e[i],i,e[i]),r===!1)break;return e},trim:v&&!v.call("\ufeff\u00a0")?function(e){return null==e?"":v.call(e)}:function(e){return null==e?"":(e+"").replace(T,"")},makeArray:function(e,t){var n=t||[];return null!=e&&(M(Object(e))?b.merge(n,"string"==typeof e?[e]:e):d.call(n,e)),n},inArray:function(e,t,n){var r;if(t){if(g)return g.call(t,e,n);for(r=t.length,n=n?0>n?Math.max(0,r+n):n:0;r>n;n++)if(n in t&&t[n]===e)return n}return-1},merge:function(e,n){var r=n.length,i=e.length,o=0;if("number"==typeof r)for(;r>o;o++)e[i++]=n[o];else while(n[o]!==t)e[i++]=n[o++];return e.length=i,e},grep:function(e,t,n){var r,i=[],o=0,a=e.length;for(n=!!n;a>o;o++)r=!!t(e[o],o),n!==r&&i.push(e[o]);return i},map:function(e,t,n){var r,i=0,o=e.length,a=M(e),s=[];if(a)for(;o>i;i++)r=t(e[i],i,n),null!=r&&(s[s.length]=r);else for(i in e)r=t(e[i],i,n),null!=r&&(s[s.length]=r);return f.apply([],s)},guid:1,proxy:function(e,n){var r,i,o;return"string"==typeof n&&(o=e[n],n=e,e=o),b.isFunction(e)?(r=h.call(arguments,2),i=function(){return e.apply(n||this,r.concat(h.call(arguments)))},i.guid=e.guid=e.guid||b.guid++,i):t},access:function(e,n,r,i,o,a,s){var u=0,l=e.length,c=null==r;if("object"===b.type(r)){o=!0;for(u in r)b.access(e,n,u,r[u],!0,a,s)}else if(i!==t&&(o=!0,b.isFunction(i)||(s=!0),c&&(s?(n.call(e,i),n=null):(c=n,n=function(e,t,n){return c.call(b(e),n)})),n))for(;l>u;u++)n(e[u],r,s?i:i.call(e[u],u,n(e[u],r)));return o?e:c?n.call(e):l?n(e[0],r):a},now:function(){return(new Date).getTime()}}),b.ready.promise=function(t){if(!n)if(n=b.Deferred(),"complete"===o.readyState)setTimeout(b.ready);else 
if(o.addEventListener)o.addEventListener("DOMContentLoaded",H,!1),e.addEventListener("load",H,!1);else{o.attachEvent("onreadystatechange",H),e.attachEvent("onload",H);var r=!1;try{r=null==e.frameElement&&o.documentElement}catch(i){}r&&r.doScroll&&function a(){if(!b.isReady){try{r.doScroll("left")}catch(e){return setTimeout(a,50)}q(),b.ready()}}()}return n.promise(t)},b.each("Boolean Number String Function Array Date RegExp Object Error".split(" "),function(e,t){l["[object "+t+"]"]=t.toLowerCase()});function M(e){var t=e.length,n=b.type(e);return b.isWindow(e)?!1:1===e.nodeType&&t?!0:"array"===n||"function"!==n&&(0===t||"number"==typeof t&&t>0&&t-1 in e)}r=b(o);var _={};function F(e){var t=_[e]={};return b.each(e.match(w)||[],function(e,n){t[n]=!0}),t}b.Callbacks=function(e){e="string"==typeof e?_[e]||F(e):b.extend({},e);var n,r,i,o,a,s,u=[],l=!e.once&&[],c=function(t){for(r=e.memory&&t,i=!0,a=s||0,s=0,o=u.length,n=!0;u&&o>a;a++)if(u[a].apply(t[0],t[1])===!1&&e.stopOnFalse){r=!1;break}n=!1,u&&(l?l.length&&c(l.shift()):r?u=[]:p.disable())},p={add:function(){if(u){var t=u.length;(function i(t){b.each(t,function(t,n){var r=b.type(n);"function"===r?e.unique&&p.has(n)||u.push(n):n&&n.length&&"string"!==r&&i(n)})})(arguments),n?o=u.length:r&&(s=t,c(r))}return this},remove:function(){return u&&b.each(arguments,function(e,t){var r;while((r=b.inArray(t,u,r))>-1)u.splice(r,1),n&&(o>=r&&o--,a>=r&&a--)}),this},has:function(e){return e?b.inArray(e,u)>-1:!(!u||!u.length)},empty:function(){return u=[],this},disable:function(){return u=l=r=t,this},disabled:function(){return!u},lock:function(){return l=t,r||p.disable(),this},locked:function(){return!l},fireWith:function(e,t){return t=t||[],t=[e,t.slice?t.slice():t],!u||i&&!l||(n?l.push(t):c(t)),this},fire:function(){return p.fireWith(this,arguments),this},fired:function(){return!!i}};return p},b.extend({Deferred:function(e){var t=[["resolve","done",b.Callbacks("once memory"),"resolved"],["reject","fail",b.Callbacks("once memory"),"rejected"],["notify","progress",b.Callbacks("memory")]],n="pending",r={state:function(){return n},always:function(){return i.done(arguments).fail(arguments),this},then:function(){var e=arguments;return b.Deferred(function(n){b.each(t,function(t,o){var a=o[0],s=b.isFunction(e[t])&&e[t];i[o[1]](function(){var e=s&&s.apply(this,arguments);e&&b.isFunction(e.promise)?e.promise().done(n.resolve).fail(n.reject).progress(n.notify):n[a+"With"](this===r?n.promise():this,s?[e]:arguments)})}),e=null}).promise()},promise:function(e){return null!=e?b.extend(e,r):r}},i={};return r.pipe=r.then,b.each(t,function(e,o){var a=o[2],s=o[3];r[o[1]]=a.add,s&&a.add(function(){n=s},t[1^e][2].disable,t[2][2].lock),i[o[0]]=function(){return i[o[0]+"With"](this===i?r:this,arguments),this},i[o[0]+"With"]=a.fireWith}),r.promise(i),e&&e.call(i,i),i},when:function(e){var t=0,n=h.call(arguments),r=n.length,i=1!==r||e&&b.isFunction(e.promise)?r:0,o=1===i?e:b.Deferred(),a=function(e,t,n){return function(r){t[e]=this,n[e]=arguments.length>1?h.call(arguments):r,n===s?o.notifyWith(t,n):--i||o.resolveWith(t,n)}},s,u,l;if(r>1)for(s=Array(r),u=Array(r),l=Array(r);r>t;t++)n[t]&&b.isFunction(n[t].promise)?n[t].promise().done(a(t,l,n)).fail(o.reject).progress(a(t,u,s)):--i;return i||o.resolveWith(l,n),o.promise()}}),b.support=function(){var t,n,r,a,s,u,l,c,p,f,d=o.createElement("div");if(d.setAttribute("className","t"),d.innerHTML="
    a",n=d.getElementsByTagName("*"),r=d.getElementsByTagName("a")[0],!n||!r||!n.length)return{};s=o.createElement("select"),l=s.appendChild(o.createElement("option")),a=d.getElementsByTagName("input")[0],r.style.cssText="top:1px;float:left;opacity:.5",t={getSetAttribute:"t"!==d.className,leadingWhitespace:3===d.firstChild.nodeType,tbody:!d.getElementsByTagName("tbody").length,htmlSerialize:!!d.getElementsByTagName("link").length,style:/top/.test(r.getAttribute("style")),hrefNormalized:"/a"===r.getAttribute("href"),opacity:/^0.5/.test(r.style.opacity),cssFloat:!!r.style.cssFloat,checkOn:!!a.value,optSelected:l.selected,enctype:!!o.createElement("form").enctype,html5Clone:"<:nav>"!==o.createElement("nav").cloneNode(!0).outerHTML,boxModel:"CSS1Compat"===o.compatMode,deleteExpando:!0,noCloneEvent:!0,inlineBlockNeedsLayout:!1,shrinkWrapBlocks:!1,reliableMarginRight:!0,boxSizingReliable:!0,pixelPosition:!1},a.checked=!0,t.noCloneChecked=a.cloneNode(!0).checked,s.disabled=!0,t.optDisabled=!l.disabled;try{delete d.test}catch(h){t.deleteExpando=!1}a=o.createElement("input"),a.setAttribute("value",""),t.input=""===a.getAttribute("value"),a.value="t",a.setAttribute("type","radio"),t.radioValue="t"===a.value,a.setAttribute("checked","t"),a.setAttribute("name","t"),u=o.createDocumentFragment(),u.appendChild(a),t.appendChecked=a.checked,t.checkClone=u.cloneNode(!0).cloneNode(!0).lastChild.checked,d.attachEvent&&(d.attachEvent("onclick",function(){t.noCloneEvent=!1}),d.cloneNode(!0).click());for(f in{submit:!0,change:!0,focusin:!0})d.setAttribute(c="on"+f,"t"),t[f+"Bubbles"]=c in e||d.attributes[c].expando===!1;return d.style.backgroundClip="content-box",d.cloneNode(!0).style.backgroundClip="",t.clearCloneStyle="content-box"===d.style.backgroundClip,b(function(){var n,r,a,s="padding:0;margin:0;border:0;display:block;box-sizing:content-box;-moz-box-sizing:content-box;-webkit-box-sizing:content-box;",u=o.getElementsByTagName("body")[0];u&&(n=o.createElement("div"),n.style.cssText="border:0;width:0;height:0;position:absolute;top:0;left:-9999px;margin-top:1px",u.appendChild(n).appendChild(d),d.innerHTML="
    t
    ",a=d.getElementsByTagName("td"),a[0].style.cssText="padding:0;margin:0;border:0;display:none",p=0===a[0].offsetHeight,a[0].style.display="",a[1].style.display="none",t.reliableHiddenOffsets=p&&0===a[0].offsetHeight,d.innerHTML="",d.style.cssText="box-sizing:border-box;-moz-box-sizing:border-box;-webkit-box-sizing:border-box;padding:1px;border:1px;display:block;width:4px;margin-top:1%;position:absolute;top:1%;",t.boxSizing=4===d.offsetWidth,t.doesNotIncludeMarginInBodyOffset=1!==u.offsetTop,e.getComputedStyle&&(t.pixelPosition="1%"!==(e.getComputedStyle(d,null)||{}).top,t.boxSizingReliable="4px"===(e.getComputedStyle(d,null)||{width:"4px"}).width,r=d.appendChild(o.createElement("div")),r.style.cssText=d.style.cssText=s,r.style.marginRight=r.style.width="0",d.style.width="1px",t.reliableMarginRight=!parseFloat((e.getComputedStyle(r,null)||{}).marginRight)),typeof d.style.zoom!==i&&(d.innerHTML="",d.style.cssText=s+"width:1px;padding:1px;display:inline;zoom:1",t.inlineBlockNeedsLayout=3===d.offsetWidth,d.style.display="block",d.innerHTML="
    ",d.firstChild.style.width="5px",t.shrinkWrapBlocks=3!==d.offsetWidth,t.inlineBlockNeedsLayout&&(u.style.zoom=1)),u.removeChild(n),n=d=a=r=null)}),n=s=u=l=r=a=null,t}();var O=/(?:\{[\s\S]*\}|\[[\s\S]*\])$/,B=/([A-Z])/g;function P(e,n,r,i){if(b.acceptData(e)){var o,a,s=b.expando,u="string"==typeof n,l=e.nodeType,p=l?b.cache:e,f=l?e[s]:e[s]&&s;if(f&&p[f]&&(i||p[f].data)||!u||r!==t)return f||(l?e[s]=f=c.pop()||b.guid++:f=s),p[f]||(p[f]={},l||(p[f].toJSON=b.noop)),("object"==typeof n||"function"==typeof n)&&(i?p[f]=b.extend(p[f],n):p[f].data=b.extend(p[f].data,n)),o=p[f],i||(o.data||(o.data={}),o=o.data),r!==t&&(o[b.camelCase(n)]=r),u?(a=o[n],null==a&&(a=o[b.camelCase(n)])):a=o,a}}function R(e,t,n){if(b.acceptData(e)){var r,i,o,a=e.nodeType,s=a?b.cache:e,u=a?e[b.expando]:b.expando;if(s[u]){if(t&&(o=n?s[u]:s[u].data)){b.isArray(t)?t=t.concat(b.map(t,b.camelCase)):t in o?t=[t]:(t=b.camelCase(t),t=t in o?[t]:t.split(" "));for(r=0,i=t.length;i>r;r++)delete o[t[r]];if(!(n?$:b.isEmptyObject)(o))return}(n||(delete s[u].data,$(s[u])))&&(a?b.cleanData([e],!0):b.support.deleteExpando||s!=s.window?delete s[u]:s[u]=null)}}}b.extend({cache:{},expando:"jQuery"+(p+Math.random()).replace(/\D/g,""),noData:{embed:!0,object:"clsid:D27CDB6E-AE6D-11cf-96B8-444553540000",applet:!0},hasData:function(e){return e=e.nodeType?b.cache[e[b.expando]]:e[b.expando],!!e&&!$(e)},data:function(e,t,n){return P(e,t,n)},removeData:function(e,t){return R(e,t)},_data:function(e,t,n){return P(e,t,n,!0)},_removeData:function(e,t){return R(e,t,!0)},acceptData:function(e){if(e.nodeType&&1!==e.nodeType&&9!==e.nodeType)return!1;var t=e.nodeName&&b.noData[e.nodeName.toLowerCase()];return!t||t!==!0&&e.getAttribute("classid")===t}}),b.fn.extend({data:function(e,n){var r,i,o=this[0],a=0,s=null;if(e===t){if(this.length&&(s=b.data(o),1===o.nodeType&&!b._data(o,"parsedAttrs"))){for(r=o.attributes;r.length>a;a++)i=r[a].name,i.indexOf("data-")||(i=b.camelCase(i.slice(5)),W(o,i,s[i]));b._data(o,"parsedAttrs",!0)}return s}return"object"==typeof e?this.each(function(){b.data(this,e)}):b.access(this,function(n){return n===t?o?W(o,e,b.data(o,e)):null:(this.each(function(){b.data(this,e,n)}),t)},null,n,arguments.length>1,null,!0)},removeData:function(e){return this.each(function(){b.removeData(this,e)})}});function W(e,n,r){if(r===t&&1===e.nodeType){var i="data-"+n.replace(B,"-$1").toLowerCase();if(r=e.getAttribute(i),"string"==typeof r){try{r="true"===r?!0:"false"===r?!1:"null"===r?null:+r+""===r?+r:O.test(r)?b.parseJSON(r):r}catch(o){}b.data(e,n,r)}else r=t}return r}function $(e){var t;for(t in e)if(("data"!==t||!b.isEmptyObject(e[t]))&&"toJSON"!==t)return!1;return!0}b.extend({queue:function(e,n,r){var i;return e?(n=(n||"fx")+"queue",i=b._data(e,n),r&&(!i||b.isArray(r)?i=b._data(e,n,b.makeArray(r)):i.push(r)),i||[]):t},dequeue:function(e,t){t=t||"fx";var n=b.queue(e,t),r=n.length,i=n.shift(),o=b._queueHooks(e,t),a=function(){b.dequeue(e,t)};"inprogress"===i&&(i=n.shift(),r--),o.cur=i,i&&("fx"===t&&n.unshift("inprogress"),delete o.stop,i.call(e,a,o)),!r&&o&&o.empty.fire()},_queueHooks:function(e,t){var n=t+"queueHooks";return b._data(e,n)||b._data(e,n,{empty:b.Callbacks("once memory").add(function(){b._removeData(e,t+"queue"),b._removeData(e,n)})})}}),b.fn.extend({queue:function(e,n){var r=2;return"string"!=typeof e&&(n=e,e="fx",r--),r>arguments.length?b.queue(this[0],e):n===t?this:this.each(function(){var t=b.queue(this,e,n);b._queueHooks(this,e),"fx"===e&&"inprogress"!==t[0]&&b.dequeue(this,e)})},dequeue:function(e){return 
this.each(function(){b.dequeue(this,e)})},delay:function(e,t){return e=b.fx?b.fx.speeds[e]||e:e,t=t||"fx",this.queue(t,function(t,n){var r=setTimeout(t,e);n.stop=function(){clearTimeout(r)}})},clearQueue:function(e){return this.queue(e||"fx",[])},promise:function(e,n){var r,i=1,o=b.Deferred(),a=this,s=this.length,u=function(){--i||o.resolveWith(a,[a])};"string"!=typeof e&&(n=e,e=t),e=e||"fx";while(s--)r=b._data(a[s],e+"queueHooks"),r&&r.empty&&(i++,r.empty.add(u));return u(),o.promise(n)}});var I,z,X=/[\t\r\n]/g,U=/\r/g,V=/^(?:input|select|textarea|button|object)$/i,Y=/^(?:a|area)$/i,J=/^(?:checked|selected|autofocus|autoplay|async|controls|defer|disabled|hidden|loop|multiple|open|readonly|required|scoped)$/i,G=/^(?:checked|selected)$/i,Q=b.support.getSetAttribute,K=b.support.input;b.fn.extend({attr:function(e,t){return b.access(this,b.attr,e,t,arguments.length>1)},removeAttr:function(e){return this.each(function(){b.removeAttr(this,e)})},prop:function(e,t){return b.access(this,b.prop,e,t,arguments.length>1)},removeProp:function(e){return e=b.propFix[e]||e,this.each(function(){try{this[e]=t,delete this[e]}catch(n){}})},addClass:function(e){var t,n,r,i,o,a=0,s=this.length,u="string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).addClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):" ")){o=0;while(i=t[o++])0>r.indexOf(" "+i+" ")&&(r+=i+" ");n.className=b.trim(r)}return this},removeClass:function(e){var t,n,r,i,o,a=0,s=this.length,u=0===arguments.length||"string"==typeof e&&e;if(b.isFunction(e))return this.each(function(t){b(this).removeClass(e.call(this,t,this.className))});if(u)for(t=(e||"").match(w)||[];s>a;a++)if(n=this[a],r=1===n.nodeType&&(n.className?(" "+n.className+" ").replace(X," "):"")){o=0;while(i=t[o++])while(r.indexOf(" "+i+" ")>=0)r=r.replace(" "+i+" "," ");n.className=e?b.trim(r):""}return this},toggleClass:function(e,t){var n=typeof e,r="boolean"==typeof t;return b.isFunction(e)?this.each(function(n){b(this).toggleClass(e.call(this,n,this.className,t),t)}):this.each(function(){if("string"===n){var o,a=0,s=b(this),u=t,l=e.match(w)||[];while(o=l[a++])u=r?u:!s.hasClass(o),s[u?"addClass":"removeClass"](o)}else(n===i||"boolean"===n)&&(this.className&&b._data(this,"__className__",this.className),this.className=this.className||e===!1?"":b._data(this,"__className__")||"")})},hasClass:function(e){var t=" "+e+" ",n=0,r=this.length;for(;r>n;n++)if(1===this[n].nodeType&&(" "+this[n].className+" ").replace(X," ").indexOf(t)>=0)return!0;return!1},val:function(e){var n,r,i,o=this[0];{if(arguments.length)return i=b.isFunction(e),this.each(function(n){var o,a=b(this);1===this.nodeType&&(o=i?e.call(this,n,a.val()):e,null==o?o="":"number"==typeof o?o+="":b.isArray(o)&&(o=b.map(o,function(e){return null==e?"":e+""})),r=b.valHooks[this.type]||b.valHooks[this.nodeName.toLowerCase()],r&&"set"in r&&r.set(this,o,"value")!==t||(this.value=o))});if(o)return r=b.valHooks[o.type]||b.valHooks[o.nodeName.toLowerCase()],r&&"get"in r&&(n=r.get(o,"value"))!==t?n:(n=o.value,"string"==typeof n?n.replace(U,""):null==n?"":n)}}}),b.extend({valHooks:{option:{get:function(e){var t=e.attributes.value;return!t||t.specified?e.value:e.text}},select:{get:function(e){var 
t,n,r=e.options,i=e.selectedIndex,o="select-one"===e.type||0>i,a=o?null:[],s=o?i+1:r.length,u=0>i?s:o?i:0;for(;s>u;u++)if(n=r[u],!(!n.selected&&u!==i||(b.support.optDisabled?n.disabled:null!==n.getAttribute("disabled"))||n.parentNode.disabled&&b.nodeName(n.parentNode,"optgroup"))){if(t=b(n).val(),o)return t;a.push(t)}return a},set:function(e,t){var n=b.makeArray(t);return b(e).find("option").each(function(){this.selected=b.inArray(b(this).val(),n)>=0}),n.length||(e.selectedIndex=-1),n}}},attr:function(e,n,r){var o,a,s,u=e.nodeType;if(e&&3!==u&&8!==u&&2!==u)return typeof e.getAttribute===i?b.prop(e,n,r):(a=1!==u||!b.isXMLDoc(e),a&&(n=n.toLowerCase(),o=b.attrHooks[n]||(J.test(n)?z:I)),r===t?o&&a&&"get"in o&&null!==(s=o.get(e,n))?s:(typeof e.getAttribute!==i&&(s=e.getAttribute(n)),null==s?t:s):null!==r?o&&a&&"set"in o&&(s=o.set(e,r,n))!==t?s:(e.setAttribute(n,r+""),r):(b.removeAttr(e,n),t))},removeAttr:function(e,t){var n,r,i=0,o=t&&t.match(w);if(o&&1===e.nodeType)while(n=o[i++])r=b.propFix[n]||n,J.test(n)?!Q&&G.test(n)?e[b.camelCase("default-"+n)]=e[r]=!1:e[r]=!1:b.attr(e,n,""),e.removeAttribute(Q?n:r)},attrHooks:{type:{set:function(e,t){if(!b.support.radioValue&&"radio"===t&&b.nodeName(e,"input")){var n=e.value;return e.setAttribute("type",t),n&&(e.value=n),t}}}},propFix:{tabindex:"tabIndex",readonly:"readOnly","for":"htmlFor","class":"className",maxlength:"maxLength",cellspacing:"cellSpacing",cellpadding:"cellPadding",rowspan:"rowSpan",colspan:"colSpan",usemap:"useMap",frameborder:"frameBorder",contenteditable:"contentEditable"},prop:function(e,n,r){var i,o,a,s=e.nodeType;if(e&&3!==s&&8!==s&&2!==s)return a=1!==s||!b.isXMLDoc(e),a&&(n=b.propFix[n]||n,o=b.propHooks[n]),r!==t?o&&"set"in o&&(i=o.set(e,r,n))!==t?i:e[n]=r:o&&"get"in o&&null!==(i=o.get(e,n))?i:e[n]},propHooks:{tabIndex:{get:function(e){var n=e.getAttributeNode("tabindex");return n&&n.specified?parseInt(n.value,10):V.test(e.nodeName)||Y.test(e.nodeName)&&e.href?0:t}}}}),z={get:function(e,n){var r=b.prop(e,n),i="boolean"==typeof r&&e.getAttribute(n),o="boolean"==typeof r?K&&Q?null!=i:G.test(n)?e[b.camelCase("default-"+n)]:!!i:e.getAttributeNode(n);return o&&o.value!==!1?n.toLowerCase():t},set:function(e,t,n){return t===!1?b.removeAttr(e,n):K&&Q||!G.test(n)?e.setAttribute(!Q&&b.propFix[n]||n,n):e[b.camelCase("default-"+n)]=e[n]=!0,n}},K&&Q||(b.attrHooks.value={get:function(e,n){var r=e.getAttributeNode(n);return b.nodeName(e,"input")?e.defaultValue:r&&r.specified?r.value:t},set:function(e,n,r){return b.nodeName(e,"input")?(e.defaultValue=n,t):I&&I.set(e,n,r)}}),Q||(I=b.valHooks.button={get:function(e,n){var r=e.getAttributeNode(n);return r&&("id"===n||"name"===n||"coords"===n?""!==r.value:r.specified)?r.value:t},set:function(e,n,r){var i=e.getAttributeNode(r);return i||e.setAttributeNode(i=e.ownerDocument.createAttribute(r)),i.value=n+="","value"===r||n===e.getAttribute(r)?n:t}},b.attrHooks.contenteditable={get:I.get,set:function(e,t,n){I.set(e,""===t?!1:t,n)}},b.each(["width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{set:function(e,r){return""===r?(e.setAttribute(n,"auto"),r):t}})})),b.support.hrefNormalized||(b.each(["href","src","width","height"],function(e,n){b.attrHooks[n]=b.extend(b.attrHooks[n],{get:function(e){var r=e.getAttribute(n,2);return null==r?t:r}})}),b.each(["href","src"],function(e,t){b.propHooks[t]={get:function(e){return e.getAttribute(t,4)}}})),b.support.style||(b.attrHooks.style={get:function(e){return e.style.cssText||t},set:function(e,t){return 
e.style.cssText=t+""}}),b.support.optSelected||(b.propHooks.selected=b.extend(b.propHooks.selected,{get:function(e){var t=e.parentNode;return t&&(t.selectedIndex,t.parentNode&&t.parentNode.selectedIndex),null}})),b.support.enctype||(b.propFix.enctype="encoding"),b.support.checkOn||b.each(["radio","checkbox"],function(){b.valHooks[this]={get:function(e){return null===e.getAttribute("value")?"on":e.value}}}),b.each(["radio","checkbox"],function(){b.valHooks[this]=b.extend(b.valHooks[this],{set:function(e,n){return b.isArray(n)?e.checked=b.inArray(b(e).val(),n)>=0:t}})});var Z=/^(?:input|select|textarea)$/i,et=/^key/,tt=/^(?:mouse|contextmenu)|click/,nt=/^(?:focusinfocus|focusoutblur)$/,rt=/^([^.]*)(?:\.(.+)|)$/;function it(){return!0}function ot(){return!1}b.event={global:{},add:function(e,n,r,o,a){var s,u,l,c,p,f,d,h,g,m,y,v=b._data(e);if(v){r.handler&&(c=r,r=c.handler,a=c.selector),r.guid||(r.guid=b.guid++),(u=v.events)||(u=v.events={}),(f=v.handle)||(f=v.handle=function(e){return typeof b===i||e&&b.event.triggered===e.type?t:b.event.dispatch.apply(f.elem,arguments)},f.elem=e),n=(n||"").match(w)||[""],l=n.length;while(l--)s=rt.exec(n[l])||[],g=y=s[1],m=(s[2]||"").split(".").sort(),p=b.event.special[g]||{},g=(a?p.delegateType:p.bindType)||g,p=b.event.special[g]||{},d=b.extend({type:g,origType:y,data:o,handler:r,guid:r.guid,selector:a,needsContext:a&&b.expr.match.needsContext.test(a),namespace:m.join(".")},c),(h=u[g])||(h=u[g]=[],h.delegateCount=0,p.setup&&p.setup.call(e,o,m,f)!==!1||(e.addEventListener?e.addEventListener(g,f,!1):e.attachEvent&&e.attachEvent("on"+g,f))),p.add&&(p.add.call(e,d),d.handler.guid||(d.handler.guid=r.guid)),a?h.splice(h.delegateCount++,0,d):h.push(d),b.event.global[g]=!0;e=null}},remove:function(e,t,n,r,i){var o,a,s,u,l,c,p,f,d,h,g,m=b.hasData(e)&&b._data(e);if(m&&(c=m.events)){t=(t||"").match(w)||[""],l=t.length;while(l--)if(s=rt.exec(t[l])||[],d=g=s[1],h=(s[2]||"").split(".").sort(),d){p=b.event.special[d]||{},d=(r?p.delegateType:p.bindType)||d,f=c[d]||[],s=s[2]&&RegExp("(^|\\.)"+h.join("\\.(?:.*\\.|)")+"(\\.|$)"),u=o=f.length;while(o--)a=f[o],!i&&g!==a.origType||n&&n.guid!==a.guid||s&&!s.test(a.namespace)||r&&r!==a.selector&&("**"!==r||!a.selector)||(f.splice(o,1),a.selector&&f.delegateCount--,p.remove&&p.remove.call(e,a));u&&!f.length&&(p.teardown&&p.teardown.call(e,h,m.handle)!==!1||b.removeEvent(e,d,m.handle),delete c[d])}else for(d in c)b.event.remove(e,d+t[l],n,r,!0);b.isEmptyObject(c)&&(delete m.handle,b._removeData(e,"events"))}},trigger:function(n,r,i,a){var s,u,l,c,p,f,d,h=[i||o],g=y.call(n,"type")?n.type:n,m=y.call(n,"namespace")?n.namespace.split("."):[];if(l=f=i=i||o,3!==i.nodeType&&8!==i.nodeType&&!nt.test(g+b.event.triggered)&&(g.indexOf(".")>=0&&(m=g.split("."),g=m.shift(),m.sort()),u=0>g.indexOf(":")&&"on"+g,n=n[b.expando]?n:new b.Event(g,"object"==typeof 
n&&n),n.isTrigger=!0,n.namespace=m.join("."),n.namespace_re=n.namespace?RegExp("(^|\\.)"+m.join("\\.(?:.*\\.|)")+"(\\.|$)"):null,n.result=t,n.target||(n.target=i),r=null==r?[n]:b.makeArray(r,[n]),p=b.event.special[g]||{},a||!p.trigger||p.trigger.apply(i,r)!==!1)){if(!a&&!p.noBubble&&!b.isWindow(i)){for(c=p.delegateType||g,nt.test(c+g)||(l=l.parentNode);l;l=l.parentNode)h.push(l),f=l;f===(i.ownerDocument||o)&&h.push(f.defaultView||f.parentWindow||e)}d=0;while((l=h[d++])&&!n.isPropagationStopped())n.type=d>1?c:p.bindType||g,s=(b._data(l,"events")||{})[n.type]&&b._data(l,"handle"),s&&s.apply(l,r),s=u&&l[u],s&&b.acceptData(l)&&s.apply&&s.apply(l,r)===!1&&n.preventDefault();if(n.type=g,!(a||n.isDefaultPrevented()||p._default&&p._default.apply(i.ownerDocument,r)!==!1||"click"===g&&b.nodeName(i,"a")||!b.acceptData(i)||!u||!i[g]||b.isWindow(i))){f=i[u],f&&(i[u]=null),b.event.triggered=g;try{i[g]()}catch(v){}b.event.triggered=t,f&&(i[u]=f)}return n.result}},dispatch:function(e){e=b.event.fix(e);var n,r,i,o,a,s=[],u=h.call(arguments),l=(b._data(this,"events")||{})[e.type]||[],c=b.event.special[e.type]||{};if(u[0]=e,e.delegateTarget=this,!c.preDispatch||c.preDispatch.call(this,e)!==!1){s=b.event.handlers.call(this,e,l),n=0;while((o=s[n++])&&!e.isPropagationStopped()){e.currentTarget=o.elem,a=0;while((i=o.handlers[a++])&&!e.isImmediatePropagationStopped())(!e.namespace_re||e.namespace_re.test(i.namespace))&&(e.handleObj=i,e.data=i.data,r=((b.event.special[i.origType]||{}).handle||i.handler).apply(o.elem,u),r!==t&&(e.result=r)===!1&&(e.preventDefault(),e.stopPropagation()))}return c.postDispatch&&c.postDispatch.call(this,e),e.result}},handlers:function(e,n){var r,i,o,a,s=[],u=n.delegateCount,l=e.target;if(u&&l.nodeType&&(!e.button||"click"!==e.type))for(;l!=this;l=l.parentNode||this)if(1===l.nodeType&&(l.disabled!==!0||"click"!==e.type)){for(o=[],a=0;u>a;a++)i=n[a],r=i.selector+" ",o[r]===t&&(o[r]=i.needsContext?b(r,this).index(l)>=0:b.find(r,this,null,[l]).length),o[r]&&o.push(i);o.length&&s.push({elem:l,handlers:o})}return n.length>u&&s.push({elem:this,handlers:n.slice(u)}),s},fix:function(e){if(e[b.expando])return e;var t,n,r,i=e.type,a=e,s=this.fixHooks[i];s||(this.fixHooks[i]=s=tt.test(i)?this.mouseHooks:et.test(i)?this.keyHooks:{}),r=s.props?this.props.concat(s.props):this.props,e=new b.Event(a),t=r.length;while(t--)n=r[t],e[n]=a[n];return e.target||(e.target=a.srcElement||o),3===e.target.nodeType&&(e.target=e.target.parentNode),e.metaKey=!!e.metaKey,s.filter?s.filter(e,a):e},props:"altKey bubbles cancelable ctrlKey currentTarget eventPhase metaKey relatedTarget shiftKey target timeStamp view which".split(" "),fixHooks:{},keyHooks:{props:"char charCode key keyCode".split(" "),filter:function(e,t){return null==e.which&&(e.which=null!=t.charCode?t.charCode:t.keyCode),e}},mouseHooks:{props:"button buttons clientX clientY fromElement offsetX offsetY pageX pageY screenX screenY toElement".split(" "),filter:function(e,n){var r,i,a,s=n.button,u=n.fromElement;return null==e.pageX&&null!=n.clientX&&(i=e.target.ownerDocument||o,a=i.documentElement,r=i.body,e.pageX=n.clientX+(a&&a.scrollLeft||r&&r.scrollLeft||0)-(a&&a.clientLeft||r&&r.clientLeft||0),e.pageY=n.clientY+(a&&a.scrollTop||r&&r.scrollTop||0)-(a&&a.clientTop||r&&r.clientTop||0)),!e.relatedTarget&&u&&(e.relatedTarget=u===e.target?n.toElement:u),e.which||s===t||(e.which=1&s?1:2&s?3:4&s?2:0),e}},special:{load:{noBubble:!0},click:{trigger:function(){return 
b.nodeName(this,"input")&&"checkbox"===this.type&&this.click?(this.click(),!1):t}},focus:{trigger:function(){if(this!==o.activeElement&&this.focus)try{return this.focus(),!1}catch(e){}},delegateType:"focusin"},blur:{trigger:function(){return this===o.activeElement&&this.blur?(this.blur(),!1):t},delegateType:"focusout"},beforeunload:{postDispatch:function(e){e.result!==t&&(e.originalEvent.returnValue=e.result)}}},simulate:function(e,t,n,r){var i=b.extend(new b.Event,n,{type:e,isSimulated:!0,originalEvent:{}});r?b.event.trigger(i,null,t):b.event.dispatch.call(t,i),i.isDefaultPrevented()&&n.preventDefault()}},b.removeEvent=o.removeEventListener?function(e,t,n){e.removeEventListener&&e.removeEventListener(t,n,!1)}:function(e,t,n){var r="on"+t;e.detachEvent&&(typeof e[r]===i&&(e[r]=null),e.detachEvent(r,n))},b.Event=function(e,n){return this instanceof b.Event?(e&&e.type?(this.originalEvent=e,this.type=e.type,this.isDefaultPrevented=e.defaultPrevented||e.returnValue===!1||e.getPreventDefault&&e.getPreventDefault()?it:ot):this.type=e,n&&b.extend(this,n),this.timeStamp=e&&e.timeStamp||b.now(),this[b.expando]=!0,t):new b.Event(e,n)},b.Event.prototype={isDefaultPrevented:ot,isPropagationStopped:ot,isImmediatePropagationStopped:ot,preventDefault:function(){var e=this.originalEvent;this.isDefaultPrevented=it,e&&(e.preventDefault?e.preventDefault():e.returnValue=!1)},stopPropagation:function(){var e=this.originalEvent;this.isPropagationStopped=it,e&&(e.stopPropagation&&e.stopPropagation(),e.cancelBubble=!0)},stopImmediatePropagation:function(){this.isImmediatePropagationStopped=it,this.stopPropagation()}},b.each({mouseenter:"mouseover",mouseleave:"mouseout"},function(e,t){b.event.special[e]={delegateType:t,bindType:t,handle:function(e){var n,r=this,i=e.relatedTarget,o=e.handleObj; - return(!i||i!==r&&!b.contains(r,i))&&(e.type=o.origType,n=o.handler.apply(this,arguments),e.type=t),n}}}),b.support.submitBubbles||(b.event.special.submit={setup:function(){return b.nodeName(this,"form")?!1:(b.event.add(this,"click._submit keypress._submit",function(e){var n=e.target,r=b.nodeName(n,"input")||b.nodeName(n,"button")?n.form:t;r&&!b._data(r,"submitBubbles")&&(b.event.add(r,"submit._submit",function(e){e._submit_bubble=!0}),b._data(r,"submitBubbles",!0))}),t)},postDispatch:function(e){e._submit_bubble&&(delete e._submit_bubble,this.parentNode&&!e.isTrigger&&b.event.simulate("submit",this.parentNode,e,!0))},teardown:function(){return b.nodeName(this,"form")?!1:(b.event.remove(this,"._submit"),t)}}),b.support.changeBubbles||(b.event.special.change={setup:function(){return Z.test(this.nodeName)?(("checkbox"===this.type||"radio"===this.type)&&(b.event.add(this,"propertychange._change",function(e){"checked"===e.originalEvent.propertyName&&(this._just_changed=!0)}),b.event.add(this,"click._change",function(e){this._just_changed&&!e.isTrigger&&(this._just_changed=!1),b.event.simulate("change",this,e,!0)})),!1):(b.event.add(this,"beforeactivate._change",function(e){var t=e.target;Z.test(t.nodeName)&&!b._data(t,"changeBubbles")&&(b.event.add(t,"change._change",function(e){!this.parentNode||e.isSimulated||e.isTrigger||b.event.simulate("change",this.parentNode,e,!0)}),b._data(t,"changeBubbles",!0))}),t)},handle:function(e){var n=e.target;return this!==n||e.isSimulated||e.isTrigger||"radio"!==n.type&&"checkbox"!==n.type?e.handleObj.handler.apply(this,arguments):t},teardown:function(){return 
b.event.remove(this,"._change"),!Z.test(this.nodeName)}}),b.support.focusinBubbles||b.each({focus:"focusin",blur:"focusout"},function(e,t){var n=0,r=function(e){b.event.simulate(t,e.target,b.event.fix(e),!0)};b.event.special[t]={setup:function(){0===n++&&o.addEventListener(e,r,!0)},teardown:function(){0===--n&&o.removeEventListener(e,r,!0)}}}),b.fn.extend({on:function(e,n,r,i,o){var a,s;if("object"==typeof e){"string"!=typeof n&&(r=r||n,n=t);for(a in e)this.on(a,n,r,e[a],o);return this}if(null==r&&null==i?(i=n,r=n=t):null==i&&("string"==typeof n?(i=r,r=t):(i=r,r=n,n=t)),i===!1)i=ot;else if(!i)return this;return 1===o&&(s=i,i=function(e){return b().off(e),s.apply(this,arguments)},i.guid=s.guid||(s.guid=b.guid++)),this.each(function(){b.event.add(this,e,i,r,n)})},one:function(e,t,n,r){return this.on(e,t,n,r,1)},off:function(e,n,r){var i,o;if(e&&e.preventDefault&&e.handleObj)return i=e.handleObj,b(e.delegateTarget).off(i.namespace?i.origType+"."+i.namespace:i.origType,i.selector,i.handler),this;if("object"==typeof e){for(o in e)this.off(o,n,e[o]);return this}return(n===!1||"function"==typeof n)&&(r=n,n=t),r===!1&&(r=ot),this.each(function(){b.event.remove(this,e,r,n)})},bind:function(e,t,n){return this.on(e,null,t,n)},unbind:function(e,t){return this.off(e,null,t)},delegate:function(e,t,n,r){return this.on(t,e,n,r)},undelegate:function(e,t,n){return 1===arguments.length?this.off(e,"**"):this.off(t,e||"**",n)},trigger:function(e,t){return this.each(function(){b.event.trigger(e,t,this)})},triggerHandler:function(e,n){var r=this[0];return r?b.event.trigger(e,n,r,!0):t}}),function(e,t){var n,r,i,o,a,s,u,l,c,p,f,d,h,g,m,y,v,x="sizzle"+-new Date,w=e.document,T={},N=0,C=0,k=it(),E=it(),S=it(),A=typeof t,j=1<<31,D=[],L=D.pop,H=D.push,q=D.slice,M=D.indexOf||function(e){var t=0,n=this.length;for(;n>t;t++)if(this[t]===e)return t;return-1},_="[\\x20\\t\\r\\n\\f]",F="(?:\\\\.|[\\w-]|[^\\x00-\\xa0])+",O=F.replace("w","w#"),B="([*^$|!~]?=)",P="\\["+_+"*("+F+")"+_+"*(?:"+B+_+"*(?:(['\"])((?:\\\\.|[^\\\\])*?)\\3|("+O+")|)|)"+_+"*\\]",R=":("+F+")(?:\\(((['\"])((?:\\\\.|[^\\\\])*?)\\3|((?:\\\\.|[^\\\\()[\\]]|"+P.replace(3,8)+")*)|.*)\\)|)",W=RegExp("^"+_+"+|((?:^|[^\\\\])(?:\\\\.)*)"+_+"+$","g"),$=RegExp("^"+_+"*,"+_+"*"),I=RegExp("^"+_+"*([\\x20\\t\\r\\n\\f>+~])"+_+"*"),z=RegExp(R),X=RegExp("^"+O+"$"),U={ID:RegExp("^#("+F+")"),CLASS:RegExp("^\\.("+F+")"),NAME:RegExp("^\\[name=['\"]?("+F+")['\"]?\\]"),TAG:RegExp("^("+F.replace("w","w*")+")"),ATTR:RegExp("^"+P),PSEUDO:RegExp("^"+R),CHILD:RegExp("^:(only|first|last|nth|nth-last)-(child|of-type)(?:\\("+_+"*(even|odd|(([+-]|)(\\d*)n|)"+_+"*(?:([+-]|)"+_+"*(\\d+)|))"+_+"*\\)|)","i"),needsContext:RegExp("^"+_+"*[>+~]|:(even|odd|eq|gt|lt|nth|first|last)(?:\\("+_+"*((?:-\\d)?\\d*)"+_+"*\\)|)(?=[^-]|$)","i")},V=/[\x20\t\r\n\f]*[+~]/,Y=/^[^{]+\{\s*\[native code/,J=/^(?:#([\w-]+)|(\w+)|\.([\w-]+))$/,G=/^(?:input|select|textarea|button)$/i,Q=/^h\d$/i,K=/'|\\/g,Z=/\=[\x20\t\r\n\f]*([^'"\]]*)[\x20\t\r\n\f]*\]/g,et=/\\([\da-fA-F]{1,6}[\x20\t\r\n\f]?|.)/g,tt=function(e,t){var n="0x"+t-65536;return n!==n?t:0>n?String.fromCharCode(n+65536):String.fromCharCode(55296|n>>10,56320|1023&n)};try{q.call(w.documentElement.childNodes,0)[0].nodeType}catch(nt){q=function(e){var t,n=[];while(t=this[e++])n.push(t);return n}}function rt(e){return Y.test(e+"")}function it(){var e,t=[];return e=function(n,r){return t.push(n+=" ")>i.cacheLength&&delete e[t.shift()],e[n]=r}}function ot(e){return e[x]=!0,e}function at(e){var t=p.createElement("div");try{return 
e(t)}catch(n){return!1}finally{t=null}}function st(e,t,n,r){var i,o,a,s,u,l,f,g,m,v;if((t?t.ownerDocument||t:w)!==p&&c(t),t=t||p,n=n||[],!e||"string"!=typeof e)return n;if(1!==(s=t.nodeType)&&9!==s)return[];if(!d&&!r){if(i=J.exec(e))if(a=i[1]){if(9===s){if(o=t.getElementById(a),!o||!o.parentNode)return n;if(o.id===a)return n.push(o),n}else if(t.ownerDocument&&(o=t.ownerDocument.getElementById(a))&&y(t,o)&&o.id===a)return n.push(o),n}else{if(i[2])return H.apply(n,q.call(t.getElementsByTagName(e),0)),n;if((a=i[3])&&T.getByClassName&&t.getElementsByClassName)return H.apply(n,q.call(t.getElementsByClassName(a),0)),n}if(T.qsa&&!h.test(e)){if(f=!0,g=x,m=t,v=9===s&&e,1===s&&"object"!==t.nodeName.toLowerCase()){l=ft(e),(f=t.getAttribute("id"))?g=f.replace(K,"\\$&"):t.setAttribute("id",g),g="[id='"+g+"'] ",u=l.length;while(u--)l[u]=g+dt(l[u]);m=V.test(e)&&t.parentNode||t,v=l.join(",")}if(v)try{return H.apply(n,q.call(m.querySelectorAll(v),0)),n}catch(b){}finally{f||t.removeAttribute("id")}}}return wt(e.replace(W,"$1"),t,n,r)}a=st.isXML=function(e){var t=e&&(e.ownerDocument||e).documentElement;return t?"HTML"!==t.nodeName:!1},c=st.setDocument=function(e){var n=e?e.ownerDocument||e:w;return n!==p&&9===n.nodeType&&n.documentElement?(p=n,f=n.documentElement,d=a(n),T.tagNameNoComments=at(function(e){return e.appendChild(n.createComment("")),!e.getElementsByTagName("*").length}),T.attributes=at(function(e){e.innerHTML="";var t=typeof e.lastChild.getAttribute("multiple");return"boolean"!==t&&"string"!==t}),T.getByClassName=at(function(e){return e.innerHTML="",e.getElementsByClassName&&e.getElementsByClassName("e").length?(e.lastChild.className="e",2===e.getElementsByClassName("e").length):!1}),T.getByName=at(function(e){e.id=x+0,e.innerHTML="
    ",f.insertBefore(e,f.firstChild);var t=n.getElementsByName&&n.getElementsByName(x).length===2+n.getElementsByName(x+0).length;return T.getIdNotName=!n.getElementById(x),f.removeChild(e),t}),i.attrHandle=at(function(e){return e.innerHTML="",e.firstChild&&typeof e.firstChild.getAttribute!==A&&"#"===e.firstChild.getAttribute("href")})?{}:{href:function(e){return e.getAttribute("href",2)},type:function(e){return e.getAttribute("type")}},T.getIdNotName?(i.find.ID=function(e,t){if(typeof t.getElementById!==A&&!d){var n=t.getElementById(e);return n&&n.parentNode?[n]:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){return e.getAttribute("id")===t}}):(i.find.ID=function(e,n){if(typeof n.getElementById!==A&&!d){var r=n.getElementById(e);return r?r.id===e||typeof r.getAttributeNode!==A&&r.getAttributeNode("id").value===e?[r]:t:[]}},i.filter.ID=function(e){var t=e.replace(et,tt);return function(e){var n=typeof e.getAttributeNode!==A&&e.getAttributeNode("id");return n&&n.value===t}}),i.find.TAG=T.tagNameNoComments?function(e,n){return typeof n.getElementsByTagName!==A?n.getElementsByTagName(e):t}:function(e,t){var n,r=[],i=0,o=t.getElementsByTagName(e);if("*"===e){while(n=o[i++])1===n.nodeType&&r.push(n);return r}return o},i.find.NAME=T.getByName&&function(e,n){return typeof n.getElementsByName!==A?n.getElementsByName(name):t},i.find.CLASS=T.getByClassName&&function(e,n){return typeof n.getElementsByClassName===A||d?t:n.getElementsByClassName(e)},g=[],h=[":focus"],(T.qsa=rt(n.querySelectorAll))&&(at(function(e){e.innerHTML="",e.querySelectorAll("[selected]").length||h.push("\\["+_+"*(?:checked|disabled|ismap|multiple|readonly|selected|value)"),e.querySelectorAll(":checked").length||h.push(":checked")}),at(function(e){e.innerHTML="",e.querySelectorAll("[i^='']").length&&h.push("[*^$]="+_+"*(?:\"\"|'')"),e.querySelectorAll(":enabled").length||h.push(":enabled",":disabled"),e.querySelectorAll("*,:x"),h.push(",.*:")})),(T.matchesSelector=rt(m=f.matchesSelector||f.mozMatchesSelector||f.webkitMatchesSelector||f.oMatchesSelector||f.msMatchesSelector))&&at(function(e){T.disconnectedMatch=m.call(e,"div"),m.call(e,"[s!='']:x"),g.push("!=",R)}),h=RegExp(h.join("|")),g=RegExp(g.join("|")),y=rt(f.contains)||f.compareDocumentPosition?function(e,t){var n=9===e.nodeType?e.documentElement:e,r=t&&t.parentNode;return e===r||!(!r||1!==r.nodeType||!(n.contains?n.contains(r):e.compareDocumentPosition&&16&e.compareDocumentPosition(r)))}:function(e,t){if(t)while(t=t.parentNode)if(t===e)return!0;return!1},v=f.compareDocumentPosition?function(e,t){var r;return e===t?(u=!0,0):(r=t.compareDocumentPosition&&e.compareDocumentPosition&&e.compareDocumentPosition(t))?1&r||e.parentNode&&11===e.parentNode.nodeType?e===n||y(w,e)?-1:t===n||y(w,t)?1:0:4&r?-1:1:e.compareDocumentPosition?-1:1}:function(e,t){var r,i=0,o=e.parentNode,a=t.parentNode,s=[e],l=[t];if(e===t)return u=!0,0;if(!o||!a)return e===n?-1:t===n?1:o?-1:a?1:0;if(o===a)return ut(e,t);r=e;while(r=r.parentNode)s.unshift(r);r=t;while(r=r.parentNode)l.unshift(r);while(s[i]===l[i])i++;return i?ut(s[i],l[i]):s[i]===w?-1:l[i]===w?1:0},u=!1,[0,0].sort(v),T.detectDuplicates=u,p):p},st.matches=function(e,t){return st(e,null,null,t)},st.matchesSelector=function(e,t){if((e.ownerDocument||e)!==p&&c(e),t=t.replace(Z,"='$1']"),!(!T.matchesSelector||d||g&&g.test(t)||h.test(t)))try{var n=m.call(e,t);if(n||T.disconnectedMatch||e.document&&11!==e.document.nodeType)return n}catch(r){}return 
st(t,p,null,[e]).length>0},st.contains=function(e,t){return(e.ownerDocument||e)!==p&&c(e),y(e,t)},st.attr=function(e,t){var n;return(e.ownerDocument||e)!==p&&c(e),d||(t=t.toLowerCase()),(n=i.attrHandle[t])?n(e):d||T.attributes?e.getAttribute(t):((n=e.getAttributeNode(t))||e.getAttribute(t))&&e[t]===!0?t:n&&n.specified?n.value:null},st.error=function(e){throw Error("Syntax error, unrecognized expression: "+e)},st.uniqueSort=function(e){var t,n=[],r=1,i=0;if(u=!T.detectDuplicates,e.sort(v),u){for(;t=e[r];r++)t===e[r-1]&&(i=n.push(r));while(i--)e.splice(n[i],1)}return e};function ut(e,t){var n=t&&e,r=n&&(~t.sourceIndex||j)-(~e.sourceIndex||j);if(r)return r;if(n)while(n=n.nextSibling)if(n===t)return-1;return e?1:-1}function lt(e){return function(t){var n=t.nodeName.toLowerCase();return"input"===n&&t.type===e}}function ct(e){return function(t){var n=t.nodeName.toLowerCase();return("input"===n||"button"===n)&&t.type===e}}function pt(e){return ot(function(t){return t=+t,ot(function(n,r){var i,o=e([],n.length,t),a=o.length;while(a--)n[i=o[a]]&&(n[i]=!(r[i]=n[i]))})})}o=st.getText=function(e){var t,n="",r=0,i=e.nodeType;if(i){if(1===i||9===i||11===i){if("string"==typeof e.textContent)return e.textContent;for(e=e.firstChild;e;e=e.nextSibling)n+=o(e)}else if(3===i||4===i)return e.nodeValue}else for(;t=e[r];r++)n+=o(t);return n},i=st.selectors={cacheLength:50,createPseudo:ot,match:U,find:{},relative:{">":{dir:"parentNode",first:!0}," ":{dir:"parentNode"},"+":{dir:"previousSibling",first:!0},"~":{dir:"previousSibling"}},preFilter:{ATTR:function(e){return e[1]=e[1].replace(et,tt),e[3]=(e[4]||e[5]||"").replace(et,tt),"~="===e[2]&&(e[3]=" "+e[3]+" "),e.slice(0,4)},CHILD:function(e){return e[1]=e[1].toLowerCase(),"nth"===e[1].slice(0,3)?(e[3]||st.error(e[0]),e[4]=+(e[4]?e[5]+(e[6]||1):2*("even"===e[3]||"odd"===e[3])),e[5]=+(e[7]+e[8]||"odd"===e[3])):e[3]&&st.error(e[0]),e},PSEUDO:function(e){var t,n=!e[5]&&e[2];return U.CHILD.test(e[0])?null:(e[4]?e[2]=e[4]:n&&z.test(n)&&(t=ft(n,!0))&&(t=n.indexOf(")",n.length-t)-n.length)&&(e[0]=e[0].slice(0,t),e[2]=n.slice(0,t)),e.slice(0,3))}},filter:{TAG:function(e){return"*"===e?function(){return!0}:(e=e.replace(et,tt).toLowerCase(),function(t){return t.nodeName&&t.nodeName.toLowerCase()===e})},CLASS:function(e){var t=k[e+" "];return t||(t=RegExp("(^|"+_+")"+e+"("+_+"|$)"))&&k(e,function(e){return t.test(e.className||typeof e.getAttribute!==A&&e.getAttribute("class")||"")})},ATTR:function(e,t,n){return function(r){var i=st.attr(r,e);return null==i?"!="===t:t?(i+="","="===t?i===n:"!="===t?i!==n:"^="===t?n&&0===i.indexOf(n):"*="===t?n&&i.indexOf(n)>-1:"$="===t?n&&i.slice(-n.length)===n:"~="===t?(" "+i+" ").indexOf(n)>-1:"|="===t?i===n||i.slice(0,n.length+1)===n+"-":!1):!0}},CHILD:function(e,t,n,r,i){var o="nth"!==e.slice(0,3),a="last"!==e.slice(-4),s="of-type"===t;return 1===r&&0===i?function(e){return!!e.parentNode}:function(t,n,u){var l,c,p,f,d,h,g=o!==a?"nextSibling":"previousSibling",m=t.parentNode,y=s&&t.nodeName.toLowerCase(),v=!u&&!s;if(m){if(o){while(g){p=t;while(p=p[g])if(s?p.nodeName.toLowerCase()===y:1===p.nodeType)return!1;h=g="only"===e&&!h&&"nextSibling"}return!0}if(h=[a?m.firstChild:m.lastChild],a&&v){c=m[x]||(m[x]={}),l=c[e]||[],d=l[0]===N&&l[1],f=l[0]===N&&l[2],p=d&&m.childNodes[d];while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if(1===p.nodeType&&++f&&p===t){c[e]=[N,d,f];break}}else if(v&&(l=(t[x]||(t[x]={}))[e])&&l[0]===N)f=l[1];else 
while(p=++d&&p&&p[g]||(f=d=0)||h.pop())if((s?p.nodeName.toLowerCase()===y:1===p.nodeType)&&++f&&(v&&((p[x]||(p[x]={}))[e]=[N,f]),p===t))break;return f-=i,f===r||0===f%r&&f/r>=0}}},PSEUDO:function(e,t){var n,r=i.pseudos[e]||i.setFilters[e.toLowerCase()]||st.error("unsupported pseudo: "+e);return r[x]?r(t):r.length>1?(n=[e,e,"",t],i.setFilters.hasOwnProperty(e.toLowerCase())?ot(function(e,n){var i,o=r(e,t),a=o.length;while(a--)i=M.call(e,o[a]),e[i]=!(n[i]=o[a])}):function(e){return r(e,0,n)}):r}},pseudos:{not:ot(function(e){var t=[],n=[],r=s(e.replace(W,"$1"));return r[x]?ot(function(e,t,n,i){var o,a=r(e,null,i,[]),s=e.length;while(s--)(o=a[s])&&(e[s]=!(t[s]=o))}):function(e,i,o){return t[0]=e,r(t,null,o,n),!n.pop()}}),has:ot(function(e){return function(t){return st(e,t).length>0}}),contains:ot(function(e){return function(t){return(t.textContent||t.innerText||o(t)).indexOf(e)>-1}}),lang:ot(function(e){return X.test(e||"")||st.error("unsupported lang: "+e),e=e.replace(et,tt).toLowerCase(),function(t){var n;do if(n=d?t.getAttribute("xml:lang")||t.getAttribute("lang"):t.lang)return n=n.toLowerCase(),n===e||0===n.indexOf(e+"-");while((t=t.parentNode)&&1===t.nodeType);return!1}}),target:function(t){var n=e.location&&e.location.hash;return n&&n.slice(1)===t.id},root:function(e){return e===f},focus:function(e){return e===p.activeElement&&(!p.hasFocus||p.hasFocus())&&!!(e.type||e.href||~e.tabIndex)},enabled:function(e){return e.disabled===!1},disabled:function(e){return e.disabled===!0},checked:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&!!e.checked||"option"===t&&!!e.selected},selected:function(e){return e.parentNode&&e.parentNode.selectedIndex,e.selected===!0},empty:function(e){for(e=e.firstChild;e;e=e.nextSibling)if(e.nodeName>"@"||3===e.nodeType||4===e.nodeType)return!1;return!0},parent:function(e){return!i.pseudos.empty(e)},header:function(e){return Q.test(e.nodeName)},input:function(e){return G.test(e.nodeName)},button:function(e){var t=e.nodeName.toLowerCase();return"input"===t&&"button"===e.type||"button"===t},text:function(e){var t;return"input"===e.nodeName.toLowerCase()&&"text"===e.type&&(null==(t=e.getAttribute("type"))||t.toLowerCase()===e.type)},first:pt(function(){return[0]}),last:pt(function(e,t){return[t-1]}),eq:pt(function(e,t,n){return[0>n?n+t:n]}),even:pt(function(e,t){var n=0;for(;t>n;n+=2)e.push(n);return e}),odd:pt(function(e,t){var n=1;for(;t>n;n+=2)e.push(n);return e}),lt:pt(function(e,t,n){var r=0>n?n+t:n;for(;--r>=0;)e.push(r);return e}),gt:pt(function(e,t,n){var r=0>n?n+t:n;for(;t>++r;)e.push(r);return e})}};for(n in{radio:!0,checkbox:!0,file:!0,password:!0,image:!0})i.pseudos[n]=lt(n);for(n in{submit:!0,reset:!0})i.pseudos[n]=ct(n);function ft(e,t){var n,r,o,a,s,u,l,c=E[e+" "];if(c)return t?0:c.slice(0);s=e,u=[],l=i.preFilter;while(s){(!n||(r=$.exec(s)))&&(r&&(s=s.slice(r[0].length)||s),u.push(o=[])),n=!1,(r=I.exec(s))&&(n=r.shift(),o.push({value:n,type:r[0].replace(W," ")}),s=s.slice(n.length));for(a in i.filter)!(r=U[a].exec(s))||l[a]&&!(r=l[a](r))||(n=r.shift(),o.push({value:n,type:a,matches:r}),s=s.slice(n.length));if(!n)break}return t?s.length:s?st.error(e):E(e,u).slice(0)}function dt(e){var t=0,n=e.length,r="";for(;n>t;t++)r+=e[t].value;return r}function ht(e,t,n){var i=t.dir,o=n&&"parentNode"===i,a=C++;return t.first?function(t,n,r){while(t=t[i])if(1===t.nodeType||o)return e(t,n,r)}:function(t,n,s){var u,l,c,p=N+" "+a;if(s){while(t=t[i])if((1===t.nodeType||o)&&e(t,n,s))return!0}else 