diff --git a/.travis.yml b/.travis.yml
index ff55d410ae3..8fdc12d27c7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -3,6 +3,7 @@ language: python
dist: xenial
python:
- "2.7"
+ - "3.6"
branches:
only:
@@ -10,7 +11,7 @@ branches:
services:
- docker
-
+
addons:
apt:
packages:
@@ -23,6 +24,7 @@ before_install:
install:
- "pip install demjson"
- "pip install -r requirements.txt"
+ - if [ "$TRAVIS_PYTHON_VERSION" = "3.6" ]; then pip install -r requirements3.txt; fi
env:
- MAKE_TARGET=test.syntax SHARD=0 SHARDS=1
diff --git a/openedx.yaml b/openedx.yaml
index f3f93b22192..4aa3eda9ef0 100644
--- a/openedx.yaml
+++ b/openedx.yaml
@@ -10,3 +10,4 @@ supporting_teams:
oeps:
oep-2: true
+ oep-7: true
diff --git a/playbooks/active_instances_in_asg.py b/playbooks/active_instances_in_asg.py
index 594b316dd81..c7492321ea6 100755
--- a/playbooks/active_instances_in_asg.py
+++ b/playbooks/active_instances_in_asg.py
@@ -20,6 +20,7 @@
"""
from __future__ import print_function
+from __future__ import absolute_import
import argparse
import botocore.session
import botocore.exceptions
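
Note on the pattern: the two __future__ imports added across this PR are the standard first step of a straddling 2/3 port. print_function turns print into a function on Python 2.7, and absolute_import gives Python 2 the same import resolution as Python 3. A minimal standalone sketch (values invented, not from this repo):

    from __future__ import absolute_import
    from __future__ import print_function

    # With print_function, this is one function call on both 2.7 and 3.x;
    # without it, Python 2 would print a tuple here.
    print("terminating", 2, "instances")

    # With absolute_import, "import json" always resolves to the stdlib json,
    # even if a sibling module named json.py exists inside the package.
    import json
    print(json.dumps({"ok": True}))
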
diff --git a/playbooks/callback_plugins/task_timing.py b/playbooks/callback_plugins/task_timing.py
index 961c9d2f95b..6c414967a6e 100644
--- a/playbooks/callback_plugins/task_timing.py
+++ b/playbooks/callback_plugins/task_timing.py
@@ -1,5 +1,6 @@
# From https://github.com/ansible/ansible/issues/31527#issuecomment-335495855
from __future__ import (absolute_import, division, print_function)
+import six
__metaclass__ = type
import collections
@@ -193,8 +194,8 @@ def log_play(self, playbook_name, playbook_timestamp, results):
# Sort the tasks by their running time
sorted_results = sorted(
- results.items(),
- key=lambda (task, timestamp): timestamp.duration,
+ list(results.items()),
+ key=lambda task_timestamp: task_timestamp[1].duration,
reverse=True
)
@@ -275,7 +276,7 @@ def playbook_on_stats(self, stats):
# Flatten the stats so that multiple runs of the same task get listed
# individually.
flat_stats = {}
- for task_name, task_runs in self.stats.iteritems():
+ for task_name, task_runs in six.iteritems(self.stats):
if len(task_runs) == 1:
flat_stats[task_name] = task_runs[0]
else:
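
Two distinct Python 3 breakages are handled in this file: PEP 3113 removed tuple parameters, so `lambda (task, timestamp): ...` is a SyntaxError on Python 3 and the key function must index into the pair instead, and dict.iteritems() is gone, which six.iteritems() abstracts over. A small sketch with invented timings:

    from __future__ import print_function
    import six

    durations = {"install packages": 41.0, "restart nginx": 2.5}

    # Py2-only: sorted(durations.items(), key=lambda (name, secs): secs)
    # Portable: index into the (key, value) tuple instead.
    slowest_first = sorted(durations.items(), key=lambda kv: kv[1], reverse=True)
    print(slowest_first)

    # six.iteritems(d) calls d.iteritems() on Py2 and d.items() on Py3.
    for name, secs in six.iteritems(durations):
        print(name, secs)
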
diff --git a/playbooks/ec2.py b/playbooks/ec2.py
index a5315f951b5..821a0c26745 100755
--- a/playbooks/ec2.py
+++ b/playbooks/ec2.py
@@ -108,6 +108,8 @@
######################################################################
+from __future__ import absolute_import
+from __future__ import print_function
import sys
import os
import argparse
@@ -117,8 +119,10 @@
from boto import ec2
from boto import rds
from boto import route53
-import ConfigParser
+import six.moves.configparser
import traceback
+import six
+from six.moves import range
try:
import json
@@ -157,7 +161,7 @@ def __init__(self):
data_to_print = self.get_inventory_from_cache()
else:
data_to_print = self.json_format_dict(self.inventory, True)
- print data_to_print
+ print(data_to_print)
def is_cache_valid(self):
@@ -181,7 +185,7 @@ def is_cache_valid(self):
def read_settings(self):
''' Reads the settings from the ec2.ini file '''
- config = ConfigParser.SafeConfigParser()
+ config = six.moves.configparser.SafeConfigParser()
config.read(self.args.inifile)
# is eucalyptus?
@@ -290,7 +294,7 @@ def get_instances_by_region(self, region):
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ print(("region name: %s likely not supported, or AWS is down. connection to region failed." % region))
sys.exit(1)
reservations = conn.get_all_instances()
@@ -301,8 +305,8 @@ def get_instances_by_region(self, region):
except boto.exception.BotoServerError as e:
if not self.eucalyptus:
- print "Looks like AWS is down again:"
- print e
+ print("Looks like AWS is down again:")
+ print(e)
sys.exit(1)
def get_rds_instances_by_region(self, region):
@@ -316,8 +320,8 @@ def get_rds_instances_by_region(self, region):
for instance in instances:
self.add_rds_instance(instance, region)
except boto.exception.BotoServerError as e:
- print "Looks like AWS RDS is down: "
- print e
+ print("Looks like AWS RDS is down: ")
+ print(e)
sys.exit(1)
def get_instance(self, region, instance_id):
@@ -330,7 +334,7 @@ def get_instance(self, region, instance_id):
# connect_to_region will fail "silently" by returning None if the region name is wrong or not supported
if conn is None:
- print("region name: %s likely not supported, or AWS is down. connection to region failed." % region)
+ print(("region name: %s likely not supported, or AWS is down. connection to region failed." % region))
sys.exit(1)
reservations = conn.get_all_instances([instance_id])
@@ -382,12 +386,12 @@ def add_instance(self, instance, region):
key = self.to_safe("security_group_" + group.name)
self.push(self.inventory, key, dest)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
+ print('Package boto seems a bit older.')
+ print('Please upgrade boto >= 2.3.0.')
sys.exit(1)
# Inventory: Group by tag keys
- for k, v in instance.tags.iteritems():
+ for k, v in six.iteritems(instance.tags):
key = self.to_safe("tag_" + k + "=" + v)
self.push(self.inventory, key, dest)
self.keep_first(self.inventory, 'first_in_' + key, dest)
@@ -439,8 +443,8 @@ def add_rds_instance(self, instance, region):
key = self.to_safe("security_group_" + instance.security_group.name)
self.push(self.inventory, key, dest)
except AttributeError:
- print 'Package boto seems a bit older.'
- print 'Please upgrade boto >= 2.3.0.'
+ print('Package boto seems a bit older.')
+ print('Please upgrade boto >= 2.3.0.')
sys.exit(1)
# Inventory: Group by engine
@@ -523,14 +527,14 @@ def get_host_info(self):
# Handle complex types
if type(value) in [int, bool]:
instance_vars[key] = value
- elif type(value) in [str, unicode]:
+ elif type(value) in [str, six.text_type]:
instance_vars[key] = value.strip()
elif type(value) == type(None):
instance_vars[key] = ''
elif key == 'ec2_region':
instance_vars[key] = value.name
elif key == 'ec2_tags':
- for k, v in value.iteritems():
+ for k, v in six.iteritems(value):
key = self.to_safe('ec2_tag_' + k)
instance_vars[key] = v
elif key == 'ec2_groups':
@@ -615,7 +619,7 @@ def json_format_dict(self, data, pretty=False):
# Run the script
RETRIES = 3
-for _ in xrange(RETRIES):
+for _ in range(RETRIES):
try:
Ec2Inventory()
break
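
The ec2.py changes lean on six.moves for stdlib renames: ConfigParser became configparser and xrange became range. (On Python 3, the SafeConfigParser used above still works but is a deprecated alias of ConfigParser.) A sketch of both renames, with a hypothetical ini path:

    from __future__ import print_function
    from six.moves import configparser, range

    # six.moves.configparser is ConfigParser on Py2 and configparser on Py3.
    config = configparser.RawConfigParser()
    config.read("ec2.ini")  # hypothetical path; read() skips missing files

    # six.moves.range is xrange on Py2 and range on Py3 -- lazy on both.
    for attempt in range(3):
        print("attempt", attempt)
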
diff --git a/playbooks/library/ec2_acl b/playbooks/library/ec2_acl
index d72cc0f1eef..901196f449f 100644
--- a/playbooks/library/ec2_acl
+++ b/playbooks/library/ec2_acl
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import absolute_import
+from __future__ import print_function
DOCUMENTATION = """
---
module: ec2_acl
@@ -54,7 +56,7 @@ import sys
try:
import boto.vpc
except ImportError:
- print "failed=True msg={0}".format(sys.executable)
+ print("failed=True msg={0}".format(sys.executable))
#print "failed=True msg='boto required for this module'"
sys.exit(1)
@@ -258,7 +260,7 @@ def main():
if region:
try:
connection = boto.vpc.connect_to_region(region, profile_name=profile)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
diff --git a/playbooks/library/ec2_group_local b/playbooks/library/ec2_group_local
index ec029593090..4a66c41377c 100644
--- a/playbooks/library/ec2_group_local
+++ b/playbooks/library/ec2_group_local
@@ -2,6 +2,10 @@
# -*- coding: utf-8 -*-
+from __future__ import absolute_import
+from __future__ import print_function
+import six
+from functools import reduce
DOCUMENTATION = '''
---
module: ec2_group
@@ -103,7 +107,7 @@ EXAMPLES = '''
try:
import boto.ec2
except ImportError:
- print "failed=True msg='boto required for this module'"
+ print("failed=True msg='boto required for this module'")
sys.exit(1)
@@ -239,7 +243,7 @@ def main():
'''found a match, delete it'''
try:
group.delete()
- except Exception, e:
+ except Exception as e:
module.fail_json(msg="Unable to delete security group '%s' - %s" % (group, e))
else:
group = None
@@ -318,7 +322,7 @@ def main():
changed = True
# Finally, remove anything left in the groupRules -- these will be defunct rules
- for rule in groupRules.itervalues():
+ for rule in six.itervalues(groupRules):
for grant in rule.grants:
grantGroup = None
if grant.group_id:
@@ -382,7 +386,7 @@ def main():
del groupRules[default_egress_rule]
# Finally, remove anything left in the groupRules -- these will be defunct rules
- for rule in groupRules.itervalues():
+ for rule in six.itervalues(groupRules):
for grant in rule.grants:
grantGroup = None
if grant.group_id:
diff --git a/playbooks/library/ec2_iam_role b/playbooks/library/ec2_iam_role
index cb339520247..490e7e48300 100644
--- a/playbooks/library/ec2_iam_role
+++ b/playbooks/library/ec2_iam_role
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import absolute_import
+from __future__ import print_function
DOCUMENTATION = """
---
module: ec2_iam_role
@@ -54,7 +56,7 @@ import sys
try:
import boto
except ImportError:
- print "failed=True msg='boto required for this module'"
+ print("failed=True msg='boto required for this module'")
sys.exit(1)
def present(connection, module):
@@ -151,7 +153,7 @@ def main():
try:
connection = boto.connect_iam(profile_name=profile)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
state = module.params.get('state')
diff --git a/playbooks/library/ec2_lookup b/playbooks/library/ec2_lookup
index f3a6edddab2..92c3161d351 100644
--- a/playbooks/library/ec2_lookup
+++ b/playbooks/library/ec2_lookup
@@ -14,6 +14,9 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import absolute_import
+from __future__ import print_function
+import six
DOCUMENTATION = '''
---
module: ec2_lookup
@@ -82,7 +85,7 @@ try:
import boto.ec2
from boto.ec2 import connect_to_region
except ImportError:
- print "failed=True msg='boto required for this module'"
+ print("failed=True msg='boto required for this module'")
sys.exit(1)
@@ -110,14 +113,14 @@ def main():
try:
ec2 = connect_to_region(region, aws_access_key_id=aws_access_key,
aws_secret_access_key=aws_secret_key)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
# If we specified an ec2_url then try connecting to it
elif ec2_url:
try:
ec2 = boto.connect_ec2_endpoint(ec2_url, aws_access_key,
aws_secret_key)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="Either region or ec2_url must be specified")
@@ -125,11 +128,11 @@ def main():
instances = []
instance_ids = []
for res in ec2.get_all_instances(filters={'tag:' + tag: value
- for tag, value in module.params.get('tags').iteritems()}):
+ for tag, value in six.iteritems(module.params.get('tags'))}):
for inst in res.instances:
if inst.state == "running":
- instances.append({k: v for k, v in inst.__dict__.iteritems()
- if isinstance(v, (basestring))})
+ instances.append({k: v for k, v in six.iteritems(inst.__dict__)
+ if isinstance(v, (six.string_types))})
instance_ids.append(inst.id)
module.exit_json(changed=False, instances=instances,
instance_ids=instance_ids)
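
Python 3 dropped basestring and unicode; six.string_types is (str, unicode) on Python 2 and (str,) on Python 3, which keeps isinstance checks like the one above portable. A sketch of the attribute-filtering idiom with invented instance attributes:

    import six

    attrs = {"id": "i-0abc123", "launch_time": "2019-01-01T00:00:00Z", "_conn": object()}

    # Keep only string-valued attributes, portably across Py2/Py3.
    strings_only = {k: v for k, v in six.iteritems(attrs)
                    if isinstance(v, six.string_types)}
    print(strings_only)
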
diff --git a/playbooks/library/ec2_rt b/playbooks/library/ec2_rt
index 9bd22350181..138754d19be 100644
--- a/playbooks/library/ec2_rt
+++ b/playbooks/library/ec2_rt
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import absolute_import
+from __future__ import print_function
DOCUMENTATION = """
---
module: ec2_rt
@@ -53,7 +55,7 @@ import sys
try:
import boto.vpc
except ImportError:
- print "failed=True msg={0}".format(sys.executable)
+ print("failed=True msg={0}".format(sys.executable))
sys.exit(1)
@@ -211,7 +213,7 @@ def main():
if region:
try:
connection = boto.vpc.connect_to_region(region,profile_name=profile)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
diff --git a/playbooks/library/ec2_subnet b/playbooks/library/ec2_subnet
index 345ac31fa7b..254c91cfb87 100644
--- a/playbooks/library/ec2_subnet
+++ b/playbooks/library/ec2_subnet
@@ -14,6 +14,8 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import absolute_import
+from __future__ import print_function
DOCUMENTATION = """
---
module: ec2_subnet
@@ -57,7 +59,7 @@ import sys
try:
import boto.vpc
except ImportError:
- print "failed=True msg='boto required for this module'"
+ print("failed=True msg='boto required for this module'")
sys.exit(1)
from boto.exception import NoAuthHandlerFound
@@ -204,7 +206,7 @@ def main():
if region:
try:
connection = boto.vpc.connect_to_region(region, profile_name=profile)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg=str(e))
else:
module.fail_json(msg="region must be specified")
diff --git a/playbooks/library/ec2_tag_local b/playbooks/library/ec2_tag_local
index c634756cd93..f529a397f46 100644
--- a/playbooks/library/ec2_tag_local
+++ b/playbooks/library/ec2_tag_local
@@ -16,6 +16,8 @@
# edX: Edited to allow for variable tag names
+from __future__ import absolute_import
+from __future__ import print_function
DOCUMENTATION = '''
---
module: ec2_tag
@@ -94,7 +96,7 @@ import time
try:
import boto.ec2
except ImportError:
- print "failed=True msg='boto required for this module'"
+ print("failed=True msg='boto required for this module'")
sys.exit(1)
def main():
diff --git a/playbooks/library/ec2_vpc_local b/playbooks/library/ec2_vpc_local
index 73ebaba0721..8f457fec4eb 100644
--- a/playbooks/library/ec2_vpc_local
+++ b/playbooks/library/ec2_vpc_local
@@ -18,6 +18,7 @@
# https://github.com/ansible/ansible-modules-core/pull/1323
+from __future__ import absolute_import
DOCUMENTATION = '''
---
module: ec2_vpc
@@ -394,7 +395,7 @@ def create_vpc(module, vpc_conn):
pending = False
# sometimes vpc_conn.create_vpc() will return a vpc that can't be found yet by vpc_conn.get_all_vpcs()
# when that happens, just wait a bit longer and try again
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
if e.error_code != 'InvalidVpcID.NotFound':
raise
if pending:
@@ -403,7 +404,7 @@ def create_vpc(module, vpc_conn):
# waiting took too long
module.fail_json(msg = "wait for vpc availability timeout on %s" % time.asctime())
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
module.fail_json(msg = "%s: %s" % (e.error_code, e.error_message))
# Done with base VPC, now change to attributes and features.
@@ -457,7 +458,7 @@ def create_vpc(module, vpc_conn):
vpc_conn.create_tags(new_subnet.id, new_subnet_tags)
changed = True
- except EC2ResponseError, e:
+ except EC2ResponseError as e:
module.fail_json(msg='Unable to create subnet {0}, error: {1}'.format(subnet['cidr'], e))
# Now delete all absent subnets
@@ -470,7 +471,7 @@ def create_vpc(module, vpc_conn):
try:
vpc_conn.delete_subnet(csubnet.id)
changed = True
- except EC2ResponseError, e:
+ except EC2ResponseError as e:
module.fail_json(msg='Unable to delete subnet {0}, error: {1}'.format(csubnet.cidr_block, e))
# Handle Internet gateway (create/delete igw)
@@ -486,7 +487,7 @@ def create_vpc(module, vpc_conn):
vpc_conn.create_tags(igw.id, internet_gateway_tags)
vpc_conn.attach_internet_gateway(igw.id, vpc.id)
changed = True
- except EC2ResponseError, e:
+ except EC2ResponseError as e:
module.fail_json(msg='Unable to create Internet Gateway, error: {0}'.format(e))
else:
# Set igw variable to the current igw instance for use in route tables.
@@ -497,7 +498,7 @@ def create_vpc(module, vpc_conn):
vpc_conn.detach_internet_gateway(igws[0].id, vpc.id)
vpc_conn.delete_internet_gateway(igws[0].id)
changed = True
- except EC2ResponseError, e:
+ except EC2ResponseError as e:
module.fail_json(msg='Unable to delete Internet Gateway, error: {0}'.format(e))
# Handle route tables - this may be worth splitting into a
@@ -563,7 +564,7 @@ def create_vpc(module, vpc_conn):
all_route_tables.append(new_rt)
changed = True
- except EC2ResponseError, e:
+ except EC2ResponseError as e:
module.fail_json(
msg='Unable to create and associate route table {0}, error: ' \
'{1}'.format(rt, e)
@@ -592,7 +593,7 @@ def create_vpc(module, vpc_conn):
if not is_main:
vpc_conn.delete_route_table(rt.id)
changed = True
- except EC2ResponseError, e:
+ except EC2ResponseError as e:
module.fail_json(msg='Unable to delete old route table {0}, error: {1}'.format(rt.id, e))
vpc_dict = get_vpc_info(vpc)
@@ -670,7 +671,7 @@ def terminate_vpc(module, vpc_conn, vpc_id=None, cidr=None):
vpc_conn.delete_route_table(rt.id)
vpc_conn.delete_vpc(vpc.id)
- except EC2ResponseError, e:
+ except EC2ResponseError as e:
module.fail_json(
msg='Unable to delete VPC {0}, error: {1}'.format(vpc.id, e)
)
@@ -716,7 +717,7 @@ def main():
region,
**aws_connect_kwargs
)
- except boto.exception.NoAuthHandlerFound, e:
+ except boto.exception.NoAuthHandlerFound as e:
module.fail_json(msg = str(e))
else:
module.fail_json(msg="region must be specified")
diff --git a/playbooks/library/mongodb_replica_set b/playbooks/library/mongodb_replica_set
index 462c77f4459..8026d24ddfe 100644
--- a/playbooks/library/mongodb_replica_set
+++ b/playbooks/library/mongodb_replica_set
@@ -1,5 +1,10 @@
#!/usr/bin/env python
+from __future__ import absolute_import
+from six.moves import filter
+from six.moves import map
+from six.moves import range
+from six.moves import zip
DOCUMENTATION = """
---
module: mongodb_replica_set
@@ -92,7 +97,7 @@ else:
pymongo_found = True
import json, copy
-from urllib import quote_plus
+from six.moves.urllib.parse import quote_plus
from operator import itemgetter
########### Mongo API calls ###########
@@ -282,7 +287,7 @@ def update_replset(rs_config):
def get_mongo_uri(host, port, username, password, auth_database):
mongo_uri = 'mongodb://'
if username and password:
- mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password]))
+ mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password])))
mongo_uri += "{}:{}".format(quote_plus(host), port)
@@ -308,7 +313,7 @@ def primary_client(some_host, some_port, username, password, auth_database):
status = client.admin.command("replSetGetStatus")
# Find out who the primary is.
- rs_primary = filter(lambda member: member['stateStr']=='PRIMARY', status['members'])[0]
+ rs_primary = [member for member in status['members'] if member['stateStr']=='PRIMARY'][0]
primary_host, primary_port = rs_primary['name'].split(':')
# Connect to the primary if this is not the primary.
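
Two more moves show up here: urllib.quote_plus lives at urllib.parse.quote_plus on Python 3, and filter() returns a lazy iterator on Python 3, so filter(...)[0] no longer works; the list comprehension above sidesteps that. A sketch with a fabricated replica-set status:

    from six.moves.urllib.parse import quote_plus

    members = [{"name": "db1:27017", "stateStr": "SECONDARY"},
               {"name": "db2:27017", "stateStr": "PRIMARY"}]

    # filter(...)[0] breaks on Py3; a comprehension (or next()) is portable.
    primary = [m for m in members if m["stateStr"] == "PRIMARY"][0]

    # quote_plus escapes credentials for a MongoDB URI on both interpreters.
    print("mongodb://%s:%s@%s" % (quote_plus("admin"), quote_plus("p@ss/word"),
                                  primary["name"]))
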
diff --git a/playbooks/library/mongodb_rs_config b/playbooks/library/mongodb_rs_config
index 7d6b29a3264..02c9ed1fb22 100644
--- a/playbooks/library/mongodb_rs_config
+++ b/playbooks/library/mongodb_rs_config
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import absolute_import
+from six.moves import map
DOCUMENTATION = """
---
module: mongodb_rs_config
@@ -59,7 +61,7 @@ else:
pymongo_found = True
import json
-from urllib import quote_plus
+from six.moves.urllib.parse import quote_plus
def main():
@@ -87,7 +89,7 @@ def main():
module.fail_json(msg="Must provide both username and password or neither.")
if username:
- mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password]))
+ mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password])))
mongo_uri += "{}:{}".format(quote_plus(host),port)
diff --git a/playbooks/library/mongodb_rs_status b/playbooks/library/mongodb_rs_status
index 4dbd3b9f08c..45e39437620 100644
--- a/playbooks/library/mongodb_rs_status
+++ b/playbooks/library/mongodb_rs_status
@@ -1,5 +1,7 @@
#!/usr/bin/env python
+from __future__ import absolute_import
+from six.moves import map
DOCUMENTATION = """
---
module: mongodb_rs_status
@@ -67,7 +69,7 @@ else:
pymongo_found = True
import json
-from urllib import quote_plus
+from six.moves.urllib.parse import quote_plus
def main():
@@ -95,7 +97,7 @@ def main():
module.fail_json(msg="Must provide both username and password or neither.")
if username:
- mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password]))
+ mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password])))
mongo_uri += "{}:{}".format(quote_plus(host),port)
diff --git a/playbooks/library/mongodb_step_down b/playbooks/library/mongodb_step_down
index dda30b399fa..a531801b192 100644
--- a/playbooks/library/mongodb_step_down
+++ b/playbooks/library/mongodb_step_down
@@ -1,5 +1,8 @@
#!/usr/bin/env python
+from __future__ import absolute_import
+from six.moves import map
+from six.moves import range
DOCUMENTATION = """
---
module: mongodb_step_down
@@ -58,7 +61,7 @@ else:
pymongo_found = True
import json
-from urllib import quote_plus
+from six.moves.urllib.parse import quote_plus
def main():
@@ -86,7 +89,7 @@ def main():
module.fail_json(msg="Must provide both username and password or neither.")
if username:
- mongo_uri += "{}:{}@".format(*map(quote_plus, [username,password]))
+ mongo_uri += "{}:{}@".format(*list(map(quote_plus, [username,password])))
mongo_uri += "{}:{}".format(quote_plus(host),port)
diff --git a/playbooks/library/rds_local b/playbooks/library/rds_local
index 19f0bbe58d6..e9f58276efc 100644
--- a/playbooks/library/rds_local
+++ b/playbooks/library/rds_local
@@ -14,6 +14,7 @@
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see .
+from __future__ import absolute_import
DOCUMENTATION = '''
---
module: rds
@@ -334,19 +335,19 @@ class RDSConnection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds, region, **aws_connect_params)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
try:
return RDSDBInstance(self.connection.get_all_dbinstances(instancename)[0])
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
return None
def get_db_snapshot(self, snapshotid):
try:
return RDSSnapshot(self.connection.get_all_dbsnapshots(snapshot_id=snapshotid)[0])
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
@@ -356,63 +357,63 @@ class RDSConnection:
result = self.connection.create_dbinstance(instance_name, size, instance_class,
username, password, **params)
return RDSDBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.createdb_instance_read_replica(instance_name, source_instance, **params)
return RDSDBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_dbinstance(instance_name, **params)
return RDSDBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_dbsnapshot(snapshot)
return RDSSnapshot(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_dbinstance(instance_name, **params)
return RDSDBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_dbinstance(instance_name)
return RDSDBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_dbinstance_from_dbsnapshot(snapshot, instance_name, instance_type, **params)
return RDSDBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_dbsnapshot(snapshot, instance_name)
return RDSSnapshot(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)
return RDSDBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
@@ -420,7 +421,7 @@ class RDS2Connection:
def __init__(self, module, region, **aws_connect_params):
try:
self.connection = connect_to_aws(boto.rds2, region, **aws_connect_params)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
module.fail_json(msg=e.error_message)
def get_db_instance(self, instancename):
@@ -428,9 +429,9 @@ class RDS2Connection:
dbinstances = self.connection.describe_db_instances(db_instance_identifier=instancename)['DescribeDBInstancesResponse']['DescribeDBInstancesResult']['DBInstances']
result = RDS2DBInstance(dbinstances[0])
return result
- except boto.rds2.exceptions.DBInstanceNotFound, e:
+ except boto.rds2.exceptions.DBInstanceNotFound as e:
return None
- except Exception, e:
+ except Exception as e:
raise e
def get_db_snapshot(self, snapshotid):
@@ -438,7 +439,7 @@ class RDS2Connection:
snapshots = self.connection.describe_db_snapshots(db_snapshot_identifier=snapshotid, snapshot_type='manual')['DescribeDBSnapshotsResponse']['DescribeDBSnapshotsResult']['DBSnapshots']
result = RDS2Snapshot(snapshots[0])
return result
- except boto.rds2.exceptions.DBSnapshotNotFound, e:
+ except boto.rds2.exceptions.DBSnapshotNotFound as e:
return None
def create_db_instance(self, instance_name, size, instance_class, db_engine,
@@ -447,63 +448,63 @@ class RDS2Connection:
result = self.connection.create_db_instance(instance_name, size, instance_class,
db_engine, username, password, **params)['CreateDBInstanceResponse']['CreateDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_instance_read_replica(self, instance_name, source_instance, **params):
try:
result = self.connection.create_db_instance_read_replica(instance_name, source_instance, **params)['CreateDBInstanceReadReplicaResponse']['CreateDBInstanceReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_instance(self, instance_name, **params):
try:
result = self.connection.delete_db_instance(instance_name, **params)['DeleteDBInstanceResponse']['DeleteDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def delete_db_snapshot(self, snapshot):
try:
result = self.connection.delete_db_snapshot(snapshot)['DeleteDBSnapshotResponse']['DeleteDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def modify_db_instance(self, instance_name, **params):
try:
result = self.connection.modify_db_instance(instance_name, **params)['ModifyDBInstanceResponse']['ModifyDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def reboot_db_instance(self, instance_name, **params):
try:
result = self.connection.reboot_db_instance(instance_name, **params)['RebootDBInstanceResponse']['RebootDBInstanceResult']['DBInstance']
return RDS2DBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def restore_db_instance_from_db_snapshot(self, instance_name, snapshot, instance_type, **params):
try:
result = self.connection.restore_db_instance_from_db_snapshot(instance_name, snapshot, **params)['RestoreDBInstanceFromDBSnapshotResponse']['RestoreDBInstanceFromDBSnapshotResult']['DBInstance']
return RDS2DBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def create_db_snapshot(self, snapshot, instance_name, **params):
try:
result = self.connection.create_db_snapshot(snapshot, instance_name, **params)['CreateDBSnapshotResponse']['CreateDBSnapshotResult']['DBSnapshot']
return RDS2Snapshot(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
def promote_read_replica(self, instance_name, **params):
try:
result = self.connection.promote_read_replica(instance_name, **params)['PromoteReadReplicaResponse']['PromoteReadReplicaResult']['DBInstance']
return RDS2DBInstance(result)
- except boto.exception.BotoServerError, e:
+ except boto.exception.BotoServerError as e:
raise RDSException(e)
@@ -544,7 +545,7 @@ class RDSDBInstance:
# ReadReplicaSourceDBInstanceIdentifier may or may not exist
try:
d["replication_source"] = self.instance.ReadReplicaSourceDBInstanceIdentifier
- except Exception, e:
+ except Exception as e:
d["replication_source"] = None
return d
@@ -679,7 +680,7 @@ def create_db_instance(module, conn):
module.params.get('instance_type'), module.params.get('db_engine'),
module.params.get('username'), module.params.get('password'), **params)
changed = True
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg="Failed to create instance: %s" % e.message)
if module.params.get('wait'):
@@ -706,7 +707,7 @@ def replicate_db_instance(module, conn):
try:
result = conn.create_db_instance_read_replica(instance_name, source_instance, **params)
changed = True
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg="Failed to create replica instance: %s " % e.message)
if module.params.get('wait'):
@@ -745,7 +746,7 @@ def delete_db_instance_or_snapshot(module, conn):
result = conn.delete_db_instance(instance_name, **params)
else:
result = conn.delete_db_snapshot(snapshot)
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg="Failed to delete instance: %s" % e.message)
# If we're not waiting for a delete to complete then we're all done
@@ -755,12 +756,12 @@ def delete_db_instance_or_snapshot(module, conn):
try:
resource = await_resource(conn, result, 'deleted', module)
module.exit_json(changed=True)
- except RDSException, e:
+ except RDSException as e:
if e.code == 'DBInstanceNotFound':
module.exit_json(changed=True)
else:
module.fail_json(msg=e.message)
- except Exception, e:
+ except Exception as e:
module.fail_json(msg=str(e))
@@ -798,7 +799,7 @@ def modify_db_instance(module, conn):
try:
result = conn.modify_db_instance(instance_name, **params)
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg=e.message)
if params.get('apply_immediately'):
if new_instance_name:
@@ -836,7 +837,7 @@ def promote_db_instance(module, conn):
try:
result = conn.promote_read_replica(instance_name, **params)
changed = True
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg=e.message)
else:
changed = False
@@ -861,7 +862,7 @@ def snapshot_db_instance(module, conn):
try:
result = conn.create_db_snapshot(snapshot, instance_name, **params)
changed = True
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
@@ -886,7 +887,7 @@ def reboot_db_instance(module, conn):
try:
result = conn.reboot_db_instance(instance_name, **params)
changed = True
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
@@ -917,7 +918,7 @@ def restore_db_instance(module, conn):
try:
result = conn.restore_db_instance_from_db_snapshot(instance_name, snapshot, instance_type, **params)
changed = True
- except RDSException, e:
+ except RDSException as e:
module.fail_json(msg=e.message)
if module.params.get('wait'):
@@ -1005,7 +1006,7 @@ def validate_parameters(required_vars, valid_vars, module):
# Convert tags dict to list of tuples that rds2 expects
if 'tags' in params:
- params['tags'] = module.params['tags'].items()
+ params['tags'] = list(module.params['tags'].items())
return params
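
As the comment above the tags change notes, rds2 expects a real list of tuples; on Python 3, dict.items() returns a view rather than a list, so it has to be materialized explicitly. Sketch with invented tags:

    tags = {"environment": "stage", "deployment": "edx"}

    # Py2: items() already returns a list. Py3: items() is a view object,
    # so callers that require a list must wrap it.
    tag_pairs = list(tags.items())
    print(tag_pairs)
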
diff --git a/playbooks/library/util_map b/playbooks/library/util_map
index f674d6eaa9d..ef5b60d2178 100755
--- a/playbooks/library/util_map
+++ b/playbooks/library/util_map
@@ -1,5 +1,6 @@
#!/usr/bin/env python
+from __future__ import absolute_import
DOCUMENTATION = """
---
module: util_map
diff --git a/playbooks/lifecycle_inventory.py b/playbooks/lifecycle_inventory.py
index fa126eb129b..892ab723988 100755
--- a/playbooks/lifecycle_inventory.py
+++ b/playbooks/lifecycle_inventory.py
@@ -28,6 +28,8 @@
}
"""
+from __future__ import absolute_import
+from __future__ import print_function
import argparse
import boto3
import json
@@ -93,7 +95,7 @@ def run(self):
inventory[group['AutoScalingGroupName'] + "_" + instance['LifecycleState'].replace(":","_")].append(private_ip_address)
inventory[instance['LifecycleState'].replace(":","_")].append(private_ip_address)
- print json.dumps(inventory, sort_keys=True, indent=2)
+ print(json.dumps(inventory, sort_keys=True, indent=2))
if __name__=="__main__":
diff --git a/playbooks/roles/alton/templates/boto.j2 b/playbooks/roles/alton/templates/boto.j2
index 28f21ce1b07..09f5008b8dc 100644
--- a/playbooks/roles/alton/templates/boto.j2
+++ b/playbooks/roles/alton/templates/boto.j2
@@ -1,4 +1,4 @@
-{% for deployment, creds in ALTON_AWS_CREDENTIALS.iteritems() %}
+{% for deployment, creds in ALTON_AWS_CREDENTIALS.items() %}
[profile {{ deployment }}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
diff --git a/playbooks/roles/config-encoders/filter_plugins/config_encoders.py b/playbooks/roles/config-encoders/filter_plugins/config_encoders.py
index 767520c3dfa..91a43f8256b 100644
--- a/playbooks/roles/config-encoders/filter_plugins/config_encoders.py
+++ b/playbooks/roles/config-encoders/filter_plugins/config_encoders.py
@@ -25,6 +25,8 @@
from ansible import errors
from copy import copy
import re
+import six
+from six.moves import map
def _str_is_bool(data):
@@ -167,7 +169,7 @@ def encode_apache(
elif block_type == 'options':
for o in data:
- for key, val in sorted(o.iteritems()):
+ for key, val in sorted(six.iteritems(o)):
rv += "%s%s " % (indent * (level-1), key)
rv += encode_apache(
val,
@@ -195,7 +197,7 @@ def encode_apache(
else:
rv += str(data)
- elif isinstance(data, basestring):
+ elif isinstance(data, six.string_types):
# Value is a string
if (
quote_all_strings or
@@ -242,7 +244,7 @@ def encode_erlang(
rv += "\n"
- for key, val in sorted(data.iteritems()):
+ for key, val in sorted(six.iteritems(data)):
rv += "%s{%s," % (indent*level, key)
if not isinstance(val, dict):
@@ -266,7 +268,7 @@ def encode_erlang(
rv += str(data).lower()
- elif isinstance(data, basestring):
+ elif isinstance(data, six.string_types):
# It's a string
atom_len = len(atom_value_indicator)
@@ -287,7 +289,7 @@ def encode_erlang(
for val in data:
if (
- isinstance(val, basestring) or
+ isinstance(val, six.string_types) or
_is_num(val)):
rv += "\n%s" % (indent*level)
@@ -336,10 +338,10 @@ def encode_haproxy(data, indent=" "):
if isinstance(section, dict):
# It's a section
- rv += "%s\n" % section.keys()[0]
+ rv += "%s\n" % list(section.keys())[0]
# Process all parameters of the section
- for param in section.values()[0]:
+ for param in list(section.values())[0]:
rv += "%s%s\n" % (indent, param)
else:
# It's a comment of a parameter
@@ -358,7 +360,7 @@ def encode_ini(
rv = ""
# First process all standalone properties
- for prop, val in sorted(data.iteritems()):
+ for prop, val in sorted(six.iteritems(data)):
if ucase_prop:
prop = prop.upper()
@@ -375,7 +377,7 @@ def encode_ini(
prop, delimiter, quote, _escape(item, quote), quote)
# Then process all sections
- for section, props in sorted(data.iteritems()):
+ for section, props in sorted(six.iteritems(data)):
if isinstance(props, dict):
if rv != "":
rv += "\n"
@@ -411,7 +413,7 @@ def encode_json(
if len(data) > 0:
rv += "\n"
- items = sorted(data.iteritems())
+ items = sorted(six.iteritems(data))
for key, val in items:
rv += '%s"%s": ' % (indent * (level+1), key)
@@ -445,7 +447,7 @@ def encode_json(
rv += str(data).lower()
- elif isinstance(data, basestring):
+ elif isinstance(data, six.string_types):
# It's a string
rv += '"%s"' % _escape(_escape(data), format='control')
@@ -495,7 +497,7 @@ def encode_logstash(
if prevtype in ('value', 'value_hash', 'array'):
rv += "{\n"
- items = sorted(data.iteritems())
+ items = sorted(six.iteritems(data))
for key, val in items:
if key[0] == section_prefix:
@@ -511,11 +513,11 @@ def encode_logstash(
# Last item of the loop
if items[-1] == (key, val):
if (
- isinstance(val, basestring) or
+ isinstance(val, six.string_types) or
_is_num(val) or
isinstance(val, bool) or (
isinstance(val, dict) and
- val.keys()[0][0] != section_prefix)):
+ list(val.keys())[0][0] != section_prefix)):
rv += "\n%s}\n" % (indent * level)
else:
rv += "%s}\n" % (indent * level)
@@ -538,7 +540,7 @@ def encode_logstash(
if (
items[-1] != (key, val) and (
- isinstance(val, basestring) or
+ isinstance(val, six.string_types) or
_is_num(val) or
isinstance(val, bool))):
rv += "\n"
@@ -558,7 +560,7 @@ def encode_logstash(
rv += str(data).lower()
- elif isinstance(data, basestring):
+ elif isinstance(data, six.string_types):
# It's a string
rv += '"%s"' % _escape(data)
@@ -567,7 +569,7 @@ def encode_logstash(
# It's a list
for val in data:
- if isinstance(val, dict) and val.keys()[0][0] == section_prefix:
+ if isinstance(val, dict) and list(val.keys())[0][0] == section_prefix:
# Value is a block
rv += encode_logstash(
@@ -614,16 +616,16 @@ def encode_nginx(data, indent=" ", level=0, block_semicolon=False):
if item_type in ('section', 'line'):
rv += "\n"
- rv += "%s%s {\n" % (level*indent, item.keys()[0])
+ rv += "%s%s {\n" % (level*indent, list(item.keys())[0])
rv += encode_nginx(
- item.values()[0],
+ list(item.values())[0],
level=level+1,
block_semicolon=block_semicolon)
rv += "%s}%s\n" % (level*indent, ';' if block_semicolon else '')
item_type = 'section'
- elif isinstance(item, basestring):
+ elif isinstance(item, six.string_types):
# Normal line
if item_type == 'section':
rv += "\n"
@@ -654,7 +656,7 @@ def encode_pam(
# Remember previous type to make newline between type blocks
prev_type = None
- for label, rule in sorted(data.iteritems()):
+ for label, rule in sorted(six.iteritems(data)):
if separate_types:
# Add extra newline to separate blocks of the same type
if prev_type is not None and prev_type != rule['type']:
@@ -676,9 +678,7 @@ def encode_pam(
if isinstance(rule['control'], list):
rv += "[%s]%s" % (
" ".join(
- map(
- lambda k: "=".join(map(str, k)),
- map(lambda x: x.items()[0], rule['control']))),
+ ["=".join(map(str, k)) for k in [list(x.items())[0] for x in rule['control']]]),
separator)
else:
rv += "%s%s" % (rule['control'], separator)
@@ -693,7 +693,7 @@ def encode_pam(
rv += ' '
if isinstance(arg, dict):
- rv += "=".join(map(str, arg.items()[0]))
+ rv += "=".join(map(str, list(arg.items())[0]))
else:
rv += arg
@@ -714,9 +714,9 @@ def encode_toml(
# It's a dict
# First process all standalone strings, numbers, booleans and lists
- for key, val in sorted(data.iteritems()):
+ for key, val in sorted(six.iteritems(data)):
if (
- isinstance(val, basestring) or
+ isinstance(val, six.string_types) or
_is_num(val) or
isinstance(val, bool) or (
isinstance(val, list) and
@@ -737,7 +737,7 @@ def encode_toml(
first = False
# Then process all data structures
- for key, val in sorted(data.iteritems()):
+ for key, val in sorted(six.iteritems(data)):
if (
isinstance(val, dict) or
isinstance(val, list) and isinstance(val[0], dict)):
@@ -798,7 +798,7 @@ def encode_toml(
if prevtype != 'list':
rv += "\n"
- elif isinstance(data, basestring):
+ elif isinstance(data, six.string_types):
# It's a string
rv += "%s%s%s" % (
@@ -858,7 +858,7 @@ def encode_xml(
if (
not (
isinstance(item, dict) and
- item.keys()[0].startswith(attribute_sign))):
+ list(item.keys())[0].startswith(attribute_sign))):
rv += encode_xml(
item,
attribute_sign=attribute_sign,
@@ -868,7 +868,7 @@ def encode_xml(
elif isinstance(data, dict):
# It's eiher an attribute or an element
- key, val = data.items()[0]
+ key, val = list(data.items())[0]
if key.startswith(attribute_sign):
# Process attribute
@@ -884,7 +884,7 @@ def encode_xml(
for item in val:
if (
isinstance(item, dict) and
- item.keys()[0].startswith(attribute_sign)):
+ list(item.keys())[0].startswith(attribute_sign)):
num_attrs += 1
rv += encode_xml(
item,
@@ -907,7 +907,7 @@ def encode_xml(
for item in val:
if (
isinstance(item, dict) and
- not item.keys()[0].startswith(attribute_sign)):
+ not list(item.keys())[0].startswith(attribute_sign)):
val_not_text = True
break
elif isinstance(val, dict):
@@ -947,14 +947,14 @@ def encode_yaml(
if isinstance(data, dict):
# It's a dictionary
- if len(data.keys()) == 0:
+ if len(data) == 0:
rv += "{}\n"
else:
- for i, (key, val) in enumerate(sorted(data.iteritems())):
+ for i, (key, val) in enumerate(sorted(six.iteritems(data))):
# Skip indentation only for the first pair
rv += "%s%s:" % ("" if i == 0 and skip_indent else level*indent, key)
- if isinstance(val, dict) and len(val.keys()) == 0:
+ if isinstance(val, dict) and len(val) == 0:
rv += " {}\n"
else:
if (
@@ -1042,12 +1042,11 @@ def template_replace(data, replacement):
# Walk through the data structure and try to replace all special strings
if isinstance(local_data, list):
- local_data = map(
- lambda x: template_replace(x, replacement), local_data)
+ local_data = [template_replace(x, replacement) for x in local_data]
elif isinstance(local_data, dict):
- for key, val in local_data.iteritems():
+ for key, val in six.iteritems(local_data):
local_data[key] = template_replace(val, replacement)
- elif isinstance(local_data, basestring):
+ elif isinstance(local_data, six.string_types):
# Replace the special string by it's evaluated value
p = re.compile(r'\{\[\{\s*(\w+)([^}\s]+|)\s*\}\]\}')
local_data = p.sub(__eval_replace, local_data)
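
The config encoders peek at single-pair dicts with d.keys()[0] / d.values()[0], which fails on Python 3 because views are not indexable; hence the list(...) wrapping throughout this file. next(iter(...)) is an equivalent that avoids building a throwaway list. A sketch with a hypothetical single-section dict:

    from __future__ import print_function

    section = {"frontend www": ["bind :80", "default_backend app"]}

    # Py2: section.keys()[0]. Py3: dict views are not indexable, so either
    # build a list or pull the first element from an iterator.
    name = list(section.keys())[0]
    params = next(iter(section.values()))  # same element, no temporary list
    print(name, params)
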
diff --git a/playbooks/roles/edx_service/templates/config.yml.j2 b/playbooks/roles/edx_service/templates/config.yml.j2
index 9bf761203cf..93d89041fd5 100644
--- a/playbooks/roles/edx_service/templates/config.yml.j2
+++ b/playbooks/roles/edx_service/templates/config.yml.j2
@@ -2,7 +2,7 @@
# {{ ansible_managed }}
{% if edx_service_config_filter_nones -%}
- {% for key, value in edx_service_config.copy().iteritems() -%}
+ {% for key, value in edx_service_config.copy().items() -%}
{% if value is none -%}
{% do edx_service_config.pop(key) %}
{%- endif %}
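
The same iteritems-to-items switch applies inside Jinja2 templates: the engine resolves .items() as an ordinary attribute of the underlying Python dict, and on Python 3 .iteritems() simply does not exist. A minimal rendering sketch (assumes the jinja2 package; config values invented):

    from jinja2 import Template

    # On Py3 mappings expose .items() but not .iteritems(), so items() is
    # the spelling that renders under both interpreters.
    tmpl = Template("{% for key, value in config.items() %}"
                    "{{ key }}: {{ value }}\n"
                    "{% endfor %}")
    print(tmpl.render(config={"LOG_LEVEL": "info", "WORKERS": 4}))
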
diff --git a/playbooks/roles/gh_mirror/files/repos_from_orgs.py b/playbooks/roles/gh_mirror/files/repos_from_orgs.py
index 2e6e060ea6f..35a41cf5dae 100644
--- a/playbooks/roles/gh_mirror/files/repos_from_orgs.py
+++ b/playbooks/roles/gh_mirror/files/repos_from_orgs.py
@@ -6,6 +6,8 @@
# a yaml file containing a list of
# github organizations
+from __future__ import absolute_import
+from __future__ import print_function
import yaml
import sys
import requests
@@ -51,7 +53,7 @@ def refresh_cache():
with open(join(path, 'orgs.yml')) as f:
orgs = yaml.safe_load(f)
except IOError:
- print "Unable to read {}/orgs.yml, does it exist?".format(path)
+ print("Unable to read {}/orgs.yml, does it exist?".format(path))
sys.exit(1)
repos = []
@@ -97,7 +99,7 @@ def update_repos():
else:
check_running()
if not args.datadir:
- print "Please specificy a repository directory"
+ print("Please specificy a repository directory")
sys.exit(1)
if not os.path.exists('/var/tmp/repos.json'):
refresh_cache()
diff --git a/playbooks/roles/hadoop_common/templates/core-site.xml.j2 b/playbooks/roles/hadoop_common/templates/core-site.xml.j2
index 40aa4cba9e1..8d97d12e430 100644
--- a/playbooks/roles/hadoop_common/templates/core-site.xml.j2
+++ b/playbooks/roles/hadoop_common/templates/core-site.xml.j2
@@ -3,7 +3,7 @@
-{% for key, value in HADOOP_CORE_SITE_DEFAULT_CONFIG.iteritems() %}
+{% for key, value in HADOOP_CORE_SITE_DEFAULT_CONFIG.items() %}
{{ key }}
{{ value }}
diff --git a/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2 b/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2
index f8e758ab8ba..895f7a57979 100644
--- a/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2
+++ b/playbooks/roles/hadoop_common/templates/hdfs-site.xml.j2
@@ -2,7 +2,7 @@
-{% for key, value in HDFS_SITE_DEFAULT_CONFIG.iteritems() %}
+{% for key, value in HDFS_SITE_DEFAULT_CONFIG.items() %}
{{ key }}
{{ value }}
diff --git a/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2 b/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2
index cb28b54a7bd..e791db6f4cf 100644
--- a/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2
+++ b/playbooks/roles/hadoop_common/templates/mapred-site.xml.j2
@@ -2,7 +2,7 @@
-{% for key, value in MAPRED_SITE_DEFAULT_CONFIG.iteritems() %}
+{% for key, value in MAPRED_SITE_DEFAULT_CONFIG.items() %}
{{ key }}
{{ value }}
diff --git a/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2 b/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2
index 01d96307ccb..ab4007619de 100644
--- a/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2
+++ b/playbooks/roles/hadoop_common/templates/yarn-site.xml.j2
@@ -1,7 +1,7 @@
{% do YARN_SITE_DEFAULT_CONFIG.update(yarn_site_config) %}
-{% for key, value in YARN_SITE_DEFAULT_CONFIG.iteritems() %}
+{% for key, value in YARN_SITE_DEFAULT_CONFIG.items() %}
{{ key }}
{{ value }}
diff --git a/playbooks/roles/hive/templates/hive-site.xml.j2 b/playbooks/roles/hive/templates/hive-site.xml.j2
index 7ab279a6969..26d5b9ed9a9 100644
--- a/playbooks/roles/hive/templates/hive-site.xml.j2
+++ b/playbooks/roles/hive/templates/hive-site.xml.j2
@@ -2,7 +2,7 @@
-{% for key, value in HIVE_SITE_DEFAULT_CONFIG.iteritems() %}
+{% for key, value in HIVE_SITE_DEFAULT_CONFIG.items() %}
{{ key }}
{{ value }}
diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2
index 2ebf8796e63..8700534b4dc 100644
--- a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2
+++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/aws_config.j2
@@ -1,4 +1,4 @@
-{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
+{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.items() %}
[profile {{ deployment }}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
diff --git a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2 b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2
index 2ebf8796e63..8700534b4dc 100644
--- a/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2
+++ b/playbooks/roles/jenkins_admin/templates/edx/var/jenkins/boto.j2
@@ -1,4 +1,4 @@
-{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.iteritems() %}
+{% for deployment, creds in JENKINS_ADMIN_AWS_CREDENTIALS.items() %}
[profile {{ deployment }}]
aws_access_key_id = {{ creds.access_id }}
aws_secret_access_key = {{ creds.secret_key }}
diff --git a/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml b/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml
index 528d374c3ad..eddc5d0dacd 100644
--- a/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml
+++ b/playbooks/roles/jenkins_analytics/templates/jenkins.config.main.xml
@@ -10,7 +10,7 @@
{% else %}
- {% for permission_group, permissions in jenkins_auth_permissions.iteritems() %}
+ {% for permission_group, permissions in jenkins_auth_permissions.items() %}
{% for permission in permissions %}
{% for user in jenkins_auth_users[permission_group] | default([]) %}
{{ permission }}:{{ user }}
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2
index c8d1f648feb..ff4d86e94db 100644
--- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/cms.j2
@@ -28,21 +28,21 @@ upstream cms-backend {
server {
# CMS configuration file for nginx, templated by ansible
-
+
# Proxy to a remote maintanence page
{% if NGINX_EDXAPP_ENABLE_S3_MAINTENANCE %}
- # Do not include a 502 error in NGINX_ERROR_PAGES when
+ # Do not include a 502 error in NGINX_ERROR_PAGES when
# NGINX_EDXAPP_ENABLE_S3_MAINTENANCE is enabled.
error_page 502 @maintenance;
{% include "s3_maintenance.j2" %}
-
+
{% endif %}
# error pages
- {% for k, v in NGINX_EDXAPP_ERROR_PAGES.iteritems() %}
+ {% for k, v in NGINX_EDXAPP_ERROR_PAGES.items() %}
error_page {{ k }} {{ v }};
{% endfor %}
@@ -61,7 +61,7 @@ error_page {{ k }} {{ v }};
ssl_certificate /etc/ssl/certs/{{ NGINX_SSL_CERTIFICATE|basename }};
ssl_certificate_key /etc/ssl/private/{{ NGINX_SSL_KEY|basename }};
{% endif %}
-
+
{% if NGINX_ENABLE_SSL or NGINX_REDIRECT_TO_HTTPS %}
# request the browser to use SSL for all connections
add_header Strict-Transport-Security "max-age={{ NGINX_HSTS_MAX_AGE }}; includeSubDomains";
@@ -80,11 +80,11 @@ error_page {{ k }} {{ v }};
access_log {{ nginx_log_dir }}/access.log {{ NGINX_LOG_FORMAT_NAME }};
error_log {{ nginx_log_dir }}/error.log error;
- # CS184 requires uploads of up to 4MB for submitting screenshots.
- # CMS requires larger value for course assest, values provided
+ # CS184 requires uploads of up to 4MB for submitting screenshots.
+ # CMS requires a larger value for course assets, values provided
# via hiera.
client_max_body_size 100M;
-
+
rewrite ^(.*)/favicon.ico$ {{ NGINX_EDXAPP_FAVICON_PATH }} last;
{% include "python_lib.zip.j2" %}
diff --git a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2 b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2
index cde1d13c1c3..962fd2b86fb 100644
--- a/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2
+++ b/playbooks/roles/nginx/templates/edx/app/nginx/sites-available/lms.j2
@@ -75,7 +75,7 @@ server {
{% endif %}
# error pages
- {% for k, v in NGINX_EDXAPP_ERROR_PAGES.iteritems() %}
+ {% for k, v in NGINX_EDXAPP_ERROR_PAGES.items() %}
error_page {{ k }} {{ v }};
{% endfor %}
diff --git a/playbooks/roles/supervisor/files/pre_supervisor_checks.py b/playbooks/roles/supervisor/files/pre_supervisor_checks.py
index 1b09ec87112..796f7581d20 100755
--- a/playbooks/roles/supervisor/files/pre_supervisor_checks.py
+++ b/playbooks/roles/supervisor/files/pre_supervisor_checks.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import argparse
import boto.ec2
from boto.utils import get_instance_metadata, get_instance_identity
@@ -177,7 +179,7 @@ def edp_for_instance(instance_id):
instance_id=instance_id)
break
except Exception as e:
- print("Failed to get EDP for {}: {}".format(instance_id, str(e)))
+ print(("Failed to get EDP for {}: {}".format(instance_id, str(e))))
# With the time limit being 2 minutes we will
# try 5 times before giving up.
time.sleep(backoff)
diff --git a/requirements.txt b/requirements.txt
index 801ffaa8ed6..5b29874edeb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -28,7 +28,8 @@ ipaddress==1.0.22 # via cryptography
jinja2==2.8
jmespath==0.9.4 # via boto3, botocore
markupsafe==1.0
-mysql-python==1.2.5
+mysql-python==1.2.5 ; python_version == "2.7"
+mysqlclient==1.3.0 ; python_version >= "3.0"
networkx==1.11
paramiko==2.4.1
pathlib2==2.3.0
@@ -48,4 +49,4 @@ scandir==1.10.0 # via pathlib2
simplejson==3.16.0 # via datadog
six==1.12.0 # via bcrypt, cryptography, pathlib2, pynacl, python-dateutil
urllib3==1.22 # via requests
-wsgiref==0.1.2
+wsgiref==0.1.2 ; python_version == "2.7"
diff --git a/tests/test_mongodb_replica_set.py b/tests/test_mongodb_replica_set.py
index 80a9470fdbd..9bb940531d6 100644
--- a/tests/test_mongodb_replica_set.py
+++ b/tests/test_mongodb_replica_set.py
@@ -5,9 +5,10 @@
# 2. rename mongodb_replica_set to mongodb_replica_set.py
# 3. python test_mongodb_replica_set.py
+from __future__ import absolute_import
import mongodb_replica_set as mrs
import unittest, mock
-from urllib import quote_plus
+from six.moves.urllib.parse import quote_plus
from copy import deepcopy
class TestNoPatchingMongodbReplicaSet(unittest.TestCase):
diff --git a/util/ansible_msg.py b/util/ansible_msg.py
index 57e1f3460c2..f817d0bd496 100755
--- a/util/ansible_msg.py
+++ b/util/ansible_msg.py
@@ -1,6 +1,8 @@
#!/usr/bin/env python3.6
"""Simple utility for deciphering Ansible jsonized task output."""
+from __future__ import absolute_import
+from __future__ import print_function
import json
import sys
@@ -17,7 +19,7 @@
# junk:
# '==> default: failed: [localhost] (item=/edx/app/edx_ansible/edx_ansible/requirements.txt) => {"cmd": "/edx/app/edx...'
-print("Stdin is {} chars: {!r}...{!r}".format(len(junk), junk[:40], junk[-40:]))
+print(("Stdin is {} chars: {!r}...{!r}".format(len(junk), junk[:40], junk[-40:])))
junk = junk.replace('\n', '')
junk = junk[junk.index('=> {')+3:]
@@ -29,7 +31,7 @@
for key in GOOD_KEYS:
if data.get(key):
print(f"== {key} ===========================")
print(data[key])
BAD_KEYS = ['stdout_lines', 'start', 'end', 'delta', 'changed', 'failed', 'rc', 'item']
diff --git a/util/asg_event_notifications_util.py b/util/asg_event_notifications_util.py
index 9961ef6a437..2b91303f79e 100644
--- a/util/asg_event_notifications_util.py
+++ b/util/asg_event_notifications_util.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import click
@@ -41,10 +43,10 @@ def show_asg_event_notifications():
event_notifications = get_asg_event_notifications(asg)
if event_notifications:
- print("Event notifications: {0} are set for ASG: {1}".format(event_notifications,
- asg))
+ print(("Event notifications: {0} are set for ASG: {1}".format(event_notifications,
+ asg)))
else:
- print("No Event Notifications found for ASG {}".format(asg))
+ print(("No Event Notifications found for ASG {}".format(asg)))
- except(Exception, e):
+ except Exception as e:
print(e)
@@ -76,7 +78,7 @@ def create_asg_event_notifications(
asg_to_create_event_notifications.append(asg_name)
if confirm is False:
- print("Would have created the event notification for asgs {}".format(asg_to_create_event_notifications))
+ print(("Would have created the event notification for asgs {}".format(asg_to_create_event_notifications)))
else:
try:
for asg in asg_to_create_event_notifications:
@@ -85,8 +87,8 @@ def create_asg_event_notifications(
client.put_notification_configuration(AutoScalingGroupName=asg,
TopicARN=topic_arn, NotificationTypes=[event])
- print("Created {0} event notifications for auto scaling group {1}").format(event,
- asg)
+ print(("Created {0} event notifications for auto scaling group {1}").format(event,
+ asg))
- except(Exception, e):
+ except Exception as e:
print(e)
diff --git a/util/aws_ip_locator/ipcollector.py b/util/aws_ip_locator/ipcollector.py
index 45846248a89..31d1f082e8c 100644
--- a/util/aws_ip_locator/ipcollector.py
+++ b/util/aws_ip_locator/ipcollector.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import click
import socket
@@ -19,13 +21,13 @@ def collect_ips(file_name):
print_header(entry['title'])
external_hostnames_key = 'external_hostnames'
- if entry.has_key(external_hostnames_key):
+ if external_hostnames_key in entry:
external_hostnames = entry[external_hostnames_key]
for hostname in external_hostnames:
print_line_item(hostname, get_ip_for_hostname(hostname))
ec2_instance_name_tags_key = 'ec2_instance_name_tags'
- if entry.has_key(ec2_instance_name_tags_key):
+ if ec2_instance_name_tags_key in entry:
ec2_name_tags = entry[ec2_instance_name_tags_key]
for pair in ec2_name_tags:
display_name = pair['display_name']
@@ -34,7 +36,7 @@ def collect_ips(file_name):
print_line_item(display_name, ip)
ec2_elb_name_tags_key = 'ec2_elb_name_tags'
- if entry.has_key(ec2_elb_name_tags_key):
+ if ec2_elb_name_tags_key in entry:
ec2_elb_name_tags = entry[ec2_elb_name_tags_key]
for pair in ec2_elb_name_tags:
display_name = pair['display_name']
@@ -43,7 +45,7 @@ def collect_ips(file_name):
print_line_item(display_name, ip)
elasticache_clusters_key = 'elasticache_clusters'
- if entry.has_key(elasticache_clusters_key):
+ if elasticache_clusters_key in entry:
elasticache_clusters = entry[elasticache_clusters_key]
for cluster in elasticache_clusters:
display_name = cluster['display_name']
@@ -51,15 +53,15 @@ def collect_ips(file_name):
print_line_item(display_name, get_elasticache_ip_by_cluster_id(cluster_id))
rds_instances_key = 'rds_instances'
- if entry.has_key(rds_instances_key):
+ if rds_instances_key in entry:
rds_instances = entry[rds_instances_key]
for instance in rds_instances:
display_name = instance['display_name']
instance_id = None
- if instance.has_key('instance_id'):
+ if 'instance_id' in instance:
instance_id = instance['instance_id']
print_line_item(display_name, get_rds_ip_by_instance_id(instance_id))
- elif instance.has_key('cluster_id'):
+ elif 'cluster_id' in instance:
cluster_id = instance['cluster_id']
instance_id = get_writer_instance_id_by_cluster_id(cluster_id)
print_line_item(display_name, get_rds_ip_by_instance_id(instance_id))
@@ -67,7 +69,7 @@ def collect_ips(file_name):
raise ValueError("Can't locate RDS instance without instance_id or cluster_id")
static_entries_key = 'static_entries'
- if entry.has_key(static_entries_key):
+ if static_entries_key in entry:
static_entries = entry[static_entries_key]
for item in static_entries:
display_name = item['display_name']
@@ -85,11 +87,11 @@ def print_header(name):
============================
{0}
============================"""
- print(header.format(name))
+ print((header.format(name)))
def print_line_item(target, ip):
line = "[ * ] {0} - {1}"
- print(line.format(target, ip))
+ print((line.format(target, ip)))
def get_instance_ip_by_name_tag(value):
client = boto3.client('ec2')
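dict.has_key() was removed in Python 3; the in operator is the portable membership test and is what the hunks above switch to. A small sketch with a fabricated entry:

    entry = {'external_hostnames': ['lms.example.com', 'studio.example.com']}

    # Python 2 only: entry.has_key('external_hostnames')
    # Python 2 and 3:
    if 'external_hostnames' in entry:
        for hostname in entry['external_hostnames']:
            print(hostname)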
diff --git a/util/balancecontainers.py b/util/balancecontainers.py
index 86a376ec726..3a74f5efb6a 100644
--- a/util/balancecontainers.py
+++ b/util/balancecontainers.py
@@ -1,7 +1,10 @@
+from __future__ import absolute_import
+from __future__ import print_function
import argparse
import logging
import os
import sys
+from six.moves import range
try:
# This script is used by docker.mk at parse-time, which means when you run
@@ -102,4 +105,4 @@ def arg_parse():
conts = [x[0] for x in shard["images"]]
line = middle.join(conts)
- print line
+ print(line)
diff --git a/util/check_dockerfile_coverage.py b/util/check_dockerfile_coverage.py
index b6267c0db70..358d5990e5d 100644
--- a/util/check_dockerfile_coverage.py
+++ b/util/check_dockerfile_coverage.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
import yaml
import os
import pathlib2
diff --git a/util/check_for_key_collisions/check_for_key_collisions.py b/util/check_for_key_collisions/check_for_key_collisions.py
index 6fb1568f54c..b06d6705351 100644
--- a/util/check_for_key_collisions/check_for_key_collisions.py
+++ b/util/check_for_key_collisions/check_for_key_collisions.py
@@ -1,7 +1,10 @@
+from __future__ import absolute_import
+from __future__ import print_function
import click
import yaml
import json
from collections import defaultdict
+import six
@click.command()
@click.option('--files', '-m', multiple=True)
@@ -10,25 +13,25 @@ def check_for_yaml_key_collisions(files):
for file_path in files:
content = None
if file_path.endswith(".yml") or file_path.endswith(".yaml"):
- stream = file(file_path, 'r')
+ stream = open(file_path, 'r')
content = yaml.safe_load(stream)
elif file_path.endswith(".json"):
with open(file_path, "r") as read_file:
content = json.load(read_file)
- for key, value in content.iteritems():
+ for key, value in six.iteritems(content):
values_for_keys[key].append(value)
collisions = {}
- for key,value in values_for_keys.iteritems():
+ for key,value in six.iteritems(values_for_keys):
if len(value) > 1:
collisions[key] = value
- if len(collisions.keys()) > 0:
- print(str.format("Found key collisions: {}", len(collisions)))
- for key,value in collisions.iteritems():
- print(str.format("{} {}", key, value))
+ if len(collisions) > 0:
+ print((str.format("Found key collisions: {}", len(collisions))))
+ for key,value in six.iteritems(collisions):
+ print((str.format("{} {}", key, value)))
exit(1)
else:
print("No collisions found")
diff --git a/util/check_rds_slow_query_logs/check_rds_slow_query_logs.py b/util/check_rds_slow_query_logs/check_rds_slow_query_logs.py
index ad6c25f6e48..15aa63e4c28 100644
--- a/util/check_rds_slow_query_logs/check_rds_slow_query_logs.py
+++ b/util/check_rds_slow_query_logs/check_rds_slow_query_logs.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import click
@@ -94,7 +96,7 @@ def cli(db_engine, whitelist):
exit_status = 1
slow_query_logs_disabled_rds.append(db_identifier)
- print("Slow query logs are disabled for RDS Instances\n{0}".format(slow_query_logs_disabled_rds))
+ print(("Slow query logs are disabled for RDS Instances\n{0}".format(slow_query_logs_disabled_rds)))
exit(exit_status)
if __name__ == '__main__':
diff --git a/util/cloudflare/by_origin_purger/purger.py b/util/cloudflare/by_origin_purger/purger.py
index 6b5d3a126e5..916aedf4297 100644
--- a/util/cloudflare/by_origin_purger/purger.py
+++ b/util/cloudflare/by_origin_purger/purger.py
@@ -1,5 +1,8 @@
+from __future__ import absolute_import
+from __future__ import print_function
import requests
import click
+from six.moves import range
@@ -36,7 +39,7 @@ def purge(cloudflare_email, cloudflare_api_key, cloudflare_zone_id, origin, clou
chunks = divide_chunks(lines, chunk_size)
for chunk in chunks:
if not confirm:
- print(str.format("Will purge: {} at origin {} and {} others like it. Add --confirm to execute.", chunk[0]['url'], chunk[0]['headers']['Origin'], len(chunk)))
+ print((str.format("Will purge: {} at origin {} and {} others like it. Add --confirm to execute.", chunk[0]['url'], chunk[0]['headers']['Origin'], len(chunk))))
else:
headers = {'X-Auth-Email': cloudflare_email,
'X-Auth-Key': cloudflare_api_key,
@@ -46,7 +49,7 @@ def purge(cloudflare_email, cloudflare_api_key, cloudflare_zone_id, origin, clou
}
url = str.format("https://api.cloudflare.com/client/v4/zones/{cloudflare_zone_id}/purge_cache", cloudflare_zone_id=cloudflare_zone_id)
response = requests.delete(url, headers=headers, json=payload)
- print(response.json())
+ print((response.json()))
if __name__ == '__main__':
purge()
diff --git a/util/cluster_instance_monitoring.py b/util/cluster_instance_monitoring.py
index 970ec9636ee..7eb43bdf305 100644
--- a/util/cluster_instance_monitoring.py
+++ b/util/cluster_instance_monitoring.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import argparse
import sys
@@ -73,9 +75,9 @@ def find_active_instances(cluster_file, region):
if no_active_instances_triples:
print("Fail. There are no active instances for the following cluster(s)")
for triple in no_active_instances_triples:
- print('environment: ' + triple.split('-')[0])
- print('deployment: ' + triple.split('-')[1])
- print('cluster: ' + triple.split('-')[2])
+ print(('environment: ' + triple.split('-')[0]))
+ print(('deployment: ' + triple.split('-')[1]))
+ print(('cluster: ' + triple.split('-')[2]))
print('----')
sys.exit(1)
diff --git a/util/docker_images.py b/util/docker_images.py
index bc6a5622ff9..6d0ce439551 100644
--- a/util/docker_images.py
+++ b/util/docker_images.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
import yaml
import os
import pathlib2
@@ -21,7 +22,7 @@ def get_used_images(images):
with (config_file_path.open(mode='r')) as file:
try:
config = yaml.safe_load(file)
- except yaml.YAMLError, exc:
+ except yaml.YAMLError as exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
@@ -29,7 +30,7 @@ def get_used_images(images):
weights = config.get("weights")
# convert all images in config file to a list of tuples (, )
- weights_list = [x.items() for x in weights]
+ weights_list = [list(x.items()) for x in weights]
weights_list = list(itertools.chain.from_iterable(weights_list))
# performs intersection between weighted images and input images
diff --git a/util/elasticsearch/verify-index-copy.py b/util/elasticsearch/verify-index-copy.py
index 4e6ec8432cb..5f1cfecc1ca 100755
--- a/util/elasticsearch/verify-index-copy.py
+++ b/util/elasticsearch/verify-index-copy.py
@@ -3,6 +3,8 @@
Verifies that an index was correctly copied from one ES host to another.
"""
+from __future__ import absolute_import
+from __future__ import print_function
import itertools
import pprint
import random
@@ -56,7 +58,7 @@ def grouper(iterable, n):
"""
# grouper('ABCDEFG', 3) --> ABC DEF G (last group padded with None)
args = [iter(iterable)] * n
- return itertools.izip_longest(*args)
+ return itertools.zip_longest(*args)
def docs_match(old_doc, new_doc):
@@ -122,8 +124,8 @@ def docs_match(old_doc, new_doc):
#if this fails something is horribly wrong
if set(diff_doc.keys()) != set(diff_types):
- print 'ERROR: expected to be diffing dictionaries, got something else! id: {}'.format(
- new_doc['_id'])
+ print('ERROR: expected to be diffing dictionaries, got something else! id: {}'.format(
+ new_doc['_id']))
for diff_type in diff_types:
for field in ignorable_fields:
@@ -163,13 +165,13 @@ def find_matching_ids(es, index, ids, docs):
if docs_match(docs[elt['_id']], elt):
matching += 1
else:
- print 'FAILURE: Documents with id {id} do not match: '.format(
+ print('FAILURE: Documents with id {id} do not match: '.format(
id=elt['_id']
- ) + repr({'diff': DeepDiff(docs[elt['_id']], elt), 'new': elt, 'old': docs[elt['_id']]})
+ ) + repr({'diff': DeepDiff(docs[elt['_id']], elt), 'new': elt, 'old': docs[elt['_id']]}))
else:
- print 'ERROR: Document with id {id} missing: {doc}'.format(
+ print('ERROR: Document with id {id} missing: {doc}'.format(
id=elt['_id'], doc=docs[elt['_id']]
- )
+ ))
return matching
@@ -198,12 +200,12 @@ def scan_documents(old_es, new_es, old_index, new_index):
matching += find_matching_ids(new_es, new_index, old_elt_ids, old_elt_docs)
total += len(old_elt_ids)
if total % 100 == 0:
- print 'processed {} items'.format(total)
+ print('processed {} items'.format(total))
ratio = float(matching)/total
- print "{}: scanned documents matching ({} out of {}, {:.6}%)".format(
+ print("{}: scanned documents matching ({} out of {}, {:.6}%)".format(
'OK' if ratio > SCAN_MATCH_THRESHOLD else 'FAILURE', matching, total, ratio * 100
- )
+ ))
def random_checks(old_es, new_es, old_index, new_index, total_document_count, check_percentage):
@@ -249,12 +251,12 @@ def random_checks(old_es, new_es, old_index, new_index, total_document_count, ch
current_offset += num_elts
if total % 100 == 0:
- print 'processed {} items'.format(total)
+ print('processed {} items'.format(total))
ratio = float(matching) / total
- print "{}: random documents matching ({} out of {}, {}%)".format(
+ print("{}: random documents matching ({} out of {}, {}%)".format(
'OK' if ratio > SCAN_MATCH_THRESHOLD else 'FAILURE', matching, total, int(ratio * 100)
- )
+ ))
def check_mappings(old_mapping, new_mapping):
@@ -267,10 +269,10 @@ def check_mappings(old_mapping, new_mapping):
deep_diff = DeepDiff(old_mapping, new_mapping)
if deep_diff != {}:
- print "FAILURE: Index mappings do not match"
+ print("FAILURE: Index mappings do not match")
pprint.pprint(deep_diff)
else:
- print "OK: Index mappings match"
+ print("OK: Index mappings match")
def main():
@@ -284,26 +286,26 @@ def main():
old_index = args.old[1]
new_index = args.new[1]
- old_stats = old_es.indices.stats(index=old_index)['indices'].values()[0]['primaries']
- new_stats = new_es.indices.stats(index=new_index)['indices'].values()[0]['primaries']
+ old_stats = list(old_es.indices.stats(index=old_index)['indices'].values())[0]['primaries']
+ new_stats = list(new_es.indices.stats(index=new_index)['indices'].values())[0]['primaries']
#compare document count
old_count = old_stats['docs']['count']
new_count = new_stats['docs']['count']
- print "{}: Document count ({} = {})".format(
+ print("{}: Document count ({} = {})".format(
'OK' if old_count == new_count else 'FAILURE', old_count, new_count
- )
+ ))
old_size = old_stats['store']['size_in_bytes']
new_size = new_stats['store']['size_in_bytes']
- print "{}: Index size ({} = {})".format(
+ print("{}: Index size ({} = {})".format(
'OK' if old_count == new_count else 'FAILURE', old_size, new_size
- )
+ ))
def get_mappings(es, index):
# for 1.5.x, there is an extra 'mappings' field that holds the mappings.
- mappings = es.indices.get_mapping(index=index).values()[0]
+ mappings = list(es.indices.get_mapping(index=index).values())[0]
new_style = mappings.get('mappings', None)
return new_style if new_style is not None else mappings
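The list() wrappers above are needed because Python 3's dict.values() returns a view that cannot be indexed. A sketch against a fabricated stats payload; next(iter(...)) avoids building the intermediate list when only the first element is wanted:

    stats = {'indices': {'content': {'primaries': {'docs': {'count': 42}}}}}

    # Python 2 allowed stats['indices'].values()[0]; Python 3 raises TypeError
    primaries = list(stats['indices'].values())[0]['primaries']
    primaries = next(iter(stats['indices'].values()))['primaries']  # equivalent
    print(primaries['docs']['count'])

One caveat: itertools.zip_longest exists only on Python 3 (Python 2 spells it itertools.izip_longest), so if this script must keep passing the 2.7 shard, from six.moves import zip_longest would be the dual-version spelling.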
diff --git a/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py b/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py
index d02e73eb7ec..1c6b7d105f6 100644
--- a/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py
+++ b/util/elb_tls_policy_management_util/elb_tls_policy_management_util.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import click
import datetime
@@ -62,12 +64,12 @@ def get_elb_names():
def print_header(header):
print("\n\n----------------------------------------------")
- print("[ ] {0}".format(header))
+ print(("[ ] {0}".format(header)))
print("----------------------------------------------")
def print_line_item(line_item):
- print("[ * ] {0}".format(line_item))
+ print(("[ * ] {0}".format(line_item)))
def print_list(name, items_list):
@@ -198,7 +200,7 @@ def update_elb_policies(confirm, policy_version, names, port_override):
else:
for elb_name in elb_names_to_update:
tls_policy_name = create_tls_policy(elb_name, policy_version)
- print("Trying to update...{0}".format(elb_name))
+ print(("Trying to update...{0}".format(elb_name)))
client = get_client()
# Determine which policies are actually active
@@ -266,7 +268,7 @@ def update_elb_policies(confirm, policy_version, names, port_override):
LoadBalancerPort=tls_port,
PolicyNames=policy_names
)
- print("Updated {0}\n".format(elb_name))
+ print(("Updated {0}\n".format(elb_name)))
cli.add_command(show_available_policy_versions)
cli.add_command(show_elb_policy_versions)
diff --git a/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py b/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py
index 7ec741fa018..dddbfcc41a2 100644
--- a/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py
+++ b/util/jenkins/add_new_xqueues_to_dashboard/add_xqueue_to_dashboard.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import pprint
import re
diff --git a/util/jenkins/check-ses-limits.py b/util/jenkins/check-ses-limits.py
index ab16d49f42f..838c63afbe2 100755
--- a/util/jenkins/check-ses-limits.py
+++ b/util/jenkins/check-ses-limits.py
@@ -2,6 +2,8 @@
# This script is used by the monitoring/check-seslimits Jenkins job
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import argparse
import sys
@@ -29,7 +31,7 @@ def __call__(self, parser, namespace, values, option_string=None):
if args.warning and args.warning >= args.critical:
warn_str = "Warning threshold ({})".format(args.warning)
crit_str = "Critical threshold ({})".format(args.critical)
- print("ERROR: {} >= {}".format(warn_str, crit_str))
+ print(("ERROR: {} >= {}".format(warn_str, crit_str)))
sys.exit(1)
exit_code = 0
@@ -49,8 +51,8 @@ def __call__(self, parser, namespace, values, option_string=None):
level = "WARNING"
if level:
- print("{} {}/{} ({}%) - {}".format(region, current, limit, percent,
- level))
+ print(("{} {}/{} ({}%) - {}".format(region, current, limit, percent,
+ level)))
exit_code += 1
sys.exit(exit_code)
diff --git a/util/jenkins/check_celery_progress/check_celery_progress.py b/util/jenkins/check_celery_progress/check_celery_progress.py
index 8d28e66c5b9..dd85a057a41 100644
--- a/util/jenkins/check_celery_progress/check_celery_progress.py
+++ b/util/jenkins/check_celery_progress/check_celery_progress.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import sys
import pickle
import json
@@ -34,7 +36,7 @@ def __init__(self, dev_test_mode=None, *args, **kwargs):
redis.exceptions.ConnectionError),
max_tries=MAX_TRIES)
def keys(self):
- return self.redis.keys()
+ return list(self.redis.keys())
@backoff.on_exception(backoff.expo,
(redis.exceptions.TimeoutError,
@@ -70,7 +72,7 @@ def hgetall(self, key):
max_tries=MAX_TRIES)
def delete(self, key):
if self.dev_test_mode:
- print("Test Mode: would have run redis.delete({})".format(key))
+ print(("Test Mode: would have run redis.delete({})".format(key)))
else:
return self.redis.delete(key)
@@ -80,7 +82,7 @@ def delete(self, key):
max_tries=MAX_TRIES)
def hset(self, *args):
if self.dev_test_mode:
- print("Test Mode: would have run redis.hset({})".format(args))
+ print(("Test Mode: would have run redis.hset({})".format(args)))
else:
return self.redis.hset(*args)
@@ -90,7 +92,7 @@ def hset(self, *args):
max_tries=MAX_TRIES)
def hmset(self, *args):
if self.dev_test_mode:
- print("Test Mode: would have run redis.hmset({})".format(args))
+ print(("Test Mode: would have run redis.hmset({})".format(args)))
else:
return self.redis.hmset(*args)
@@ -106,7 +108,7 @@ def __init__(self, dev_test_mode=None):
max_tries=MAX_TRIES)
def put_metric_data(self, *args, **kwargs):
if self.dev_test_mode:
- print("Test Mode: would have run put_metric_data({},{})".format(args, kwargs))
+ print(("Test Mode: would have run put_metric_data({},{})".format(args, kwargs)))
else:
return self.client.put_metric_data(*args, **kwargs)
@@ -204,13 +206,13 @@ def create_alert(opsgenie_api_key, environment, deploy, queue_name, threshold, i
alias = generate_alert_alias(environment, deploy, queue_name)
if dev_test_mode:
- print("Test Mode: would have created Alert: {}".format(alias))
+ print(("Test Mode: would have created Alert: {}".format(alias)))
else:
- print("Creating Alert: {}".format(alias))
+ print(("Creating Alert: {}".format(alias)))
response = AlertApi().create_alert(body=CreateAlertRequest(message=alert_msg, alias=alias, description=info))
- print('request id: {}'.format(response.request_id))
- print('took: {}'.format(response.took))
- print('result: {}'.format(response.result))
+ print(('request id: {}'.format(response.request_id)))
+ print(('took: {}'.format(response.took)))
+ print(('result: {}'.format(response.result)))
@backoff.on_exception(backoff.expo,
@@ -225,14 +227,14 @@ def close_alert(opsgenie_api_key, environment, deploy, queue_name, dev_test_mode
alias = generate_alert_alias(environment, deploy, queue_name)
if dev_test_mode:
- print("Test Mode: would have closed Alert: {}".format(alias))
+ print(("Test Mode: would have closed Alert: {}".format(alias)))
else:
- print("Closing Alert: {}".format(alias))
+ print(("Closing Alert: {}".format(alias)))
# Need body=CloseAlertRequest(source="") otherwise OpsGenie API complains that body must be a json object
response = AlertApi().close_alert(identifier=alias, identifier_type='alias', body=CloseAlertRequest(source=""))
- print('request id: {}'.format(response.request_id))
- print('took: {}'.format(response.took))
- print('result: {}'.format(response.result))
+ print(('request id: {}'.format(response.request_id)))
+ print(('took: {}'.format(response.took)))
+ print(('result: {}'.format(response.result)))
def extract_body(task):
@@ -324,7 +326,7 @@ def celery_connection(host, port):
broker_url = "redis://" + host + ":" + str(port)
celery_client = Celery(broker=broker_url)
except Exception as e:
- print("Exception in connection():", e)
+ print(("Exception in connection():", e))
return celery_client
@@ -351,7 +353,7 @@ def get_active_tasks(celery_control, queue_workers, queue_name):
'kwargs: REDACTED',
])
except Exception as e:
- print("Exception in get_active_tasks():", e)
+ print(("Exception in get_active_tasks():", e))
return (pretty_json(active_tasks), pretty_json(redacted_active_tasks))
@@ -376,8 +378,8 @@ def check_queues(host, port, environment, deploy, default_threshold, queue_thres
jenkins_build_url, max_metrics, dev_test_mode):
ret_val = 0
thresholds = dict(queue_threshold)
- print("Default Threshold (seconds): {}".format(default_threshold))
- print("Per Queue Thresholds (seconds):\n{}".format(pretty_json(thresholds)))
+ print(("Default Threshold (seconds): {}".format(default_threshold)))
+ print(("Per Queue Thresholds (seconds):\n{}".format(pretty_json(thresholds))))
timeout = 1
redis_client = RedisWrapper(host=host, port=port, socket_timeout=timeout,
@@ -403,11 +405,11 @@ def check_queues(host, port, environment, deploy, default_threshold, queue_thres
for queue in data:
queue_workers.setdefault(queue['name'], []).append(worker)
except Exception as e:
- print("Exception while getting queue to worker mappings:", e)
+ print(("Exception while getting queue to worker mappings:", e))
old_state = unpack_state(queue_age_hash)
# Temp debugging
- print("DEBUG: old_state\n{}\n".format(pretty_state(old_state)))
+ print(("DEBUG: old_state\n{}\n".format(pretty_state(old_state))))
queue_first_items = {}
current_time = datetime.datetime.now()
@@ -423,7 +425,7 @@ def check_queues(host, port, environment, deploy, default_threshold, queue_thres
new_state = build_new_state(old_state, queue_first_items, current_time)
# Temp debugging
- print("DEBUG: new_state from new_state() function\n{}\n".format(pretty_state(new_state)))
+ print(("DEBUG: new_state from new_state() function\n{}\n".format(pretty_state(new_state))))
for queue_name, first_item in queue_first_items.items():
redacted_body = ""
threshold = default_threshold
@@ -436,7 +438,7 @@ def check_queues(host, port, environment, deploy, default_threshold, queue_thres
try:
body = extract_body(first_item)
except Exception as error:
- print("ERROR: Unable to extract task body in queue {}, exception {}".format(queue_name, error))
+ print(("ERROR: Unable to extract task body in queue {}, exception {}".format(queue_name, error)))
ret_val = 1
redacted_body = {'task': body.get('task'), 'args': 'REDACTED', 'kwargs': 'REDACTED'}
active_tasks, redacted_active_tasks = get_active_tasks(celery_control, queue_workers, queue_name)
@@ -489,7 +491,7 @@ def check_queues(host, port, environment, deploy, default_threshold, queue_thres
new_state[queue_name]['alert_created'] = False
for queue_name in set(old_state.keys()) - set(new_state.keys()):
- print("DEBUG: Checking cleared queue {}".format(queue_name))
+ print(("DEBUG: Checking cleared queue {}".format(queue_name)))
if old_state[queue_name]['alert_created']:
close_alert(opsgenie_api_key, environment, deploy, queue_name, dev_test_mode=dev_test_mode)
@@ -497,12 +499,12 @@ def check_queues(host, port, environment, deploy, default_threshold, queue_thres
if new_state:
redis_client.hmset(QUEUE_AGE_HASH_NAME, pack_state(new_state))
# Temp Debugging
- print("DEBUG: new_state pushed to redis\n{}\n".format(pretty_state(new_state)))
+ print(("DEBUG: new_state pushed to redis\n{}\n".format(pretty_state(new_state))))
# Push next_task_age data to cloudwatch for tracking
if len(next_task_age_metric_data) > 0:
for metric_data_grouped in grouper(next_task_age_metric_data, max_metrics):
- print("next_task_age_metric_data {}".format(next_task_age_metric_data))
+ print(("next_task_age_metric_data {}".format(next_task_age_metric_data)))
cloudwatch.put_metric_data(Namespace=namespace, MetricData=next_task_age_metric_data)
sys.exit(ret_val)
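The wrapper classes in this file retry transient Redis and CloudWatch failures with exponential backoff before giving up. A stripped-down sketch of the same decorator pattern (a simplified stand-in, not the full class above):

    import backoff
    import redis

    MAX_TRIES = 5

    class RedisWrapper(object):
        def __init__(self, **kwargs):
            self.redis = redis.StrictRedis(**kwargs)

        @backoff.on_exception(backoff.expo,
                              (redis.exceptions.TimeoutError,
                               redis.exceptions.ConnectionError),
                              max_tries=MAX_TRIES)
        def keys(self):
            # list() so callers get the same concrete list on Python 2 and 3
            return list(self.redis.keys())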
diff --git a/util/jenkins/check_celery_progress/print_queue.py b/util/jenkins/check_celery_progress/print_queue.py
index 9a414699876..7e495fd4833 100644
--- a/util/jenkins/check_celery_progress/print_queue.py
+++ b/util/jenkins/check_celery_progress/print_queue.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import sys
import pickle
import json
@@ -10,6 +12,7 @@
from celery import Celery
from textwrap import dedent
from pprint import pprint
+from six.moves import range
MAX_TRIES = 5
@@ -26,7 +29,7 @@ def __init__(self, *args, **kwargs):
redis.exceptions.ConnectionError),
max_tries=MAX_TRIES)
def keys(self):
- return self.redis.keys()
+ return list(self.redis.keys())
@backoff.on_exception(backoff.expo,
(redis.exceptions.TimeoutError,
@@ -138,7 +141,7 @@ def celery_connection(host, port):
broker_url = "redis://" + host + ":" + str(port)
celery_client = Celery(broker=broker_url)
except Exception as e:
- print("Exception in connection():", e)
+ print(("Exception in connection():", e))
return celery_client
@@ -165,7 +168,7 @@ def get_active_tasks(celery_control, queue_workers, queue_name):
'kwargs: REDACTED',
])
except Exception as e:
- print("Exception in get_active_tasks():", e)
+ print(("Exception in get_active_tasks():", e))
return (pretty_json(active_tasks), pretty_json(redacted_active_tasks))
@@ -192,10 +195,10 @@ def check_queues(host, port, queue, items, body):
for queue in data:
queue_workers.setdefault(queue['name'], []).append(worker)
except Exception as e:
- print("Exception while getting queue to worker mappings:", e)
+ print(("Exception while getting queue to worker mappings:", e))
for count in range(items):
- print("Count: {}".format(count))
+ print(("Count: {}".format(count)))
queue_first_item = redis_client.lindex(queue_name, count)
# Check that queue_first_item is not None which is the case if the queue is empty
if queue_first_item is not None:
@@ -207,7 +210,7 @@ def check_queues(host, port, queue, items, body):
try:
extracted_body = extract_body(queue_first_item_decoded)
except Exception as error:
- print("ERROR: Unable to extract task body in queue {}, exception {}".format(queue_name, error))
+ print(("ERROR: Unable to extract task body in queue {}, exception {}".format(queue_name, error)))
ret_val = 1
active_tasks, redacted_active_tasks = get_active_tasks(celery_control, queue_workers, queue_name)
diff --git a/util/jenkins/check_celery_progress/test_check_celery_progress.py b/util/jenkins/check_celery_progress/test_check_celery_progress.py
index b7571c788da..25f6f4cb922 100644
--- a/util/jenkins/check_celery_progress/test_check_celery_progress.py
+++ b/util/jenkins/check_celery_progress/test_check_celery_progress.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
import unittest
import datetime
from datetime import timedelta
diff --git a/util/jenkins/check_table_size/check_table_size.py b/util/jenkins/check_table_size/check_table_size.py
index d98ffe2a84f..aa0840dd158 100644
--- a/util/jenkins/check_table_size/check_table_size.py
+++ b/util/jenkins/check_table_size/check_table_size.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import sys
@@ -43,7 +45,7 @@ def rds_extractor():
try:
regions_list = client_region.describe_regions()
except ClientError as e:
- print("Unable to connect to AWS with error :{}".format(e))
+ print(("Unable to connect to AWS with error :{}".format(e)))
sys.exit(1)
for region in regions_list["Regions"]:
client = RDSBotoWrapper(region_name=region["RegionName"])
@@ -109,7 +111,7 @@ def check_table_growth(rds_list, username, password, threshold, rds_threshold):
table_list.append(temp_dict)
return table_list
except Exception as ex:
- print ex
+ print(ex)
sys.exit(1)
@@ -135,13 +137,13 @@ def controller(username, password, threshold, rdsthreshold, rdsignore):
"""
rds_threshold = dict(rdsthreshold)
rds_list = rds_extractor()
- filtered_rds_list = list(filter(lambda x: x['name'] not in rdsignore, rds_list))
+ filtered_rds_list = [x for x in rds_list if x['name'] not in rdsignore]
table_list = check_table_growth(filtered_rds_list, username, password, threshold, rds_threshold)
if len(table_list) > 0:
format_string = "{:<40}{:<20}{:<50}{}"
- print(format_string.format("RDS Name","Database Name", "Table Name", "Size"))
+ print((format_string.format("RDS Name","Database Name", "Table Name", "Size")))
for items in table_list:
- print(format_string.format(items["rds"], items["db"], items["table"], str(items["size"]) + " MB"))
+ print((format_string.format(items["rds"], items["db"], items["table"], str(items["size"]) + " MB")))
exit(1)
exit(0)
diff --git a/util/jenkins/cloudflare-hit-rate.py b/util/jenkins/cloudflare-hit-rate.py
index 955ca3a4c2f..4bb2c68dd30 100644
--- a/util/jenkins/cloudflare-hit-rate.py
+++ b/util/jenkins/cloudflare-hit-rate.py
@@ -3,6 +3,8 @@
https://api.cloudflare.com/#zone-analytics-dashboard
"""
+from __future__ import absolute_import
+from __future__ import print_function
import requests
import argparse
import sys
@@ -30,7 +32,7 @@ def calcualte_cache_hit_rate(zone_id, auth_key, email, threshold):
sys.exit(1)
except Exception as error:
- print("JSON Error: {} \n Content returned from API call: {}".format(error, res.text))
+ print(("JSON Error: {} \n Content returned from API call: {}".format(error, res.text)))
diff --git a/util/jenkins/export_dead_locks/export_dead_locks.py b/util/jenkins/export_dead_locks/export_dead_locks.py
index e8268bf6411..f2bf79f2309 100644
--- a/util/jenkins/export_dead_locks/export_dead_locks.py
+++ b/util/jenkins/export_dead_locks/export_dead_locks.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import sys
@@ -47,7 +49,7 @@ def rds_extractor(environment):
try:
regions_list = client_region.describe_regions()
except ClientError as e:
- print("Unable to connect to AWS with error :{}".format(e))
+ print(("Unable to connect to AWS with error :{}".format(e)))
sys.exit(1)
for region in regions_list["Regions"]:
rds_client = RDSBotoWrapper(region_name=region["RegionName"])
@@ -112,7 +114,7 @@ def rds_controller(rds_list, username, password, hostname, splunkusername, splun
@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times')
def main(username, password, environment, hostname, splunkusername, splunkpassword, port, indexname, rdsignore):
rds_list = rds_extractor(environment)
- filtered_rds_list = list(filter(lambda x: x['name'] not in rdsignore, rds_list))
+ filtered_rds_list = [x for x in rds_list if x['name'] not in rdsignore]
rds_controller(filtered_rds_list, username, password, hostname, splunkusername, splunkpassword, port, indexname)
diff --git a/util/jenkins/export_slow_logs/export_slow_query_logs.py b/util/jenkins/export_slow_logs/export_slow_query_logs.py
index 26e632c6b53..c5f1df1bb52 100644
--- a/util/jenkins/export_slow_logs/export_slow_query_logs.py
+++ b/util/jenkins/export_slow_logs/export_slow_query_logs.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import sys
@@ -62,7 +64,7 @@ def rds_extractor(environment):
try:
regions_list = client_region.describe_regions()
except ClientError as e:
- print("Unable to connect to AWS with error :{}".format(e))
+ print(("Unable to connect to AWS with error :{}".format(e)))
sys.exit(1)
for region in regions_list["Regions"]:
client = RDSBotoWrapper(region_name=region["RegionName"])
@@ -102,9 +104,9 @@ def rds_controller(rds_list, username, password):
loggroupname= "/slowlogs/" + rds_host_endpoint
try:
client.create_log_group(logGroupName=loggroupname)
- print('Created CloudWatch log group named "%s"', loggroupname)
+ print('Created CloudWatch log group named "%s"' % loggroupname)
except ClientError:
- print('CloudWatch log group named "%s" already exists', loggroupname)
+ print('CloudWatch log group named "%s" already exists' % loggroupname)
LOG_STREAM = time.strftime('%Y-%m-%d') + "/[$LATEST]" + uuid.uuid4().hex
client.create_log_stream(logGroupName=loggroupname, logStreamName=LOG_STREAM)
for tables in rds_result:
@@ -138,7 +140,7 @@ def rds_controller(rds_list, username, password):
@click.option('--rdsignore', '-i', multiple=True, help='RDS name tags to not check, can be specified multiple times')
def main(username, password, environment, rdsignore):
rds_list = rds_extractor(environment)
- filtered_rds_list = list(filter(lambda x: x['name'] not in rdsignore, rds_list))
+ filtered_rds_list = [x for x in rds_list if x['name'] not in rdsignore]
rds_controller(filtered_rds_list, username, password)
diff --git a/util/jenkins/list_mysql_process/list_mysql_process.py b/util/jenkins/list_mysql_process/list_mysql_process.py
index dbd4953b06c..343acb9ce04 100644
--- a/util/jenkins/list_mysql_process/list_mysql_process.py
+++ b/util/jenkins/list_mysql_process/list_mysql_process.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import sys
@@ -43,7 +45,7 @@ def rds_extractor(environment):
try:
regions_list = client_region.describe_regions()
except ClientError as e:
- print("Unable to connect to AWS with error :{}".format(e))
+ print(("Unable to connect to AWS with error :{}".format(e)))
sys.exit(1)
for region in regions_list["Regions"]:
client = RDSBotoWrapper(region_name=region["RegionName"])
@@ -103,7 +105,7 @@ def check_queries_running(rds_list, username, password):
process_list.append(temp_dict)
return process_list
except Exception as ex:
- print ex
+ print(ex)
sys.exit(1)
@@ -126,14 +128,14 @@ def controller(username, password, environment, rdsignore):
Get this from cli args
"""
rds_list = rds_extractor(environment)
- filtered_rds_list = list(filter(lambda x: x['name'] not in rdsignore, rds_list))
+ filtered_rds_list = [x for x in rds_list if x['name'] not in rdsignore]
process_list = check_queries_running(filtered_rds_list, username, password)
if len(process_list) > 0:
format_string = "{:<20}{:<20}{:<30}{:<20}{:<20}{:<70}{}"
- print(format_string.format("Query ID", "User Name", "Host", "Command", "Time Executed", "State", "Info"))
+ print((format_string.format("Query ID", "User Name", "Host", "Command", "Time Executed", "State", "Info")))
for items in process_list:
- print(format_string.format(items["id"], items["user"], items["host"], items["command"],
- str(items["time"]) + " sec", items["state"], items["info"]))
+ print((format_string.format(items["id"], items["user"], items["host"], items["command"],
+ str(items["time"]) + " sec", items["state"], items["info"])))
exit(0)
diff --git a/util/jenkins/missing_alerts_checker/missing_alerts_checker.py b/util/jenkins/missing_alerts_checker/missing_alerts_checker.py
new file mode 100644
index 00000000000..dec759040d2
--- /dev/null
+++ b/util/jenkins/missing_alerts_checker/missing_alerts_checker.py
@@ -0,0 +1,272 @@
+from __future__ import absolute_import
+from __future__ import print_function
+import boto3
+import requests
+import click
+from botocore.exceptions import ClientError
+import sys
+import re
+
+
+class NewRelic:
+ def __init__(self, new_relic_api_key):
+ self.url_alert_extractor = "https://api.newrelic.com/v2/alerts_policies.json"
+ self.headers = {'X-Api-Key': new_relic_api_key}
+
+ def new_relic_policies_extractor(self):
+ """
+ Return:
+ Return list of all alert policies extracted from New relic
+ {
+ "policy": {
+ "id": int,
+ "incident_preference": str,
+ "name": str,
+ "created_at": int,
+ "updated_at": int
+ }
+ }
+ """
+ response = requests.get(self.url_alert_extractor, headers=self.headers)
+ if response.status_code != 200:
+ print("Unable to communicate with New relic.")
+ sys.exit(1)
+ try:
+ alert_policies = response.json()
+ except ValueError:
+ print(("Failed to parse response json. Got:\n{}".format(response.text)))
+ sys.exit(1)
+ return alert_policies
+
+
+class InfraAlerts:
+ def edc_extractor(self):
+ """
+ Return list of all EC2 instances with EDC (environment/deployment/cluster) tags across all regions
+ Returns:
+ [
+ {
+ 'name': name,
+ 'ID': instance.id
+ }
+ ]
+ """
+ client_region = boto3.client('ec2')
+ filter_tags = [
+ {
+ "Name": "tag:environment",
+ "Values": ["*"]
+ },
+ {
+ "Name": "tag:deployment",
+ "Values": ["*"]
+ },
+ {
+ "Name": "tag:cluster",
+ "Values": ["*"]
+ },
+ {
+ 'Name': 'instance-state-name',
+ 'Values': ['running']
+ }
+ ]
+ instance_list = []
+ try:
+ regions_list = client_region.describe_regions()
+ except ClientError as e:
+ print(("Unable to connect to AWS with error :{}".format(e)))
+ sys.exit(1)
+ for region in regions_list['Regions']:
+ client = boto3.resource('ec2', region_name=region['RegionName'])
+ response = client.instances.filter(Filters=filter_tags)
+ for instance in response:
+ temp_dict = {}
+ for tag in instance.tags:
+ if tag['Key'] == "Name":
+ name = tag['Value']
+ temp_dict = {
+ 'name': name,
+ 'ID': instance.id
+ }
+ break
+ else:
+ pass
+ instance_list.append(temp_dict)
+ return instance_list
+
+ def missing_alerts_checker(self, instance_list, alert_policies):
+ """
+ Arguments:
+ instance_list (list):
+ List of all instances to check for alert coverage
+ alert_policies list(dict):
+ List of all existing alert policies in New Relic
+ Return:
+ Return list of all instances which have no alert policy in New Relic
+ [
+ {
+ 'name': name,
+ 'ID': instance.id
+ }
+ ]
+ """
+ result_instance = []
+ for instance in instance_list:
+ if not any(policy["name"] == instance["name"] + "-infrastructure" for policy in alert_policies["policies"]):
+ result_instance.append(instance)
+ return result_instance
+
+
+class AppAlerts:
+ def __init__(self, new_relic_api_key):
+ self.url_app_extractor = "https://api.newrelic.com/v2/applications.json"
+ self.headers = {'X-Api-Key': new_relic_api_key}
+
+ def new_relic_app_extractor(self):
+ """
+ Return:
+ Return list of all applications in New Relic
+ """
+ response = requests.get(self.url_app_extractor, headers=self.headers)
+ if response.status_code != 200:
+ print("Unable to communicate with New relic.")
+ sys.exit(1)
+ try:
+ apps_list = response.json()
+ except ValueError:
+ print(("Failed to parse response json. Got:\n{}".format(response.text)))
+ sys.exit(1)
+ return apps_list["applications"]
+
+ def missing_alerts_checker(self, app_list, alert_policies):
+ """
+ Arguments:
+ app_list (list):
+ List of all applications to check for alert coverage
+ alert_policies list(dict):
+ List of all existing alert policies in New Relic
+ Return:
+ Return list of all applications which have no alert policy in New Relic
+ """
+ result_apps = []
+ for apps in app_list:
+ if not any(policy["name"] == apps["name"] + "-application" for policy in alert_policies["policies"]):
+ result_apps.append(apps)
+ return result_apps
+
+
+class BrowserAlerts:
+ def __init__(self, new_relic_api_key):
+ self.url_browser_extractor = "https://api.newrelic.com/v2/browser_applications.json"
+ self.headers = {'X-Api-Key': new_relic_api_key}
+
+ def new_relic_browser_extractor(self):
+ """
+ Return:
+ Return list of all browser applications in New Relic
+ [
+ {
+ "id": "integer",
+ "name": "string",
+ "browser_monitoring_key": "string",
+ "loader_script": "string"
+ }
+ ]
+ """
+ response = requests.get(self.url_browser_extractor, headers=self.headers)
+ if response.status_code != 200:
+ print("Unable to communicate with New relic.")
+ sys.exit(1)
+ try:
+ browser_list = response.json()
+ except ValueError:
+ raise Exception("Failed to parse response json. Got:\n{}".format(response.text))
+ return browser_list["browser_applications"]
+
+ def missing_alerts_checker(self, browser_list, alert_policies):
+ """
+ Arguments:
+ browser_list (list):
+ List of all browser applications to check for alert coverage
+ alert_policies list(dict):
+ List of all existing alert policies in New Relic
+ Return:
+ Return list of all browser applications which have no alert policy in New Relic
+ [
+ {
+ "id": "integer",
+ "name": "string",
+ "browser_monitoring_key": "string",
+ "loader_script": "string"
+ }
+ ]
+ """
+ result_browser = []
+ for browser in browser_list:
+ if not any(policy["name"] == browser["name"].rstrip() + "-browser" for policy in alert_policies["policies"]):
+ result_browser.append(browser)
+ return result_browser
+
+
+@click.command()
+@click.option('--new-relic-api-key', required=True, help='API Key to use to speak with NewRelic.')
+@click.option('--ignore', '-i', multiple=True, help='App name regex to filter out, can be specified multiple times')
+def controller(new_relic_api_key, ignore):
+ """
+ Control execution of all other functions
+ Arguments:
+ new_relic_api_key (str):
+ Get this from cli args
+ """
+ flag = 0
+ # Initializing object of classes
+ infracheck = InfraAlerts()
+ new_relic_obj = NewRelic(new_relic_api_key)
+ # Get list of all instances in different regions
+ instance_list = infracheck.edc_extractor()
+ # Get list of all alert policies in new relic
+ alert_policies = new_relic_obj.new_relic_policies_extractor()
+ # Get list of all instances without alerts
+ missing_alerts_list = infracheck.missing_alerts_checker(instance_list, alert_policies)
+ filtered_missing_alerts_list = [x for x in missing_alerts_list if not any(re.search(r, x['name']) for r in ignore)]
+ format_string = "{:<30}{}"
+ print((format_string.format("Instance ID", "Instance Name")))
+ for instance_wo_alerts in filtered_missing_alerts_list:
+ print((format_string.format(instance_wo_alerts["ID"], instance_wo_alerts["name"])))
+ flag = 1
+
+ # Initializing object of classes
+ appcheck = AppAlerts(new_relic_api_key)
+ new_relic_obj = NewRelic(new_relic_api_key)
+ # Get list of all applications from new relic
+ apps_list = appcheck.new_relic_app_extractor()
+ # Get list of all applications without alerts
+ missing_alerts_list_app = appcheck.missing_alerts_checker(apps_list, alert_policies)
+ filtered_missing_alerts_list_app = [x for x in missing_alerts_list_app if not any(re.search(r, x['name']) for r in ignore)]
+ format_string = "{:<20}{}"
+ print("")
+ print((format_string.format("Application ID", "Application Name")))
+ for instance_wo_alerts in filtered_missing_alerts_list_app:
+ print((format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"])))
+ flag = 1
+
+ # Initializing object of classes
+ browsercheck = BrowserAlerts(new_relic_api_key)
+ new_relic_obj = NewRelic(new_relic_api_key)
+ # Get list of all browser applications from new relic
+ browser_list = browsercheck.new_relic_browser_extractor()
+ # Get list of all browser applications without alerts
+ missing_alerts_list_browser = browsercheck.missing_alerts_checker(browser_list, alert_policies)
+ filtered_missing_alerts_list_browser = [x for x in missing_alerts_list_browser if not any(re.search(r, x['name']) for r in ignore)]
+ format_string = "{:<20}{}"
+ print("")
+ print((format_string.format("Browser ID", "Browser Name")))
+ for instance_wo_alerts in filtered_missing_alerts_list_browser:
+ print((format_string.format(instance_wo_alerts["id"], instance_wo_alerts["name"])))
+ flag = 1
+ sys.exit(flag)
+
+
+if __name__ == '__main__':
+ controller()
+
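All three checkers lean on a naming convention: a resource named foo is considered covered when a policy named foo-infrastructure, foo-application, or foo-browser exists. A tiny sketch of that matching with fabricated data:

    alert_policies = {'policies': [{'name': 'prod-edx-worker-infrastructure'}]}
    instances = [{'name': 'prod-edx-worker', 'ID': 'i-0abc'},
                 {'name': 'stage-edx-lms', 'ID': 'i-0def'}]

    missing = [inst for inst in instances
               if not any(policy['name'] == inst['name'] + '-infrastructure'
                          for policy in alert_policies['policies'])]
    print(missing)  # only the stage-edx-lms instance lacks a policy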
diff --git a/util/jenkins/primary_keys/check_primary_keys.py b/util/jenkins/primary_keys/check_primary_keys.py
index fc237535faf..2e9209d8881 100644
--- a/util/jenkins/primary_keys/check_primary_keys.py
+++ b/util/jenkins/primary_keys/check_primary_keys.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import sys
@@ -5,6 +7,7 @@
import pymysql
import click
from datetime import datetime, timedelta, timezone
+from six.moves import range
MAX_TRIES = 5
PERIOD = 360
@@ -94,7 +97,7 @@ def send_an_email(to_addr, from_addr, primary_keys_message, region):
)
message += """"""
- print("Sending the following as email to {}".format(to_addr))
+ print(("Sending the following as email to {}".format(to_addr)))
print(message)
ses_client.send_email(
Source=from_addr,
@@ -140,10 +143,10 @@ def get_rds_from_all_regions():
try:
regions_list = ec2_client.describe_regions()
except ClientError as e:
- print("Unable to connect to AWS with error :{}".format(e))
+ print(("Unable to connect to AWS with error :{}".format(e)))
sys.exit(1)
for region in regions_list["Regions"]:
- print("Getting RDS instances in region {}".format(region["RegionName"]))
+ print(("Getting RDS instances in region {}".format(region["RegionName"])))
rds_client = RDSBotoWrapper(region_name=region["RegionName"])
response = rds_client.describe_db_instances()
for instance in response.get('DBInstances'):
@@ -180,7 +183,7 @@ def check_primary_keys(rds_list, username, password, environment, deploy):
metric_data = []
tables_reaching_exhaustion_limit = []
for rds_instance in rds_list:
- print("Checking rds instance {}".format(rds_instance["name"]))
+ print(("Checking rds instance {}".format(rds_instance["name"])))
rds_host_endpoint = rds_instance["Endpoint"]
rds_port = rds_instance["Port"]
connection = pymysql.connect(host=rds_host_endpoint,
@@ -241,8 +244,8 @@ def check_primary_keys(rds_list, username, password, environment, deploy):
table_name_combined = "{}.{}".format(db_name, table_name)
table_percent = result_table[6]
if table_percent > 70:
- print("RDS {} Table {}: Primary keys {}% full".format(
- rds_instance["name"], table_name_combined, table_percent))
+ print(("RDS {} Table {}: Primary keys {}% full".format(
+ rds_instance["name"], table_name_combined, table_percent)))
metric_data.append({
'MetricName': metric_name,
'Dimensions': [{
@@ -280,7 +283,7 @@ def check_primary_keys(rds_list, username, password, environment, deploy):
cloudwatch.put_metric_data(Namespace=namespace, MetricData=metric_data)
return tables_reaching_exhaustion_limit
except Exception as e:
- print("Please see the following exception ", e)
+ print(("Please see the following exception ", e))
sys.exit(1)
@@ -315,9 +318,9 @@ def get_metrics_and_calcuate_diff(namespace, metric_name, dimension, value, curr
no_of_days = time_diff.days
increase_over_time_period = current_usage/no_of_days
days_remaining_before_exhaustion = consumed_keys_percentage/increase_over_time_period
- print("Days remaining for {table} table on db {db}: {days}".format(table=value,
+ print(("Days remaining for {table} table on db {db}: {days}".format(table=value,
db=dimension,
- days=days_remaining_before_exhaustion))
+ days=days_remaining_before_exhaustion)))
return days_remaining_before_exhaustion
@@ -342,7 +345,7 @@ def controller(username, password, environment, deploy, region, recipient, sende
"""
# get list of all the RDSes across all the regions and deployments
rds_list = get_rds_from_all_regions()
- filtered_rds_list = list(filter(lambda x: x['name'] not in rdsignore, rds_list))
+ filtered_rds_list = [x for x in rds_list if x['name'] not in rdsignore]
table_list = check_primary_keys(filtered_rds_list, username, password, environment, deploy)
if len(table_list) > 0:
send_an_email(recipient[0], sender[0], table_list, region[0])
diff --git a/util/jenkins/rds_alarms_checker/missing_rds_alarms.py b/util/jenkins/rds_alarms_checker/missing_rds_alarms.py
index 02f103517c7..2f7b3cd43c4 100644
--- a/util/jenkins/rds_alarms_checker/missing_rds_alarms.py
+++ b/util/jenkins/rds_alarms_checker/missing_rds_alarms.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
from botocore.exceptions import ClientError
import sys
@@ -51,7 +53,7 @@ def rds_extractor():
try:
regions_list = client_region.describe_regions()
except ClientError as e:
- print("Unable to connect to AWS with error :{}".format(e))
+ print(("Unable to connect to AWS with error :{}".format(e)))
sys.exit(1)
for region in regions_list["Regions"]:
client = RDSBotoWrapper(region_name=region["RegionName"])
@@ -92,8 +94,8 @@ def controller(whitelist):
if alarms_count < 1:
missing_alarm.append(db["name"])
if len(missing_alarm) > 0:
- print "RDS Name"
- print '\n'.join(str(p) for p in missing_alarm)
+ print("RDS Name")
+ print('\n'.join(str(p) for p in missing_alarm))
sys.exit(1)
sys.exit(0)
diff --git a/util/jenkins/update_celery_monitoring/create_celery_dashboard.py b/util/jenkins/update_celery_monitoring/create_celery_dashboard.py
index 7ae2ac7e3ea..0b1668d14b4 100644
--- a/util/jenkins/update_celery_monitoring/create_celery_dashboard.py
+++ b/util/jenkins/update_celery_monitoring/create_celery_dashboard.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import pprint
import re
diff --git a/util/jenkins/update_celery_monitoring/update_celery_monitoring.py b/util/jenkins/update_celery_monitoring/update_celery_monitoring.py
index dc10d5b80e1..696d41b529b 100644
--- a/util/jenkins/update_celery_monitoring/update_celery_monitoring.py
+++ b/util/jenkins/update_celery_monitoring/update_celery_monitoring.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import re
import redis
import click
@@ -24,7 +26,7 @@ def __init__(self, *args, **kwargs):
redis.exceptions.ConnectionError),
max_tries=MAX_TRIES)
def keys(self):
- return self.redis.keys()
+ return list(self.redis.keys())
@backoff.on_exception(backoff.expo,
(redis.exceptions.TimeoutError,
@@ -57,7 +59,7 @@ def list_metrics(self, *args, **kwargs):
max_tries=MAX_TRIES)
def put_metric_data(self, *args, **kwargs):
if self.dev_test_mode:
- print("Test Mode: would have run put_metric_data({},{})".format(args, kwargs))
+ print(("Test Mode: would have run put_metric_data({},{})".format(args, kwargs)))
else:
return self.client.put_metric_data(*args, **kwargs)
@@ -72,7 +74,7 @@ def describe_alarms(self, *args, **kwargs):
max_tries=MAX_TRIES)
def put_metric_alarm(self, *args, **kwargs):
if self.dev_test_mode:
- print("Test Mode: would have run put_metric_alarm({},{})".format(args, kwargs))
+ print(("Test Mode: would have run put_metric_alarm({},{})".format(args, kwargs)))
else:
return self.client.put_metric_alarm(*args, **kwargs)
@@ -216,7 +218,7 @@ def check_queues(host, port, environment, deploy, max_metrics, threshold,
existing_alarms = cloudwatch.describe_alarms(AlarmNames=[alarm_name])['MetricAlarms']
do_put_alarm = False
if len(existing_alarms) > 1:
- print("WARNINING: found multiple existing alarms for {}".format(alarm_name))
+ print(("WARNINING: found multiple existing alarms for {}".format(alarm_name)))
pprint(existing_alarms)
do_put_alarm = True
elif len(existing_alarms) == 1:
@@ -244,14 +246,14 @@ def check_queues(host, port, environment, deploy, max_metrics, threshold,
existing_alarm.get('AlarmActions')[0] == actions[0]):
do_put_alarm = True
if do_put_alarm:
- print('Updating existing alarm "{}"'.format(alarm_name))
+ print(('Updating existing alarm "{}"'.format(alarm_name)))
else:
do_put_alarm = True
- print('Creating new alarm "{}"'.format(alarm_name))
+ print(('Creating new alarm "{}"'.format(alarm_name)))
if not do_put_alarm:
- print('Not updating alarm "{}", no changes'.format(alarm_name))
+ print(('Not updating alarm "{}", no changes'.format(alarm_name)))
else:
- print('put_alarm_metric: {}'.format(alarm_name))
+ print(('put_alarm_metric: {}'.format(alarm_name)))
cloudwatch.put_metric_alarm(AlarmName=alarm_name,
AlarmDescription=alarm_name,
Namespace=namespace,
diff --git a/util/old/import_xml_courses.py b/util/old/import_xml_courses.py
index 5d1e1e16255..20436b40ffc 100644
--- a/util/old/import_xml_courses.py
+++ b/util/old/import_xml_courses.py
@@ -13,6 +13,8 @@
# version:
#}
+from __future__ import absolute_import
+from __future__ import print_function
import argparse
from os.path import basename
import yaml
@@ -63,4 +65,4 @@
"EDXAPP_XML_MAPPINGS": all_xml_mappings,
"EDXAPP_XML_FROM_GIT": True
}
- print yaml.safe_dump(edxapp_xml_courses, default_flow_style=False)
+ print(yaml.safe_dump(edxapp_xml_courses, default_flow_style=False))
diff --git a/util/parsefiles.py b/util/parsefiles.py
index 6d45396cd01..f667aab5a3d 100644
--- a/util/parsefiles.py
+++ b/util/parsefiles.py
@@ -1,3 +1,5 @@
+from __future__ import absolute_import
+from __future__ import print_function
import os
import pathlib2
import logging
@@ -6,6 +8,7 @@
import networkx as nx
from collections import namedtuple
import argparse
+import six
TRAVIS_BUILD_DIR = os.environ.get("TRAVIS_BUILD_DIR")
DOCKER_PATH_ROOT = pathlib2.Path(TRAVIS_BUILD_DIR, "docker", "build")
@@ -147,7 +150,7 @@ def _open_yaml_file(file_str):
try:
yaml_file = yaml.safe_load(file)
return yaml_file
- except yaml.YAMLError, exc:
+ except yaml.YAMLError as exc:
LOGGER.error("error in configuration file: %s" % str(exc))
sys.exit(1)
@@ -333,7 +336,7 @@ def _get_role_name(role):
"""
if isinstance(role, dict):
return role['role']
- elif isinstance(role, basestring):
+ elif isinstance(role, six.string_types):
return role
else:
LOGGER.warning("role %s could not be resolved to a role name." % role)
@@ -464,4 +467,4 @@ def arg_parse():
all_plays = set(set(docker_plays) | set( modified_docker_files) | set(docker_plays_dir))
- print " ".join(all_plays)
+ print(" ".join(all_plays))
diff --git a/util/pingdom/create_pingdom_alerts.py b/util/pingdom/create_pingdom_alerts.py
index b049a9cd7c4..e689193eaa5 100644
--- a/util/pingdom/create_pingdom_alerts.py
+++ b/util/pingdom/create_pingdom_alerts.py
@@ -1,9 +1,11 @@
+from __future__ import absolute_import
+from __future__ import print_function
import json
import click
import yaml
import requests
-import json
+from six.moves import map
class PingdomInvalidResponse(Exception):
@@ -38,16 +41,16 @@ def main(noop, pingdom_email, pingdom_password,
checks_by_hostname = build_checks_by_hostname(pingdom_email,
pingdom_password,
pingdom_api_key)
- hostnames = checks_by_hostname.keys()
+ hostnames = list(checks_by_hostname.keys())
for alert_config in config_file_content['checks']:
if alert_config['host'] not in hostnames:
# Create new check
if noop:
- print("Would CREATE: {0}, but you set the noop flag.".format(
- alert_config))
+ print(("Would CREATE: {0}, but you set the noop flag.".format(
+ alert_config)))
else:
- print("CREATE: {0}".format(alert_config))
+ print(("CREATE: {0}".format(alert_config)))
create_check(pingdom_email, pingdom_password,
pingdom_api_key, alert_config)
@@ -55,12 +58,12 @@ def main(noop, pingdom_email, pingdom_password,
# Updating existing check
existing_check = checks_by_hostname[alert_config['host']]
if noop:
- print("""
+ print(("""
Has changes, would UPDATE: {0},
but you set the noop flag.
- """.format(alert_config))
+ """.format(alert_config)))
else:
- print("Attempting UPDATE: {0}".format(alert_config))
+ print(("Attempting UPDATE: {0}".format(alert_config)))
# We always update because the parameters to POST check
# and the parameters returned by GET check differ.
# It would be difficult to figure out if changes
@@ -83,7 +86,7 @@ def replace_user_names_with_userids(pingdom_email,
for user in alert['users']:
if 'userids' in alert:
user_ids.extend(
- map(lambda x: x.strip(), alert['userids'].split(',')))
+ [x.strip() for x in alert['userids'].split(',')])
if user not in user_ids_by_name:
raise PingdomInvalidResponse(
'Pingdom has no user with the name {0}'.format(user))
@@ -103,7 +106,7 @@ def integration_names_to_ids(config_file_content):
if('integrationids' in alert):
integration_ids.extend(
alert['integrationids'].split(','))
- if integration not in integration_ids_by_name.keys():
+ if integration not in integration_ids_by_name:
print(
"""
You specified an integration
@@ -213,15 +216,15 @@ def build_userid_by_name(pingdom_email, pingdom_password, pingdom_api_key):
def print_request_and_response(response):
print("Request:")
for key in response.request.headers:
- print("{0}: {1}".format(key, response.request.headers[key]))
+ print(("{0}: {1}".format(key, response.request.headers[key])))
print("")
- print(response.request.body)
+ print((response.request.body))
print("------------------")
print("Response:")
for key in response.headers:
- print("{0}: {1}".format(key, response.headers[key]))
+ print(("{0}: {1}".format(key, response.headers[key])))
print("")
- print(response.content.decode('utf-8'))
+ print((response.content.decode('utf-8')))
print("------------------")
diff --git a/util/publish_rds_logs_to_cloudwatch.py b/util/publish_rds_logs_to_cloudwatch.py
index a36972c54f1..1c1d1923275 100755
--- a/util/publish_rds_logs_to_cloudwatch.py
+++ b/util/publish_rds_logs_to_cloudwatch.py
@@ -6,6 +6,8 @@
./publish_rds_logs_to_cloudwatch --db_engine aurora --db_identifier edx-aurora-cluster
"""
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import argparse
@@ -30,8 +32,8 @@ def publish_rds_logs_to_cloudwatch(db_engine,db_identifier,logs_to_publish):
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
id=response["DBInstance"]["DBInstanceIdentifier"]
logs_exports_to_cloudwatch=response["DBInstance"]["EnabledCloudwatchLogsExports"]
- print("RDS MySQL DB {} logs {} are enabled to exports to cloudwatch" \
- .format(id,logs_exports_to_cloudwatch))
+ print(("RDS MySQL DB {} logs {} are enabled to exports to cloudwatch" \
+ .format(id,logs_exports_to_cloudwatch)))
elif db_engine == "aurora":
response = client.modify_db_cluster(
DBClusterIdentifier=db_identifier,
@@ -44,8 +46,8 @@ def publish_rds_logs_to_cloudwatch(db_engine,db_identifier,logs_to_publish):
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
id=response["DBCluster"]["DBClusterIdentifier"]
logs_exports_to_cloudwatch=response["DBCluster"]["EnabledCloudwatchLogsExports"]
- print("RDS Aurora Cluster {} logs {} are enabled to exports to cloudwatch" \
- .format(id,logs_exports_to_cloudwatch))
+ print(("RDS Aurora Cluster {} logs {} are enabled to exports to cloudwatch" \
+ .format(id,logs_exports_to_cloudwatch)))
else:
print("db_engine valid options are: mysql or aurora")
exit()
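For context, the script boils down to one RDS API call per engine type. A minimal hedged sketch of the MySQL branch using boto3 (the identifier, region, and log types below are illustrative, not taken from this repo):

import boto3

client = boto3.client("rds", region_name="us-east-1")
response = client.modify_db_instance(
    DBInstanceIdentifier="my-db-instance",  # hypothetical instance name
    CloudwatchLogsExportConfiguration={
        "EnableLogTypes": ["error", "slowquery"],  # log types to ship
    },
)
# Mirror the script's status check before reporting success.
if response["ResponseMetadata"]["HTTPStatusCode"] == 200:
    print(response["DBInstance"]["EnabledCloudwatchLogsExports"])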
diff --git a/util/rabbitmq/shovel.py b/util/rabbitmq/shovel.py
index 88ede3768c6..dfddd99d7ba 100644
--- a/util/rabbitmq/shovel.py
+++ b/util/rabbitmq/shovel.py
@@ -1,9 +1,12 @@
#!/usr/bin/env python
+from __future__ import absolute_import
+from __future__ import print_function
import argparse
import subprocess
import requests
from requests.exceptions import HTTPError
import sys
+import six
parser=argparse.ArgumentParser(description='Shovels between RabbitMQ Clusters')
parser.add_argument('--src_host',action='store',dest='src_host')
@@ -26,7 +29,7 @@ def list_vhosts():
response.raise_for_status()
vhosts=[v['name'] for v in response.json() if v['name'].startswith('/')]
except Exception as ex:
- print "Failed to get vhosts: {}".format(ex)
+ print("Failed to get vhosts: {}".format(ex))
sys.exit(1)
return vhosts
@@ -38,7 +41,7 @@ def list_queues():
response.raise_for_status()
queues=[q['name'] for q in response.json()]
except Exception as ex:
- print "Failed to get queues: {}".format(ex)
+ print("Failed to get queues: {}".format(ex))
sys.exit(1)
return queues
@@ -65,10 +68,10 @@ def create_shovel(shovel,arg):
q=queue.split('.')
if (q[0]!='celeryev' and q[-1]!='pidbox'):
args='{{"src-uri": "{}", "src-queue": "{}","dest-uri": "{}","dest-queue": "{}"}}'.format(src_uri,queue,dest_uri,queue)
- print "Running shovel for queue:{}".format(queue)
+ print("Running shovel for queue:{}".format(queue))
shovel_output=create_shovel(queue,args)
if shovel_output is not None:
- content=unicode(shovel_output,"utf-8")
+ content=six.text_type(shovel_output,"utf-8")
output[queue]=content
for k,v in output.items():
- print k,v
+ print(k,v)
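The six.text_type substitution above is the one behavioral subtlety in this file: six.text_type is unicode on Python 2 and str on Python 3, so it decodes subprocess output identically on both. A minimal sketch (the sample bytes are made up):

import six

raw = b"shovel-output"  # stand-in for rabbitmqadmin output
content = six.text_type(raw, "utf-8")
assert content == u"shovel-output"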
diff --git a/util/rds_sgs/rds_sgs.py b/util/rds_sgs/rds_sgs.py
index 6d6d4e14b34..0d718f14afa 100755
--- a/util/rds_sgs/rds_sgs.py
+++ b/util/rds_sgs/rds_sgs.py
@@ -1,5 +1,7 @@
#!/usr/bin/python3
+from __future__ import absolute_import
+from __future__ import print_function
import boto3
import click
diff --git a/util/vpc-tools/asg_lifcycle_watcher.py b/util/vpc-tools/asg_lifcycle_watcher.py
index 5475e8521dc..15f1ab8d512 100644
--- a/util/vpc-tools/asg_lifcycle_watcher.py
+++ b/util/vpc-tools/asg_lifcycle_watcher.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
__author__ = 'e0d'
"""
diff --git a/util/vpc-tools/tag-old-ebs.py b/util/vpc-tools/tag-old-ebs.py
index 4a4112b4be3..90fdae41335 100644
--- a/util/vpc-tools/tag-old-ebs.py
+++ b/util/vpc-tools/tag-old-ebs.py
@@ -3,6 +3,7 @@
For a given aws account, go through all un-attached volumes and tag them.
"""
+from __future__ import absolute_import
import boto
import boto.utils
import argparse
@@ -14,6 +15,7 @@
import yaml
# needs to be pip installed
import netaddr
+from six.moves import filter
LOG_FORMAT = "%(asctime)s %(levelname)s - %(filename)s:%(lineno)s - %(message)s"
TIMEOUT = 300
@@ -50,13 +52,13 @@ def potential_devices(root_device):
relevant_devices = lambda x: x.startswith(basename(root_device))
all_devices = os.listdir(device_dir)
- all_devices = filter(relevant_devices, all_devices)
+ all_devices = list(filter(relevant_devices, all_devices))
logging.info("Potential devices on {}: {}".format(root_device, all_devices))
if len(all_devices) > 1:
all_devices.remove(basename(root_device))
- return map(lambda x: join(device_dir, x), all_devices)
+ return [join(device_dir, x) for x in all_devices]
def get_tags_for_disk(mountpoint):
tag_data = {}
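The list() wrapper around filter() above is load-bearing: the result is mutated with .remove() a few lines later. A minimal sketch of the difference (device names are made up):

from six.moves import filter  # lazy iterator on both Python 2 and 3

devices = ["xvda", "xvda1", "xvdb"]
matching = list(filter(lambda d: d.startswith("xvda"), devices))
matching.remove("xvda")  # works because list() materialized the result
assert matching == ["xvda1"]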
diff --git a/util/vpc-tools/vpc_dns.py b/util/vpc-tools/vpc_dns.py
index d8e051a75ac..3f34b7793e9 100644
--- a/util/vpc-tools/vpc_dns.py
+++ b/util/vpc-tools/vpc_dns.py
@@ -22,6 +22,8 @@
# python vpc_dns.py -s stage-stack -z vpc.example.com
#
+from __future__ import absolute_import
+from __future__ import print_function
import argparse
import boto
import datetime
@@ -78,13 +80,13 @@ def add_or_update_record(dns_records):
""".format(record.record_name, record.record_type,
record.record_ttl, record.record_values)
if args.noop:
- print("Would have updated DNS record:\n{}".format(status_msg))
+ print(("Would have updated DNS record:\n{}".format(status_msg)))
else:
- print("Updating DNS record:\n{}".format(status_msg))
+ print(("Updating DNS record:\n{}".format(status_msg)))
if record.record_name in record_names:
- print("Unable to create record for {} with value {} because one already exists!".format(
- record.record_values, record.record_name))
+ print(("Unable to create record for {} with value {} because one already exists!".format(
+ record.record_values, record.record_name)))
sys.exit(1)
record_names.add(record.record_name)
@@ -97,15 +99,15 @@ def add_or_update_record(dns_records):
# If the record name already points to something.
# Delete the existing connection. If the record has
# the same type and name skip it.
- if record.record_name in old_records.keys():
+ if record.record_name in old_records:
if record.record_name + "." == old_records[record.record_name].name and \
record.record_type == old_records[record.record_name].type:
- print("Record for {} already exists and is identical, skipping.\n".format(
- record.record_name))
+ print(("Record for {} already exists and is identical, skipping.\n".format(
+ record.record_name)))
continue
if args.force:
- print("Deleting record:\n{}".format(status_msg))
+ print(("Deleting record:\n{}".format(status_msg)))
change = change_set.add_change(
'DELETE',
record.record_name,
@@ -152,21 +154,21 @@ def get_or_create_hosted_zone(zone_name):
if args.noop:
if parent_zone:
- print("Would have created/updated zone: {} parent: {}".format(
- zone_name, parent_zone_name))
+ print(("Would have created/updated zone: {} parent: {}".format(
+ zone_name, parent_zone_name)))
else:
- print("Would have created/updated zone: {}".format(
- zone_name, parent_zone_name))
+ print(("Would have created/updated zone: {}".format(
+ zone_name, parent_zone_name)))
return zone
if not zone:
- print("zone {} does not exist, creating".format(zone_name))
+ print(("zone {} does not exist, creating".format(zone_name)))
ts = datetime.datetime.utcnow().strftime('%Y-%m-%d-%H:%M:%SUTC')
zone = r53.create_hosted_zone(
zone_name, comment="Created by vpc_dns script - {}".format(ts))
if parent_zone:
- print("Updating parent zone {}".format(parent_zone_name))
+ print(("Updating parent zone {}".format(parent_zone_name)))
dns_records = set()
dns_records.add(DNSRecord(parent_zone, zone_name, 'NS', 900, zone.NameServers))
@@ -188,7 +190,7 @@ def get_dns_from_instances(elb):
instance = ec2_con.get_all_instances(
instance_ids=[inst.id])[0].instances[0]
except IndexError:
- print("instance {} attached to elb {}".format(inst, elb))
+ print(("instance {} attached to elb {}".format(inst, elb)))
sys.exit(1)
try:
env_tag = instance.tags['environment']
@@ -200,8 +202,8 @@ def get_dns_from_instances(elb):
play_tag = instance.tags['role']
break # only need the first instance for tag info
except KeyError:
- print("Instance {}, attached to elb {} does not "
- "have a tag for environment, play or deployment".format(inst, elb))
+ print(("Instance {}, attached to elb {} does not "
+ "have a tag for environment, play or deployment".format(inst, elb)))
sys.exit(1)
return env_tag, deployment_tag, play_tag
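str.format fills anonymous {} placeholders strictly left to right, which is why the argument order in the "Unable to create record" message above matters. A standalone sketch (the record data is hypothetical):

record_name = "courses.example.com."
record_values = ["elb-123.us-east-1.elb.amazonaws.com"]

print("Unable to create record for {} with value {} because one already exists!".format(
    record_name, record_values))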
diff --git a/util/vpc-tools/vpcutil.py b/util/vpc-tools/vpcutil.py
index 1834e8e901f..05e1110116c 100644
--- a/util/vpc-tools/vpcutil.py
+++ b/util/vpc-tools/vpcutil.py
@@ -1,3 +1,4 @@
+from __future__ import absolute_import
import boto
import boto.rds2
import boto.rds
@@ -35,4 +36,4 @@ def rds_subnet_group_name_for_stack_name(stack_name, region='us-east-1', aws_id=
def all_stack_names(region='us-east-1', aws_id=None, aws_secret=None):
vpc_conn = boto.connect_vpc(aws_id, aws_secret)
return [vpc.tags[CFN_TAG_KEY] for vpc in vpc_conn.get_all_vpcs()
- if CFN_TAG_KEY in vpc.tags.keys()]
+ if CFN_TAG_KEY in vpc.tags]
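The last cleanup is the general py2-to-py3 membership idiom: `key in d` works directly on any dict and avoids materializing the keys. A standalone sketch (the tag data is made up):

tags = {"aws:cloudformation:stack-name": "stage-stack"}

assert "aws:cloudformation:stack-name" in tags
assert "aws:cloudformation:stack-name" in list(tags.keys())  # same result, extra copy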